diff --git a/spaces/101-5/gpt4free/testing/binghuan/BingHuan.py b/spaces/101-5/gpt4free/testing/binghuan/BingHuan.py deleted file mode 100644 index 8c859c080a9ac63ea90fec07c6640486c657cb05..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/testing/binghuan/BingHuan.py +++ /dev/null @@ -1,49 +0,0 @@ -import os,sys -import json -import subprocess -# from ...typing import sha256, Dict, get_type_hints - -url = 'https://b.ai-huan.xyz' -model = ['gpt-3.5-turbo', 'gpt-4'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - path = os.path.dirname(os.path.realpath(__file__)) - config = json.dumps({ - 'messages': messages, - 'model': model}, separators=(',', ':')) - cmd = ['python', f'{path}/helpers/binghuan.py', config] - - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - - for line in iter(p.stdout.readline, b''): - yield line.decode('cp1252') - - - -# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ -# '(%s)' % ', '.join( -# [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) - - -# Temporary For ChatCompletion Class -class ChatCompletion: - @staticmethod - def create(model: str, messages: list, provider: None or str, stream: bool = False, auth: str = False, **kwargs): - kwargs['auth'] = auth - - if provider and needs_auth and not auth: - print( - f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr) - sys.exit(1) - - try: - return (_create_completion(model, messages, stream, **kwargs) - if stream else ''.join(_create_completion(model, messages, stream, **kwargs))) - except TypeError as e: - print(e) - arg: str = str(e).split("'")[1] - print( - f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr) - sys.exit(1) \ No newline at end of file diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/app.py b/spaces/123Kumar/vits-uma-genshin-honkai123/app.py deleted file mode 100644 index 92ddafdcd240434f58569b0e6964ef331a971dcf..0000000000000000000000000000000000000000 --- a/spaces/123Kumar/vits-uma-genshin-honkai123/app.py +++ /dev/null @@ -1,124 +0,0 @@ -import time -import gradio as gr -import utils -import commons -from models import SynthesizerTrn -from text import text_to_sequence -from torch import no_grad, LongTensor -import torch - -hps_ms = utils.get_hparams_from_file(r'./model/config.json') -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model).to(device) -_ = net_g_ms.eval() -speakers = hps_ms.speakers -model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 500: - return f"输入文字过长!{len(text)}>100", None, 
None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - speaker_id = LongTensor([speaker_id]) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - with gr.Blocks() as app: - gr.Markdown( - "#
VITS语音在线合成demo\n" - "
主要有赛马娘,原神中文,原神日语,崩坏3的音色
" - '
结果有随机性,语调可能很奇怪,可多次生成取最佳效果
' - '
标点符号会影响生成的结果
' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate") - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - app.queue(concurrency_count=1).launch() \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cricket BYOD Compatibility List What You Need to Know Before You Switch.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cricket BYOD Compatibility List What You Need to Know Before You Switch.md deleted file mode 100644 index fb03c682d25c60f7ae1d712ab480e40b2443c9f8..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cricket BYOD Compatibility List What You Need to Know Before You Switch.md +++ /dev/null @@ -1,40 +0,0 @@ -
-

How to Check if Your Phone is Compatible with Cricket's BYOD Program

-

Cricket Wireless is a prepaid wireless service provider that offers a variety of plans and features for customers who want to bring their own device (BYOD) to the network. However, not all devices are compatible with Cricket's network, so you need to check your phone's compatibility before you switch.

-

cricket byod compatibility list


Download File: https://byltly.com/2uKzbu



-

In this article, we will explain how to check whether your phone is compatible with Cricket's BYOD program, what the requirements and benefits of using your own device on Cricket are, and which compatible devices you can bring to Cricket.

-

How to Check Your Phone's Compatibility

-

The easiest way to check if your phone is compatible with Cricket's network is to use their online IMEI checker tool. IMEI stands for International Mobile Equipment Identity, and it is a unique 15-digit number that identifies your device. You can find your IMEI by dialing *#06# on your phone's keypad, or by looking in your phone's settings or on the back of your device.

-

Once you have your IMEI, go to https://www.cricketwireless.com/cell-phones/bring-your-phone and enter it in the box. The tool will tell you if your phone is compatible with Cricket's network, and if it is eligible for HD Voice, which is a feature that enhances the quality and clarity of voice calls.

-

If your phone is not compatible, you may need to unlock it from your current carrier, or buy a new device that works on Cricket's network. You can also check out Cricket's list of compatible devices here.

-

What are the Requirements and Benefits of BYOD

-

To bring your own device to Cricket, you need to meet the following requirements:

-

- -

By bringing your own device to Cricket, you can enjoy the following benefits:

- -

Some Compatible Devices You Can Bring to Cricket

-

Cricket supports a wide range of compatible devices on its network, including smartphones, feature phones, tablets, and data-only devices. Here are some examples of devices you can bring to Cricket:

- - - - - - - - - - - - - - -

Negative user reviews of Agar.io apk

- - - -

This game is terrible! It is very glitchy and slow. I don't like how the game freezes or crashes all the time and makes me lose my connection or my progress. The game is very buggy and unstable. The worst part is that it has too many hackers and cheaters who ruin the game for everyone else. -

- -

Conclusion

- -

Frequently asked questions

-

Here are some frequently asked questions about Agar.io apk:

-

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/vendored/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/vendored/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Billyosoro/ESRGAN/realesrgan/models/realesrgan_model.py b/spaces/Billyosoro/ESRGAN/realesrgan/models/realesrgan_model.py deleted file mode 100644 index c298a09c42433177f90001a0a31d029576072ccd..0000000000000000000000000000000000000000 --- a/spaces/Billyosoro/ESRGAN/realesrgan/models/realesrgan_model.py +++ /dev/null @@ -1,258 +0,0 @@ -import numpy as np -import random -import torch -from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt -from basicsr.data.transforms import paired_random_crop -from basicsr.models.srgan_model import SRGANModel -from basicsr.utils import DiffJPEG, USMSharp -from basicsr.utils.img_process_util import filter2D -from basicsr.utils.registry import MODEL_REGISTRY -from collections import OrderedDict -from torch.nn import functional as F - - -@MODEL_REGISTRY.register() -class RealESRGANModel(SRGANModel): - """RealESRGAN Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. - - It mainly performs: - 1. randomly synthesize LQ images in GPU tensors - 2. optimize the networks with GAN training. - """ - - def __init__(self, opt): - super(RealESRGANModel, self).__init__(opt) - self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts - self.usm_sharpener = USMSharp().cuda() # do usm sharpening - self.queue_size = opt.get('queue_size', 180) - - @torch.no_grad() - def _dequeue_and_enqueue(self): - """It is the training pair pool for increasing the diversity in a batch. - - Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a - batch could not have different resize scaling factors. Therefore, we employ this training pair pool - to increase the degradation diversity in a batch. - """ - # initialize - b, c, h, w = self.lq.size() - if not hasattr(self, 'queue_lr'): - assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' - self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() - _, c, h, w = self.gt.size() - self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() - self.queue_ptr = 0 - if self.queue_ptr == self.queue_size: # the pool is full - # do dequeue and enqueue - # shuffle - idx = torch.randperm(self.queue_size) - self.queue_lr = self.queue_lr[idx] - self.queue_gt = self.queue_gt[idx] - # get first b samples - lq_dequeue = self.queue_lr[0:b, :, :, :].clone() - gt_dequeue = self.queue_gt[0:b, :, :, :].clone() - # update the queue - self.queue_lr[0:b, :, :, :] = self.lq.clone() - self.queue_gt[0:b, :, :, :] = self.gt.clone() - - self.lq = lq_dequeue - self.gt = gt_dequeue - else: - # only do enqueue - self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() - self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() - self.queue_ptr = self.queue_ptr + b - - @torch.no_grad() - def feed_data(self, data): - """Accept data from dataloader, and then add two-order degradations to obtain LQ images. 
- """ - if self.is_train and self.opt.get('high_order_degradation', True): - # training data synthesis - self.gt = data['gt'].to(self.device) - self.gt_usm = self.usm_sharpener(self.gt) - - self.kernel1 = data['kernel1'].to(self.device) - self.kernel2 = data['kernel2'].to(self.device) - self.sinc_kernel = data['sinc_kernel'].to(self.device) - - ori_h, ori_w = self.gt.size()[2:4] - - # ----------------------- The first degradation process ----------------------- # - # blur - out = filter2D(self.gt_usm, self.kernel1) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, scale_factor=scale, mode=mode) - # add noise - gray_noise_prob = self.opt['gray_noise_prob'] - if np.random.uniform() < self.opt['gaussian_noise_prob']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) - out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts - out = self.jpeger(out, quality=jpeg_p) - - # ----------------------- The second degradation process ----------------------- # - # blur - if np.random.uniform() < self.opt['second_blur_prob']: - out = filter2D(out, self.kernel2) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range2'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range2'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate( - out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) - # add noise - gray_noise_prob = self.opt['gray_noise_prob2'] - if np.random.uniform() < self.opt['gaussian_noise_prob2']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range2'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - - # JPEG compression + the final sinc filter - # We also need to resize images to desired sizes. We group [resize back + sinc filter] together - # as one operation. - # We consider two orders: - # 1. [resize back + sinc filter] + JPEG compression - # 2. JPEG compression + [resize back + sinc filter] - # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. 
- if np.random.uniform() < 0.5: - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - else: - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - - # clamp and round - self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255. - - # random crop - gt_size = self.opt['gt_size'] - (self.gt, self.gt_usm), self.lq = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size, - self.opt['scale']) - - # training pair pool - self._dequeue_and_enqueue() - # sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue - self.gt_usm = self.usm_sharpener(self.gt) - self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract - else: - # for paired training or validation - self.lq = data['lq'].to(self.device) - if 'gt' in data: - self.gt = data['gt'].to(self.device) - self.gt_usm = self.usm_sharpener(self.gt) - - def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): - # do not use the synthetic process during validation - self.is_train = False - super(RealESRGANModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img) - self.is_train = True - - def optimize_parameters(self, current_iter): - # usm sharpening - l1_gt = self.gt_usm - percep_gt = self.gt_usm - gan_gt = self.gt_usm - if self.opt['l1_gt_usm'] is False: - l1_gt = self.gt - if self.opt['percep_gt_usm'] is False: - percep_gt = self.gt - if self.opt['gan_gt_usm'] is False: - gan_gt = self.gt - - # optimize net_g - for p in self.net_d.parameters(): - p.requires_grad = False - - self.optimizer_g.zero_grad() - self.output = self.net_g(self.lq) - - l_g_total = 0 - loss_dict = OrderedDict() - if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters): - # pixel loss - if self.cri_pix: - l_g_pix = self.cri_pix(self.output, l1_gt) - l_g_total += l_g_pix - loss_dict['l_g_pix'] = l_g_pix - # perceptual loss - if self.cri_perceptual: - l_g_percep, l_g_style = self.cri_perceptual(self.output, percep_gt) - if l_g_percep is not None: - l_g_total += l_g_percep - loss_dict['l_g_percep'] = l_g_percep - if l_g_style is not None: - l_g_total += l_g_style - loss_dict['l_g_style'] = l_g_style - # gan loss - fake_g_pred = self.net_d(self.output) - l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False) - l_g_total += l_g_gan - loss_dict['l_g_gan'] = l_g_gan - - l_g_total.backward() - self.optimizer_g.step() - - # optimize net_d - for p in self.net_d.parameters(): - p.requires_grad = True - - self.optimizer_d.zero_grad() - # real - real_d_pred = self.net_d(gan_gt) - l_d_real = self.cri_gan(real_d_pred, True, is_disc=True) - loss_dict['l_d_real'] = l_d_real - loss_dict['out_d_real'] = torch.mean(real_d_pred.detach()) - l_d_real.backward() - # fake - fake_d_pred = self.net_d(self.output.detach().clone()) # clone for pt1.9 - l_d_fake = 
self.cri_gan(fake_d_pred, False, is_disc=True) - loss_dict['l_d_fake'] = l_d_fake - loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach()) - l_d_fake.backward() - self.optimizer_d.step() - - if self.ema_decay > 0: - self.model_ema(decay=self.ema_decay) - - self.log_dict = self.reduce_loss_dict(loss_dict) diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp deleted file mode 100644 index 2850d4ad767b26c7bf6a3b8559939e796189141b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -#include "../box_iou_rotated/box_iou_rotated_utils.h" -#include "nms_rotated.h" - -namespace detectron2 { - -template -at::Tensor nms_rotated_cpu_kernel( - const at::Tensor& dets, - const at::Tensor& scores, - const float iou_threshold) { - // nms_rotated_cpu_kernel is modified from torchvision's nms_cpu_kernel, - // however, the code in this function is much shorter because - // we delegate the IoU computation for rotated boxes to - // the single_box_iou_rotated function in box_iou_rotated_utils.h - AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor"); - AT_ASSERTM(!scores.type().is_cuda(), "scores must be a CPU tensor"); - AT_ASSERTM( - dets.type() == scores.type(), "dets should have the same type as scores"); - - if (dets.numel() == 0) { - return at::empty({0}, dets.options().dtype(at::kLong)); - } - - auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); - - auto ndets = dets.size(0); - at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte)); - at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong)); - - auto suppressed = suppressed_t.data_ptr(); - auto keep = keep_t.data_ptr(); - auto order = order_t.data_ptr(); - - int64_t num_to_keep = 0; - - for (int64_t _i = 0; _i < ndets; _i++) { - auto i = order[_i]; - if (suppressed[i] == 1) { - continue; - } - - keep[num_to_keep++] = i; - - for (int64_t _j = _i + 1; _j < ndets; _j++) { - auto j = order[_j]; - if (suppressed[j] == 1) { - continue; - } - - auto ovr = single_box_iou_rotated( - dets[i].data_ptr(), dets[j].data_ptr()); - if (ovr >= iou_threshold) { - suppressed[j] = 1; - } - } - } - return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep); -} - -at::Tensor nms_rotated_cpu( - const at::Tensor& dets, - const at::Tensor& scores, - const float iou_threshold) { - auto result = at::empty({0}, dets.options()); - - AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms_rotated", [&] { - result = nms_rotated_cpu_kernel(dets, scores, iou_threshold); - }); - return result; -} - -} // namespace detectron2 diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/mask_ops.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/mask_ops.py deleted file mode 100644 index 6ef2053050b4b37dcd49ea0387dbf3fcf086a834..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/mask_ops.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import numpy as np -import torch -from PIL import Image -from torch.nn import functional as F - -__all__ = ["paste_masks_in_image"] - - -BYTES_PER_FLOAT = 4 -# TODO: This memory limit may be too much or too little. It would be better to -# determine it based on available resources. -GPU_MEM_LIMIT = 1024 ** 3 # 1 GB memory limit - - -def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True): - """ - Args: - masks: N, 1, H, W - boxes: N, 4 - img_h, img_w (int): - skip_empty (bool): only paste masks within the region that - tightly bound all boxes, and returns the results this region only. - An important optimization for CPU. - - Returns: - if skip_empty == False, a mask of shape (N, img_h, img_w) - if skip_empty == True, a mask of shape (N, h', w'), and the slice - object for the corresponding region. - """ - # On GPU, paste all masks together (up to chunk size) - # by using the entire image to sample the masks - # Compared to pasting them one by one, - # this has more operations but is faster on COCO-scale dataset. - device = masks.device - if skip_empty: - x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to( - dtype=torch.int32 - ) - x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) - y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) - else: - x0_int, y0_int = 0, 0 - x1_int, y1_int = img_w, img_h - x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 - - N = masks.shape[0] - - img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 - img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 - img_y = (img_y - y0) / (y1 - y0) * 2 - 1 - img_x = (img_x - x0) / (x1 - x0) * 2 - 1 - # img_x, img_y have shapes (N, w), (N, h) - - gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) - gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) - grid = torch.stack([gx, gy], dim=3) - - img_masks = F.grid_sample(masks.to(dtype=torch.float32), grid, align_corners=False) - - if skip_empty: - return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) - else: - return img_masks[:, 0], () - - -def paste_masks_in_image(masks, boxes, image_shape, threshold=0.5): - """ - Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image. - The location, height, and width for pasting each mask is determined by their - corresponding bounding boxes in boxes. - - Note: - This is a complicated but more accurate implementation. In actual deployment, it is - often enough to use a faster but less accurate implementation. - See :func:`paste_mask_in_image_old` in this file for an alternative implementation. - - Args: - masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of - detected object instances in the image and Hmask, Wmask are the mask width and mask - height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1]. - boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4). - boxes[i] and masks[i] correspond to the same object instance. - image_shape (tuple): height, width - threshold (float): A threshold in [0, 1] for converting the (soft) masks to - binary masks. - - Returns: - img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the - number of detected object instances and Himage, Wimage are the image width - and height. img_masks[i] is a binary mask for object instance i. 
- """ - - assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported" - N = len(masks) - if N == 0: - return masks.new_empty((0,) + image_shape, dtype=torch.uint8) - if not isinstance(boxes, torch.Tensor): - boxes = boxes.tensor - device = boxes.device - assert len(boxes) == N, boxes.shape - - img_h, img_w = image_shape - - # The actual implementation split the input into chunks, - # and paste them chunk by chunk. - if device.type == "cpu": - # CPU is most efficient when they are pasted one by one with skip_empty=True - # so that it performs minimal number of operations. - num_chunks = N - else: - # GPU benefits from parallelism for larger chunks, but may have memory issue - num_chunks = int(np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) - assert ( - num_chunks <= N - ), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it" - chunks = torch.chunk(torch.arange(N, device=device), num_chunks) - - img_masks = torch.zeros( - N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8 - ) - for inds in chunks: - masks_chunk, spatial_inds = _do_paste_mask( - masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu" - ) - - if threshold >= 0: - masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) - else: - # for visualization and debugging - masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) - - img_masks[(inds,) + spatial_inds] = masks_chunk - return img_masks - - -# The below are the original paste function (from Detectron1) which has -# larger quantization error. -# It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample. - - -def paste_mask_in_image_old(mask, box, img_h, img_w, threshold): - """ - Paste a single mask in an image. - This is a per-box implementation of :func:`paste_masks_in_image`. - This function has larger quantization error due to incorrect pixel - modeling and is not used any more. - - Args: - mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single - object instance. Values are in [0, 1]. - box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners - of the object instance. - img_h, img_w (int): Image height and width. - threshold (float): Mask binarization threshold in [0, 1]. - - Returns: - im_mask (Tensor): - The resized and binarized object mask pasted into the original - image plane (a tensor of shape (img_h, img_w)). - """ - # Conversion from continuous box coordinates to discrete pixel coordinates - # via truncation (cast to int32). This determines which pixels to paste the - # mask onto. - box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion - # An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to - # a discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1 - # pixels (not x1 - x0 pixels). 
- samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width - samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height - - # Resample the mask from it's original grid to the new samples_w x samples_h grid - mask = Image.fromarray(mask.cpu().numpy()) - mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR) - mask = np.array(mask, copy=False) - - if threshold >= 0: - mask = np.array(mask > threshold, dtype=np.uint8) - mask = torch.from_numpy(mask) - else: - # for visualization and debugging, we also - # allow it to return an unmodified mask - mask = torch.from_numpy(mask * 255).to(torch.uint8) - - im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8) - x_0 = max(box[0], 0) - x_1 = min(box[2] + 1, img_w) - y_0 = max(box[1], 0) - y_1 = min(box[3] + 1, img_h) - - im_mask[y_0:y_1, x_0:x_1] = mask[ - (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0]) - ] - return im_mask - - -# Our pixel modeling requires extrapolation for any continuous -# coordinate < 0.5 or > length - 0.5. When sampling pixels on the masks, -# we would like this extrapolation to be an interpolation between boundary values and zero, -# instead of using absolute zero or boundary values. -# Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this: -# masks, scale = pad_masks(masks[:, 0, :, :], 1) -# boxes = scale_boxes(boxes.tensor, scale) - - -def pad_masks(masks, padding): - """ - Args: - masks (tensor): A tensor of shape (B, M, M) representing B masks. - padding (int): Number of cells to pad on all sides. - - Returns: - The padded masks and the scale factor of the padding size / original size. - """ - B = masks.shape[0] - M = masks.shape[-1] - pad2 = 2 * padding - scale = float(M + pad2) / M - padded_masks = masks.new_zeros((B, M + pad2, M + pad2)) - padded_masks[:, padding:-padding, padding:-padding] = masks - return padded_masks, scale - - -def scale_boxes(boxes, scale): - """ - Args: - boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4 - coords representing the corners x0, y0, x1, y1, - scale (float): The box scaling factor. - - Returns: - Scaled boxes. - """ - w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 - h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 - x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 - y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 - - w_half *= scale - h_half *= scale - - scaled_boxes = torch.zeros_like(boxes) - scaled_boxes[:, 0] = x_c - w_half - scaled_boxes[:, 2] = x_c + w_half - scaled_boxes[:, 1] = y_c - h_half - scaled_boxes[:, 3] = y_c + h_half - return scaled_boxes diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/convert-torchvision-to-d2.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/convert-torchvision-to-d2.py deleted file mode 100644 index 18a24e4ef96d34a4a0d1f43debc2276260da1a2b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/convert-torchvision-to-d2.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved - -import pickle as pkl -import sys -import torch - -""" -Usage: - # download one of the ResNet{18,34,50,101,152} models from torchvision: - wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth - # run the conversion - ./convert-torchvision-to-d2.py r50.pth r50.pkl - - # Then, use r50.pkl with the following changes in config: - -MODEL: - WEIGHTS: "/path/to/r50.pkl" - PIXEL_MEAN: [123.675, 116.280, 103.530] - PIXEL_STD: [58.395, 57.120, 57.375] - RESNETS: - DEPTH: 50 - STRIDE_IN_1X1: False -INPUT: - FORMAT: "RGB" - - These models typically produce slightly worse results than the - pre-trained ResNets we use in official configs, which are the - original ResNet models released by MSRA. -""" - -if __name__ == "__main__": - input = sys.argv[1] - - obj = torch.load(input, map_location="cpu") - - newmodel = {} - for k in list(obj.keys()): - old_k = k - if "layer" not in k: - k = "stem." + k - for t in [1, 2, 3, 4]: - k = k.replace("layer{}".format(t), "res{}".format(t + 1)) - for t in [1, 2, 3]: - k = k.replace("bn{}".format(t), "conv{}.norm".format(t)) - k = k.replace("downsample.0", "shortcut") - k = k.replace("downsample.1", "shortcut.norm") - print(old_k, "->", k) - newmodel[k] = obj.pop(old_k).detach().numpy() - - res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True} - - with open(sys.argv[2], "wb") as f: - pkl.dump(res, f) - if obj: - print("Unconverted keys:", obj.keys()) diff --git a/spaces/CVPR/LIVE/thrust/thrust/random/detail/linear_feedback_shift_engine_wordmask.h b/spaces/CVPR/LIVE/thrust/thrust/random/detail/linear_feedback_shift_engine_wordmask.h deleted file mode 100644 index 6669350eae5ce8049dee431ba1bb07d89ce86834..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/random/detail/linear_feedback_shift_engine_wordmask.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -namespace thrust -{ - -namespace random -{ - -namespace detail -{ - -template - struct linear_feedback_shift_engine_wordmask -{ - static const T value = - (T(1u) << i) | - linear_feedback_shift_engine_wordmask::value; -}; // end linear_feedback_shift_engine_wordmask - -template - struct linear_feedback_shift_engine_wordmask -{ - static const T value = 0; -}; // end linear_feedback_shift_engine_wordmask - -} // end detail - -} // end random - -} // end thrust - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/reduce.h b/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/reduce.h deleted file mode 100644 index e09652cd9fd6a30fd1673d1bbd33313b23450a3f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/reduce.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// this system inherits reduce -#include - diff --git a/spaces/CVPR/WALT/mmdet/core/bbox/samplers/base_sampler.py b/spaces/CVPR/WALT/mmdet/core/bbox/samplers/base_sampler.py deleted file mode 100644 index 9ea35def115b49dfdad8a1f7c040ef3cd983b0d1..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/core/bbox/samplers/base_sampler.py +++ /dev/null @@ -1,101 +0,0 @@ -from abc import ABCMeta, abstractmethod - -import torch - -from .sampling_result import SamplingResult - - -class BaseSampler(metaclass=ABCMeta): - """Base class of samplers.""" - - def __init__(self, - num, - pos_fraction, - neg_pos_ub=-1, - add_gt_as_proposals=True, - **kwargs): - self.num = num - self.pos_fraction = pos_fraction - self.neg_pos_ub = neg_pos_ub - self.add_gt_as_proposals = add_gt_as_proposals - self.pos_sampler = self - self.neg_sampler = self - - @abstractmethod - def _sample_pos(self, assign_result, num_expected, **kwargs): - """Sample positive samples.""" - pass - - @abstractmethod - def _sample_neg(self, assign_result, num_expected, **kwargs): - """Sample negative samples.""" - pass - - def sample(self, - assign_result, - bboxes, - gt_bboxes, - gt_labels=None, - **kwargs): - """Sample positive and negative bboxes. - - This is a simple implementation of bbox sampling given candidates, - assigning results and ground truth bboxes. - - Args: - assign_result (:obj:`AssignResult`): Bbox assigning results. - bboxes (Tensor): Boxes to be sampled from. - gt_bboxes (Tensor): Ground truth bboxes. - gt_labels (Tensor, optional): Class labels of ground truth bboxes. - - Returns: - :obj:`SamplingResult`: Sampling result. - - Example: - >>> from mmdet.core.bbox import RandomSampler - >>> from mmdet.core.bbox import AssignResult - >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes - >>> rng = ensure_rng(None) - >>> assign_result = AssignResult.random(rng=rng) - >>> bboxes = random_boxes(assign_result.num_preds, rng=rng) - >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng) - >>> gt_labels = None - >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1, - >>> add_gt_as_proposals=False) - >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels) - """ - if len(bboxes.shape) < 2: - bboxes = bboxes[None, :] - - bboxes = bboxes[:, :4] - - gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) - if self.add_gt_as_proposals and len(gt_bboxes) > 0: - if gt_labels is None: - raise ValueError( - 'gt_labels must be given when add_gt_as_proposals is True') - bboxes = torch.cat([gt_bboxes, bboxes], dim=0) - assign_result.add_gt_(gt_labels) - gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) - gt_flags = torch.cat([gt_ones, gt_flags]) - - num_expected_pos = int(self.num * self.pos_fraction) - pos_inds = self.pos_sampler._sample_pos( - assign_result, num_expected_pos, bboxes=bboxes, **kwargs) - # We found that sampled indices have duplicated items occasionally. 
- # (may be a bug of PyTorch) - pos_inds = pos_inds.unique() - num_sampled_pos = pos_inds.numel() - num_expected_neg = self.num - num_sampled_pos - if self.neg_pos_ub >= 0: - _pos = max(1, num_sampled_pos) - neg_upper_bound = int(self.neg_pos_ub * _pos) - if num_expected_neg > neg_upper_bound: - num_expected_neg = neg_upper_bound - neg_inds = self.neg_sampler._sample_neg( - assign_result, num_expected_neg, bboxes=bboxes, **kwargs) - neg_inds = neg_inds.unique() - - sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, - assign_result, gt_flags) - return sampling_result diff --git a/spaces/CVPR/WALT/mmdet/utils/logger.py b/spaces/CVPR/WALT/mmdet/utils/logger.py deleted file mode 100644 index 6fc6e6b438a73e857ba6f173594985807cb88b30..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/utils/logger.py +++ /dev/null @@ -1,19 +0,0 @@ -import logging - -from mmcv.utils import get_logger - - -def get_root_logger(log_file=None, log_level=logging.INFO): - """Get root logger. - - Args: - log_file (str, optional): File path of log. Defaults to None. - log_level (int, optional): The level of logger. - Defaults to logging.INFO. - - Returns: - :obj:`logging.Logger`: The obtained logger - """ - logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level) - - return logger diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/modeling/mask_decoder.py b/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/modeling/mask_decoder.py deleted file mode 100644 index 3e86f7cc9ad95582a08ef2531c68d03fa4af8d99..0000000000000000000000000000000000000000 --- a/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/modeling/mask_decoder.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from torch import nn -from torch.nn import functional as F - -from typing import List, Tuple, Type - -from .common import LayerNorm2d - - -class MaskDecoder(nn.Module): - def __init__( - self, - *, - transformer_dim: int, - transformer: nn.Module, - num_multimask_outputs: int = 3, - activation: Type[nn.Module] = nn.GELU, - iou_head_depth: int = 3, - iou_head_hidden_dim: int = 256, - ) -> None: - """ - Predicts masks given an image and prompt embeddings, using a - tranformer architecture. 
- - Arguments: - transformer_dim (int): the channel dimension of the transformer - transformer (nn.Module): the transformer used to predict masks - num_multimask_outputs (int): the number of masks to predict - when disambiguating masks - activation (nn.Module): the type of activation to use when - upscaling masks - iou_head_depth (int): the depth of the MLP used to predict - mask quality - iou_head_hidden_dim (int): the hidden dimension of the MLP - used to predict mask quality - """ - super().__init__() - self.transformer_dim = transformer_dim - self.transformer = transformer - - self.num_multimask_outputs = num_multimask_outputs - - self.iou_token = nn.Embedding(1, transformer_dim) - self.num_mask_tokens = num_multimask_outputs + 1 - self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) - - self.output_upscaling = nn.Sequential( - nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), - LayerNorm2d(transformer_dim // 4), - activation(), - nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), - activation(), - ) - self.output_hypernetworks_mlps = nn.ModuleList( - [ - MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) - for i in range(self.num_mask_tokens) - ] - ) - - self.iou_prediction_head = MLP( - transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth - ) - - def forward( - self, - image_embeddings: torch.Tensor, - image_pe: torch.Tensor, - sparse_prompt_embeddings: torch.Tensor, - dense_prompt_embeddings: torch.Tensor, - multimask_output: bool, - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Predict masks given image and prompt embeddings. - - Arguments: - image_embeddings (torch.Tensor): the embeddings from the image encoder - image_pe (torch.Tensor): positional encoding with the shape of image_embeddings - sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes - dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs - multimask_output (bool): Whether to return multiple masks or a single - mask. - - Returns: - torch.Tensor: batched predicted masks - torch.Tensor: batched predictions of mask quality - """ - masks, iou_pred = self.predict_masks( - image_embeddings=image_embeddings, - image_pe=image_pe, - sparse_prompt_embeddings=sparse_prompt_embeddings, - dense_prompt_embeddings=dense_prompt_embeddings, - ) - - # Select the correct mask or masks for outptu - if multimask_output: - mask_slice = slice(1, None) - else: - mask_slice = slice(0, 1) - masks = masks[:, mask_slice, :, :] - iou_pred = iou_pred[:, mask_slice] - - # Prepare output - return masks, iou_pred - - def predict_masks( - self, - image_embeddings: torch.Tensor, - image_pe: torch.Tensor, - sparse_prompt_embeddings: torch.Tensor, - dense_prompt_embeddings: torch.Tensor, - ) -> Tuple[torch.Tensor, torch.Tensor]: - """Predicts masks. 
See 'forward' for more details.""" - # Concatenate output tokens - output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) - output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1) - tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) - - # Expand per-image data in batch direction to be per-mask - src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) - src = src + dense_prompt_embeddings - pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) - b, c, h, w = src.shape - - # Run the transformer - hs, src = self.transformer(src, pos_src, tokens) - iou_token_out = hs[:, 0, :] - mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :] - - # Upscale mask embeddings and predict masks using the mask tokens - src = src.transpose(1, 2).view(b, c, h, w) - upscaled_embedding = self.output_upscaling(src) - hyper_in_list: List[torch.Tensor] = [] - for i in range(self.num_mask_tokens): - hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])) - hyper_in = torch.stack(hyper_in_list, dim=1) - b, c, h, w = upscaled_embedding.shape - masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) - - # Generate mask quality predictions - iou_pred = self.iou_prediction_head(iou_token_out) - - return masks, iou_pred - - -# Lightly adapted from -# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa -class MLP(nn.Module): - def __init__( - self, - input_dim: int, - hidden_dim: int, - output_dim: int, - num_layers: int, - sigmoid_output: bool = False, - ) -> None: - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList( - nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) - ) - self.sigmoid_output = sigmoid_output - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - if self.sigmoid_output: - x = F.sigmoid(x) - return x diff --git a/spaces/ChengZ/DeepDanbooru_string0/README.md b/spaces/ChengZ/DeepDanbooru_string0/README.md deleted file mode 100644 index 4330b6f969246dc764a34ea254d2e807159f1c55..0000000000000000000000000000000000000000 --- a/spaces/ChengZ/DeepDanbooru_string0/README.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: DeepDanbooru String -emoji: 💬 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -duplicated_from: NoCrypt/DeepDanbooru_string ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/CodingBillionaire/bark-voice-cloning/app.py b/spaces/CodingBillionaire/bark-voice-cloning/app.py deleted file mode 100644 index 4382649733cfacbc1267ca3253d4533fd4454325..0000000000000000000000000000000000000000 --- a/spaces/CodingBillionaire/bark-voice-cloning/app.py +++ /dev/null @@ -1,98 +0,0 @@ -import math -import os.path -import uuid - -import gradio -import numpy -import torch - -from hubert.hubert_manager import HuBERTManager -from hubert.pre_kmeans_hubert import CustomHubert -from hubert.customtokenizer import CustomTokenizer -from encodec import EncodecModel -from encodec.utils import convert_audio - - -hubert_model = CustomHubert(HuBERTManager.make_sure_hubert_installed()) -tokenizer_model = CustomTokenizer.load_from_checkpoint( - HuBERTManager.make_sure_tokenizer_installed(model='quantifier_V1_hubert_base_ls960_23.pth'), - map_location=torch.device('cpu') -) -encodec_model = EncodecModel.encodec_model_24khz() - - - -def clone(audio, *args): - sr, wav = audio - - wav = torch.tensor(wav) - - if wav.dtype == torch.int16: - wav = wav.float() / 32767.0 - - if len(wav.shape) == 2: - if wav.shape[0] == 2: # Stereo to mono if needed - wav = wav.mean(0, keepdim=True) - if wav.shape[1] == 2: - wav = wav.mean(1, keepdim=False).unsqueeze(-1) - - wav = wav[-int(sr*20):] # Take only the last 20 seconds - - wav = wav.reshape(1, -1) # Reshape from gradio style to HuBERT shape. (N, 1) to (1, N) - - semantic_vectors = hubert_model.forward(wav, input_sample_hz=sr) - semantic_tokens = tokenizer_model.get_token(semantic_vectors) - - encodec_model.set_target_bandwidth(6.0) - wav = convert_audio(wav, sr, encodec_model.sample_rate, 1) - wav = wav.unsqueeze(0) - - with torch.no_grad(): - encoded_frames = encodec_model.encode(wav) - - codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze() # [B, n_q, T] - - if not os.path.isdir('data/speakers'): - os.makedirs('data/speakers') - - file_path = f'data/speakers/{uuid.uuid4().hex}.npz' - - numpy.savez( - file_path, - semantic_prompt=semantic_tokens, - fine_prompt=codes, - coarse_prompt=codes[:2, :] - ) - - return file_path - - - -iface = gradio.interface.Interface(fn=clone, inputs=[ - 'audio', - gradio.Markdown( - ''' - # Bark text to speech voice cloning - [Model](https://huggingface.co/GitMylo/bark-voice-cloning/), [Model GitHub](https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer), [Webui GitHub](https://github.com/gitmylo/audio-webui) - - For faster creation of voice clones [Duplicate this space](https://huggingface.co/spaces/GitMylo/bark-voice-cloning?duplicate=true) - - Uploaded audio files get cut to 20 seconds in order to keep it fast for everyone. Only the last 20 seconds will be used. (Bark only uses the last 14 seconds anyway) - - ## Tips for better cloning - ### Make sure these things are **NOT** in your voice input: (in no particular order) - * Noise (You can use a noise remover before) - * Music (There are also music remover tools) (Unless you want music in the background) - * A cut-off at the end (This will cause it to try and continue on the generation) - * Under 1 second of training data (i personally suggest around 10 seconds for good potential, but i've had great results with 5 seconds as well.) - - ### What makes for good prompt audio? 
(in no particular order) - * Clearly spoken - * No weird background noises - * Only one speaker - * Audio which ends after a sentence ends - * Regular/common voice (They usually have more success, it's still capable of cloning complex voices, but not as good at it) - * Around 10 seconds of data - ''') -], outputs='file') -iface.launch() diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/__init__.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/__init__.py deleted file mode 100644 index b01f30cfddd8ed97d5a39f55641fbc929297d885..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# from .rpn import build_rpn diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/instancer/solver.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/instancer/solver.py deleted file mode 100644 index c991fcdcfbdc2fb6ac4815fe94b0c4ecb92a3e2d..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/instancer/solver.py +++ /dev/null @@ -1,305 +0,0 @@ -from fontTools.varLib.models import supportScalar -from fontTools.misc.fixedTools import MAX_F2DOT14 -from functools import lru_cache - -__all__ = ["rebaseTent"] - -EPSILON = 1 / (1 << 14) - - -def _reverse_negate(v): - return (-v[2], -v[1], -v[0]) - - -def _solve(tent, axisLimit, negative=False): - axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit - lower, peak, upper = tent - - # Mirror the problem such that axisDef <= peak - if axisDef > peak: - return [ - (scalar, _reverse_negate(t) if t is not None else None) - for scalar, t in _solve( - _reverse_negate(tent), - axisLimit.reverse_negate(), - not negative, - ) - ] - # axisDef <= peak - - # case 1: The whole deltaset falls outside the new limit; we can drop it - # - # peak - # 1.........................................o.......... - # / \ - # / \ - # / \ - # / \ - # 0---|-----------|----------|-------- o o----1 - # axisMin axisDef axisMax lower upper - # - if axisMax <= lower and axisMax < peak: - return [] # No overlap - - # case 2: Only the peak and outermost bound fall outside the new limit; - # we keep the deltaset, update peak and outermost bound and and scale deltas - # by the scalar value for the restricted axis at the new limit, and solve - # recursively. - # - # |peak - # 1...............................|.o.......... - # |/ \ - # / \ - # /| \ - # / | \ - # 0--------------------------- o | o----1 - # lower | upper - # | - # axisMax - # - # Convert to: - # - # 1............................................ - # | - # o peak - # /| - # /x| - # 0--------------------------- o o upper ----1 - # lower | - # | - # axisMax - if axisMax < peak: - mult = supportScalar({"tag": axisMax}, {"tag": tent}) - tent = (lower, axisMax, axisMax) - return [(scalar * mult, t) for scalar, t in _solve(tent, axisLimit)] - - # lower <= axisDef <= peak <= axisMax - - gain = supportScalar({"tag": axisDef}, {"tag": tent}) - out = [(gain, None)] - - # First, the positive side - - # outGain is the scalar of axisMax at the tent. - outGain = supportScalar({"tag": axisMax}, {"tag": tent}) - - # Case 3a: Gain is more than outGain. The tent down-slope crosses - # the axis into negative. We have to split it into multiples. - # - # | peak | - # 1...................|.o.....|.............. 
- # |/x\_ | - # gain................+....+_.|.............. - # /| |y\| - # ................../.|....|..+_......outGain - # / | | | \ - # 0---|-----------o | | | o----------1 - # axisMin lower | | | upper - # | | | - # axisDef | axisMax - # | - # crossing - if gain > outGain: - # Crossing point on the axis. - crossing = peak + (1 - gain) * (upper - peak) - - loc = (axisDef, peak, crossing) - scalar = 1 - - # The part before the crossing point. - out.append((scalar - gain, loc)) - - # The part after the crossing point may use one or two tents, - # depending on whether upper is before axisMax or not, in one - # case we need to keep it down to eternity. - - # Case 3a1, similar to case 1neg; just one tent needed, as in - # the drawing above. - if upper >= axisMax: - loc = (crossing, axisMax, axisMax) - scalar = outGain - - out.append((scalar - gain, loc)) - - # Case 3a2: Similar to case 2neg; two tents needed, to keep - # down to eternity. - # - # | peak | - # 1...................|.o................|... - # |/ \_ | - # gain................+....+_............|... - # /| | \xxxxxxxxxxy| - # / | | \_xxxxxyyyy| - # / | | \xxyyyyyy| - # 0---|-----------o | | o-------|--1 - # axisMin lower | | upper | - # | | | - # axisDef | axisMax - # | - # crossing - else: - # A tent's peak cannot fall on axis default. Nudge it. - if upper == axisDef: - upper += EPSILON - - # Downslope. - loc1 = (crossing, upper, axisMax) - scalar1 = 0 - - # Eternity justify. - loc2 = (upper, axisMax, axisMax) - scalar2 = 0 - - out.append((scalar1 - gain, loc1)) - out.append((scalar2 - gain, loc2)) - - else: - # Special-case if peak is at axisMax. - if axisMax == peak: - upper = peak - - # Case 3: - # We keep delta as is and only scale the axis upper to achieve - # the desired new tent if feasible. - # - # peak - # 1.....................o.................... - # / \_| - # ..................../....+_.........outGain - # / | \ - # gain..............+......|..+_............. - # /| | | \ - # 0---|-----------o | | | o----------1 - # axisMin lower| | | upper - # | | newUpper - # axisDef axisMax - # - newUpper = peak + (1 - gain) * (upper - peak) - assert axisMax <= newUpper # Because outGain >= gain - if newUpper <= axisDef + (axisMax - axisDef) * 2: - upper = newUpper - if not negative and axisDef + (axisMax - axisDef) * MAX_F2DOT14 < upper: - # we clamp +2.0 to the max F2Dot14 (~1.99994) for convenience - upper = axisDef + (axisMax - axisDef) * MAX_F2DOT14 - assert peak < upper - - loc = (max(axisDef, lower), peak, upper) - scalar = 1 - - out.append((scalar - gain, loc)) - - # Case 4: New limit doesn't fit; we need to chop into two tents, - # because the shape of a triangle with part of one side cut off - # cannot be represented as a triangle itself. - # - # | peak | - # 1.........|......o.|.................... - # ..........|...../x\|.............outGain - # | |xxy|\_ - # | /xxxy| \_ - # | |xxxxy| \_ - # | /xxxxy| \_ - # 0---|-----|-oxxxxxx| o----------1 - # axisMin | lower | upper - # | | - # axisDef axisMax - # - else: - loc1 = (max(axisDef, lower), peak, axisMax) - scalar1 = 1 - - loc2 = (peak, axisMax, axisMax) - scalar2 = outGain - - out.append((scalar1 - gain, loc1)) - # Don't add a dirac delta! - if peak < axisMax: - out.append((scalar2 - gain, loc2)) - - # Now, the negative side - - # Case 1neg: Lower extends beyond axisMin: we chop. Simple. - # - # | |peak - # 1..................|...|.o................. - # | |/ \ - # gain...............|...+...\............... 
- # |x_/| \ - # |/ | \ - # _/| | \ - # 0---------------o | | o----------1 - # lower | | upper - # | | - # axisMin axisDef - # - if lower <= axisMin: - loc = (axisMin, axisMin, axisDef) - scalar = supportScalar({"tag": axisMin}, {"tag": tent}) - - out.append((scalar - gain, loc)) - - # Case 2neg: Lower is betwen axisMin and axisDef: we add two - # tents to keep it down all the way to eternity. - # - # | |peak - # 1...|...............|.o................. - # | |/ \ - # gain|...............+...\............... - # |yxxxxxxxxxxxxx/| \ - # |yyyyyyxxxxxxx/ | \ - # |yyyyyyyyyyyx/ | \ - # 0---|-----------o | o----------1 - # axisMin lower | upper - # | - # axisDef - # - else: - # A tent's peak cannot fall on axis default. Nudge it. - if lower == axisDef: - lower -= EPSILON - - # Downslope. - loc1 = (axisMin, lower, axisDef) - scalar1 = 0 - - # Eternity justify. - loc2 = (axisMin, axisMin, lower) - scalar2 = 0 - - out.append((scalar1 - gain, loc1)) - out.append((scalar2 - gain, loc2)) - - return out - - -@lru_cache(128) -def rebaseTent(tent, axisLimit): - """Given a tuple (lower,peak,upper) "tent" and new axis limits - (axisMin,axisDefault,axisMax), solves how to represent the tent - under the new axis configuration. All values are in normalized - -1,0,+1 coordinate system. Tent values can be outside this range. - - Return value is a list of tuples. Each tuple is of the form - (scalar,tent), where scalar is a multipler to multiply any - delta-sets by, and tent is a new tent for that output delta-set. - If tent value is None, that is a special deltaset that should - be always-enabled (called "gain").""" - - axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit - assert -1 <= axisMin <= axisDef <= axisMax <= +1 - - lower, peak, upper = tent - assert -2 <= lower <= peak <= upper <= +2 - - assert peak != 0 - - sols = _solve(tent, axisLimit) - - n = lambda v: axisLimit.renormalizeValue(v) - sols = [ - (scalar, (n(v[0]), n(v[1]), n(v[2])) if v is not None else None) - for scalar, v in sols - if scalar - ] - - return sols diff --git a/spaces/Danielzero/GPT3.5/ChuanhuChatbot.py b/spaces/Danielzero/GPT3.5/ChuanhuChatbot.py deleted file mode 100644 index cbf63e52857a1852658fdf2009ca26f9fb0a6bec..0000000000000000000000000000000000000000 --- a/spaces/Danielzero/GPT3.5/ChuanhuChatbot.py +++ /dev/null @@ -1,470 +0,0 @@ -# -*- coding:utf-8 -*- -import os -import logging -import sys - -import gradio as gr - -from modules import config -from modules.config import * -from modules.utils import * -from modules.presets import * -from modules.overwrites import * -from modules.models import get_model - - -gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages -gr.Chatbot.postprocess = postprocess -PromptHelper.compact_text_chunks = compact_text_chunks - -with open("assets/custom.css", "r", encoding="utf-8") as f: - customCSS = f.read() - -def create_new_model(): - return get_model(model_name = MODELS[DEFAULT_MODEL], access_key = my_api_key)[0] - -with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo: - user_name = gr.State("") - promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2)) - user_question = gr.State("") - user_api_key = gr.State(my_api_key) - current_model = gr.State(create_new_model) - - topic = gr.State(i18n("未命名对话历史记录")) - - with gr.Row(): - gr.HTML(CHUANHU_TITLE, elem_id="app_title") - status_display = gr.Markdown(get_geoip(), elem_id="status_display") - with gr.Row(elem_id="float_display"): - user_info = 
gr.Markdown(value="getting user info...", elem_id="user_info") - - # https://github.com/gradio-app/gradio/pull/3296 - def create_greeting(request: gr.Request): - if hasattr(request, "username") and request.username: # is not None or is not "" - logging.info(f"Get User Name: {request.username}") - return gr.Markdown.update(value=f"User: {request.username}"), request.username - else: - return gr.Markdown.update(value=f"User: default", visible=False), "" - demo.load(create_greeting, inputs=None, outputs=[user_info, user_name]) - - with gr.Row().style(equal_height=True): - with gr.Column(scale=5): - with gr.Row(): - chatbot = gr.Chatbot(elem_id="chuanhu_chatbot").style(height="100%") - with gr.Row(): - with gr.Column(min_width=225, scale=12): - user_input = gr.Textbox( - elem_id="user_input_tb", - show_label=False, placeholder=i18n("在这里输入") - ).style(container=False) - with gr.Column(min_width=42, scale=1): - submitBtn = gr.Button(value="", variant="primary", elem_id="submit_btn") - cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel_btn") - with gr.Row(): - emptyBtn = gr.Button( - i18n("🧹 新的对话"), - ) - retryBtn = gr.Button(i18n("🔄 重新生成")) - delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话")) - delLastBtn = gr.Button(i18n("🗑️ 删除最新对话")) - with gr.Row(visible=False) as like_dislike_area: - with gr.Column(min_width=20, scale=1): - likeBtn = gr.Button(i18n("👍")) - with gr.Column(min_width=20, scale=1): - dislikeBtn = gr.Button(i18n("👎")) - - with gr.Column(): - with gr.Column(min_width=50, scale=1): - with gr.Tab(label=i18n("模型")): - keyTxt = gr.Textbox( - show_label=True, - placeholder=f"Your API-key...", - value=hide_middle_chars(user_api_key.value), - type="password", - visible=not HIDE_MY_KEY, - label="API-Key", - ) - if multi_api_key: - usageTxt = gr.Markdown(i18n("多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage_display", elem_classes="insert_block") - else: - usageTxt = gr.Markdown(i18n("**发送消息** 或 **提交key** 以显示额度"), elem_id="usage_display", elem_classes="insert_block") - model_select_dropdown = gr.Dropdown( - label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL], interactive=True - ) - lora_select_dropdown = gr.Dropdown( - label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True, visible=False - ) - with gr.Row(): - use_streaming_checkbox = gr.Checkbox( - label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION - ) - single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False) - use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False) - language_select_dropdown = gr.Dropdown( - label=i18n("选择回复语言(针对搜索&索引功能)"), - choices=REPLY_LANGUAGES, - multiselect=False, - value=REPLY_LANGUAGES[0], - ) - index_files = gr.Files(label=i18n("上传"), type="file") - two_column = gr.Checkbox(label=i18n("双栏pdf"), value=advance_docs["pdf"].get("two_column", False)) - # TODO: 公式ocr - # formula_ocr = gr.Checkbox(label=i18n("识别公式"), value=advance_docs["pdf"].get("formula_ocr", False)) - - with gr.Tab(label="Prompt"): - systemPromptTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入System Prompt..."), - label="System prompt", - value=INITIAL_SYSTEM_PROMPT, - lines=10, - ).style(container=False) - with gr.Accordion(label=i18n("加载Prompt模板"), open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - templateFileSelectDropdown = gr.Dropdown( - label=i18n("选择Prompt模板集合文件"), - choices=get_template_names(plain=True), - multiselect=False, - value=get_template_names(plain=True)[0], - 
).style(container=False) - with gr.Column(scale=1): - templateRefreshBtn = gr.Button(i18n("🔄 刷新")) - with gr.Row(): - with gr.Column(): - templateSelectDropdown = gr.Dropdown( - label=i18n("从Prompt模板中加载"), - choices=load_template( - get_template_names(plain=True)[0], mode=1 - ), - multiselect=False, - ).style(container=False) - - with gr.Tab(label=i18n("保存/加载")): - with gr.Accordion(label=i18n("保存/加载对话历史记录"), open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - historyFileSelectDropdown = gr.Dropdown( - label=i18n("从列表中加载对话"), - choices=get_history_names(plain=True), - multiselect=False, - value=get_history_names(plain=True)[0], - ) - with gr.Column(scale=1): - historyRefreshBtn = gr.Button(i18n("🔄 刷新")) - with gr.Row(): - with gr.Column(scale=6): - saveFileName = gr.Textbox( - show_label=True, - placeholder=i18n("设置文件名: 默认为.json,可选为.md"), - label=i18n("设置保存文件名"), - value=i18n("对话历史记录"), - ).style(container=True) - with gr.Column(scale=1): - saveHistoryBtn = gr.Button(i18n("💾 保存对话")) - exportMarkdownBtn = gr.Button(i18n("📝 导出为Markdown")) - gr.Markdown(i18n("默认保存于history文件夹")) - with gr.Row(): - with gr.Column(): - downloadFile = gr.File(interactive=True) - - with gr.Tab(label=i18n("高级")): - gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置")) - gr.HTML(APPEARANCE_SWITCHER, elem_classes="insert_block") - with gr.Accordion(i18n("参数"), open=False): - temperature_slider = gr.Slider( - minimum=-0, - maximum=2.0, - value=1.0, - step=0.1, - interactive=True, - label="temperature", - ) - top_p_slider = gr.Slider( - minimum=-0, - maximum=1.0, - value=1.0, - step=0.05, - interactive=True, - label="top-p", - ) - n_choices_slider = gr.Slider( - minimum=1, - maximum=10, - value=1, - step=1, - interactive=True, - label="n choices", - ) - stop_sequence_txt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入停止符,用英文逗号隔开..."), - label="stop", - value="", - lines=1, - ) - max_context_length_slider = gr.Slider( - minimum=1, - maximum=32768, - value=2000, - step=1, - interactive=True, - label="max context", - ) - max_generation_slider = gr.Slider( - minimum=1, - maximum=32768, - value=1000, - step=1, - interactive=True, - label="max generations", - ) - presence_penalty_slider = gr.Slider( - minimum=-2.0, - maximum=2.0, - value=0.0, - step=0.01, - interactive=True, - label="presence penalty", - ) - frequency_penalty_slider = gr.Slider( - minimum=-2.0, - maximum=2.0, - value=0.0, - step=0.01, - interactive=True, - label="frequency penalty", - ) - logit_bias_txt = gr.Textbox( - show_label=True, - placeholder=f"word:likelihood", - label="logit bias", - value="", - lines=1, - ) - user_identifier_txt = gr.Textbox( - show_label=True, - placeholder=i18n("用于定位滥用行为"), - label=i18n("用户名"), - value=user_name.value, - lines=1, - ) - - with gr.Accordion(i18n("网络设置"), open=False): - # 优先展示自定义的api_host - apihostTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入API-Host..."), - label="API-Host", - value=config.api_host or shared.API_HOST, - lines=1, - ) - changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址")) - proxyTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入代理地址..."), - label=i18n("代理地址(示例:http://127.0.0.1:10809)"), - value="", - lines=2, - ) - changeProxyBtn = gr.Button(i18n("🔄 设置代理地址")) - default_btn = gr.Button(i18n("🔙 恢复默认设置")) - - gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description") - gr.HTML(FOOTER.format(versions=versions_html()), elem_id="footer") - demo.load(refresh_ui_elements_on_load, [current_model, model_select_dropdown], [like_dislike_area], 
show_progress=False) - chatgpt_predict_args = dict( - fn=predict, - inputs=[ - current_model, - user_question, - chatbot, - use_streaming_checkbox, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - outputs=[chatbot, status_display], - show_progress=True, - ) - - start_outputing_args = dict( - fn=start_outputing, - inputs=[], - outputs=[submitBtn, cancelBtn], - show_progress=True, - ) - - end_outputing_args = dict( - fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn] - ) - - reset_textbox_args = dict( - fn=reset_textbox, inputs=[], outputs=[user_input] - ) - - transfer_input_args = dict( - fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn, cancelBtn], show_progress=True - ) - - get_usage_args = dict( - fn=billing_info, inputs=[current_model], outputs=[usageTxt], show_progress=False - ) - - load_history_from_file_args = dict( - fn=load_chat_history, - inputs=[current_model, historyFileSelectDropdown, chatbot, user_name], - outputs=[saveFileName, systemPromptTxt, chatbot] - ) - - - # Chatbot - cancelBtn.click(interrupt, [current_model], []) - - user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - user_input.submit(**get_usage_args) - - submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - submitBtn.click(**get_usage_args) - - index_files.change(handle_file_upload, [current_model, index_files, chatbot], [index_files, chatbot, status_display]) - - emptyBtn.click( - reset, - inputs=[current_model], - outputs=[chatbot, status_display], - show_progress=True, - ) - - retryBtn.click(**start_outputing_args).then( - retry, - [ - current_model, - chatbot, - use_streaming_checkbox, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - [chatbot, status_display], - show_progress=True, - ).then(**end_outputing_args) - retryBtn.click(**get_usage_args) - - delFirstBtn.click( - delete_first_conversation, - [current_model], - [status_display], - ) - - delLastBtn.click( - delete_last_conversation, - [current_model, chatbot], - [chatbot, status_display], - show_progress=False - ) - - likeBtn.click( - like, - [current_model], - [status_display], - show_progress=False - ) - - dislikeBtn.click( - dislike, - [current_model], - [status_display], - show_progress=False - ) - - two_column.change(update_doc_config, [two_column], None) - - # LLM Models - keyTxt.change(set_key, [current_model, keyTxt], [user_api_key, status_display]).then(**get_usage_args) - keyTxt.submit(**get_usage_args) - single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None) - model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt], [current_model, status_display, lora_select_dropdown], show_progress=True) - model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], show_progress=False) - lora_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt], [current_model, status_display], show_progress=True) - - # Template - systemPromptTxt.change(set_system_prompt, [current_model, systemPromptTxt], None) - templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown]) - templateFileSelectDropdown.change( - load_template, - [templateFileSelectDropdown], - [promptTemplates, templateSelectDropdown], - 
show_progress=True, - ) - templateSelectDropdown.change( - get_template_content, - [promptTemplates, templateSelectDropdown, systemPromptTxt], - [systemPromptTxt], - show_progress=True, - ) - - # S&L - saveHistoryBtn.click( - save_chat_history, - [current_model, saveFileName, chatbot, user_name], - downloadFile, - show_progress=True, - ) - saveHistoryBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - exportMarkdownBtn.click( - export_markdown, - [current_model, saveFileName, chatbot, user_name], - downloadFile, - show_progress=True, - ) - historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - historyFileSelectDropdown.change(**load_history_from_file_args) - downloadFile.change(**load_history_from_file_args) - - # Advanced - max_context_length_slider.change(set_token_upper_limit, [current_model, max_context_length_slider], None) - temperature_slider.change(set_temperature, [current_model, temperature_slider], None) - top_p_slider.change(set_top_p, [current_model, top_p_slider], None) - n_choices_slider.change(set_n_choices, [current_model, n_choices_slider], None) - stop_sequence_txt.change(set_stop_sequence, [current_model, stop_sequence_txt], None) - max_generation_slider.change(set_max_tokens, [current_model, max_generation_slider], None) - presence_penalty_slider.change(set_presence_penalty, [current_model, presence_penalty_slider], None) - frequency_penalty_slider.change(set_frequency_penalty, [current_model, frequency_penalty_slider], None) - logit_bias_txt.change(set_logit_bias, [current_model, logit_bias_txt], None) - user_identifier_txt.change(set_user_identifier, [current_model, user_identifier_txt], None) - - default_btn.click( - reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True - ) - changeAPIURLBtn.click( - change_api_host, - [apihostTxt], - [status_display], - show_progress=True, - ) - changeProxyBtn.click( - change_proxy, - [proxyTxt], - [status_display], - show_progress=True, - ) - -logging.info( - colorama.Back.GREEN - + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面" - + colorama.Style.RESET_ALL -) -# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接 -demo.title = i18n("川虎Chat 🚀") - -if __name__ == "__main__": - reload_javascript() - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - favicon_path="./assets/favicon.ico", - ) - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理 diff --git a/spaces/DavidHosp/Movie_Recommendation_System/README.md b/spaces/DavidHosp/Movie_Recommendation_System/README.md deleted file mode 100644 index 493ea054c4f5cf89748ff34f69142d806316ca86..0000000000000000000000000000000000000000 --- a/spaces/DavidHosp/Movie_Recommendation_System/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Movie Recommendation System -emoji: 🏃 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DemoLou/moe-tts/commons.py b/spaces/DemoLou/moe-tts/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- 
a/spaces/DemoLou/moe-tts/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = 
pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/DiffusionArtco/Diffusion50/app.py b/spaces/DiffusionArtco/Diffusion50/app.py deleted file mode 100644 index b67d049f9b7e78d036b66505fdf1d1fe3e82d6e9..0000000000000000000000000000000000000000 --- a/spaces/DiffusionArtco/Diffusion50/app.py +++ /dev/null @@ -1,35 +0,0 @@ -import gradio as gr -import requests -from PIL import Image -from io import BytesIO -import base64 - -api_url = "https://5cb20b40-572c-426f-9466-995256f9b6eb.id.repl.co/generate_image" - -def generate_image(model="Deliberate", prompt="", seed=0, negative_prompt="", sampler="k_dpmpp_2s_a", steps=50): - data = "?model=" + model + "&prompt=" + prompt + "&seed=" + str(seed) + "&negative_prompt=" + negative_prompt + "&sampler=" + sampler + "&steps=" + str(steps) - response = requests.post(api_url + data, timeout=400) - if response.status_code == 200: - img_base64 = response.json()["url"] - img_bytes = base64.b64decode(img_base64) - img = Image.open(BytesIO(img_bytes)) - return img - else: - return None - -inputs = [ - gr.inputs.Dropdown(['Analog Diffusion', 'Anything Diffusion', 'Anything v3', 'ChilloutMix', 'Counterfeit', 'CyriousMix', 'Deliberate', 'Dreamshaper', 'Dreamlike Diffusion', 'Dreamlike Photoreal', 'Experience', 'FaeTastic', 'Hassanblend', 'Mega Merge Diffusion', 'Midjourney Diffusion', 'ModernArt Diffusion', 'Movie Diffusion', 'NeverEnding Dream', 'Perfect World', 'PortraitPlus', 'ProtoGen', 'Protogen Anime', 'Protogen Infinity', 'RealBiter', 'Realism Engine', 'Realistic Vision', 'Rev Animated', 'RPG', 'Seek.art MEGA', 'stable_diffusion', 'stable_diffusion_2.1' , 'Unstable Ink Dream'], label="Model", default="Deliberate"), - gr.inputs.Textbox(label="Prompt", default=""), - gr.inputs.Number(label="Seed", default=0), - gr.inputs.Textbox(label="Negative Prompt", default=""), - gr.inputs.Dropdown(["k_lms", "k_heun", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a", "DDIM", "k_dpm_fast", "k_dpm_adaptive", "k_dpmpp_2m", "k_dpmpp_2s_a", "k_dpmpp_sde"], label="Sampler", default="k_dpmpp_2s_a"), - gr.inputs.Number(label="Steps", default=50) -] - 
-outputs = gr.outputs.Image(label="Generated Image", type="pil") - -interface = gr.Interface(generate_image, inputs, outputs, title="Diffusion 50", - description="
Live access to the most popular Diffusion models
", - examples=[]) - -interface.launch() diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/op/upfirdn2d.py b/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/op/upfirdn2d.py deleted file mode 100644 index 02fc25af780868d9b883631eb6b03a25c225d745..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/op/upfirdn2d.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -import torch -from torch.nn import functional as F - - -module_path = os.path.dirname(__file__) - - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - out = upfirdn2d_native( - input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1] - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) \ No newline at end of file diff --git a/spaces/DragGan/DragGan/visualizer_drag.py b/spaces/DragGan/DragGan/visualizer_drag.py deleted file mode 100644 index 9120906ebf87f5c16a3a1cff6a2e1b721a502688..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/visualizer_drag.py +++ /dev/null @@ -1,404 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import click -import os - -import multiprocessing -import numpy as np -import torch -import imgui -import dnnlib -from gui_utils import imgui_window -from gui_utils import imgui_utils -from gui_utils import gl_utils -from gui_utils import text_utils -from viz import renderer -from viz import pickle_widget -from viz import latent_widget -from viz import drag_widget -from viz import capture_widget - -#---------------------------------------------------------------------------- - -class Visualizer(imgui_window.ImguiWindow): - def __init__(self, capture_dir=None): - super().__init__(title='DragGAN', window_width=3840, window_height=2160) - - # Internals. 
- self._last_error_print = None - self._async_renderer = AsyncRenderer() - self._defer_rendering = 0 - self._tex_img = None - self._tex_obj = None - self._mask_obj = None - self._image_area = None - self._status = dnnlib.EasyDict() - - # Widget interface. - self.args = dnnlib.EasyDict() - self.result = dnnlib.EasyDict() - self.pane_w = 0 - self.label_w = 0 - self.button_w = 0 - self.image_w = 0 - self.image_h = 0 - - # Widgets. - self.pickle_widget = pickle_widget.PickleWidget(self) - self.latent_widget = latent_widget.LatentWidget(self) - self.drag_widget = drag_widget.DragWidget(self) - self.capture_widget = capture_widget.CaptureWidget(self) - - if capture_dir is not None: - self.capture_widget.path = capture_dir - - # Initialize window. - self.set_position(0, 0) - self._adjust_font_size() - self.skip_frame() # Layout may change after first frame. - - def close(self): - super().close() - if self._async_renderer is not None: - self._async_renderer.close() - self._async_renderer = None - - def add_recent_pickle(self, pkl, ignore_errors=False): - self.pickle_widget.add_recent(pkl, ignore_errors=ignore_errors) - - def load_pickle(self, pkl, ignore_errors=False): - self.pickle_widget.load(pkl, ignore_errors=ignore_errors) - - def print_error(self, error): - error = str(error) - if error != self._last_error_print: - print('\n' + error + '\n') - self._last_error_print = error - - def defer_rendering(self, num_frames=1): - self._defer_rendering = max(self._defer_rendering, num_frames) - - def clear_result(self): - self._async_renderer.clear_result() - - def set_async(self, is_async): - if is_async != self._async_renderer.is_async: - self._async_renderer.set_async(is_async) - self.clear_result() - if 'image' in self.result: - self.result.message = 'Switching rendering process...' - self.defer_rendering() - - def _adjust_font_size(self): - old = self.font_size - self.set_font_size(min(self.content_width / 120, self.content_height / 60)) - if self.font_size != old: - self.skip_frame() # Layout changed. - - def check_update_mask(self, **args): - update_mask = False - if 'pkl' in self._status: - if self._status.pkl != args['pkl']: - update_mask = True - self._status.pkl = args['pkl'] - if 'w0_seed' in self._status: - if self._status.w0_seed != args['w0_seed']: - update_mask = True - self._status.w0_seed = args['w0_seed'] - return update_mask - - def capture_image_frame(self): - self.capture_next_frame() - captured_frame = self.pop_captured_frame() - captured_image = None - if captured_frame is not None: - x1, y1, w, h = self._image_area - captured_image = captured_frame[y1:y1+h, x1:x1+w, :] - return captured_image - - def get_drag_info(self): - seed = self.latent_widget.seed - points = self.drag_widget.points - targets = self.drag_widget.targets - mask = self.drag_widget.mask - w = self._async_renderer._renderer_obj.w - return seed, points, targets, mask, w - - def draw_frame(self): - self.begin_frame() - self.args = dnnlib.EasyDict() - self.pane_w = self.font_size * 18 - self.button_w = self.font_size * 5 - self.label_w = round(self.font_size * 4.5) - - # Detect mouse dragging in the result area. 
- if self._image_area is not None: - if not hasattr(self.drag_widget, 'width'): - self.drag_widget.init_mask(self.image_w, self.image_h) - clicked, down, img_x, img_y = imgui_utils.click_hidden_window( - '##image_area', self._image_area[0], self._image_area[1], self._image_area[2], self._image_area[3], self.image_w, self.image_h) - self.drag_widget.action(clicked, down, img_x, img_y) - - # Begin control pane. - imgui.set_next_window_position(0, 0) - imgui.set_next_window_size(self.pane_w, self.content_height) - imgui.begin('##control_pane', closable=False, flags=(imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE)) - - # Widgets. - expanded, _visible = imgui_utils.collapsing_header('Network & latent', default=True) - self.pickle_widget(expanded) - self.latent_widget(expanded) - expanded, _visible = imgui_utils.collapsing_header('Drag', default=True) - self.drag_widget(expanded) - expanded, _visible = imgui_utils.collapsing_header('Capture', default=True) - self.capture_widget(expanded) - - # Render. - if self.is_skipping_frames(): - pass - elif self._defer_rendering > 0: - self._defer_rendering -= 1 - elif self.args.pkl is not None: - self._async_renderer.set_args(**self.args) - result = self._async_renderer.get_result() - if result is not None: - self.result = result - if 'stop' in self.result and self.result.stop: - self.drag_widget.stop_drag() - if 'points' in self.result: - self.drag_widget.set_points(self.result.points) - if 'init_net' in self.result: - if self.result.init_net: - self.drag_widget.reset_point() - - if self.check_update_mask(**self.args): - h, w, _ = self.result.image.shape - self.drag_widget.init_mask(w, h) - - # Display. - max_w = self.content_width - self.pane_w - max_h = self.content_height - pos = np.array([self.pane_w + max_w / 2, max_h / 2]) - if 'image' in self.result: - if self._tex_img is not self.result.image: - self._tex_img = self.result.image - if self._tex_obj is None or not self._tex_obj.is_compatible(image=self._tex_img): - self._tex_obj = gl_utils.Texture(image=self._tex_img, bilinear=False, mipmap=False) - else: - self._tex_obj.update(self._tex_img) - self.image_h, self.image_w = self._tex_obj.height, self._tex_obj.width - zoom = min(max_w / self._tex_obj.width, max_h / self._tex_obj.height) - zoom = np.floor(zoom) if zoom >= 1 else zoom - self._tex_obj.draw(pos=pos, zoom=zoom, align=0.5, rint=True) - if self.drag_widget.show_mask and hasattr(self.drag_widget, 'mask'): - mask = ((1-self.drag_widget.mask.unsqueeze(-1)) * 255).to(torch.uint8) - if self._mask_obj is None or not self._mask_obj.is_compatible(image=self._tex_img): - self._mask_obj = gl_utils.Texture(image=mask, bilinear=False, mipmap=False) - else: - self._mask_obj.update(mask) - self._mask_obj.draw(pos=pos, zoom=zoom, align=0.5, rint=True, alpha=0.15) - - if self.drag_widget.mode in ['flexible', 'fixed']: - posx, posy = imgui.get_mouse_pos() - if posx >= self.pane_w: - pos_c = np.array([posx, posy]) - gl_utils.draw_circle(center=pos_c, radius=self.drag_widget.r_mask * zoom, alpha=0.5) - - rescale = self._tex_obj.width / 512 * zoom - - for point in self.drag_widget.targets: - pos_x = self.pane_w + max_w / 2 + (point[1] - self.image_w//2) * zoom - pos_y = max_h / 2 + (point[0] - self.image_h//2) * zoom - gl_utils.draw_circle(center=np.array([pos_x, pos_y]), color=[0,0,1], radius=9 * rescale) - - for point in self.drag_widget.points: - pos_x = self.pane_w + max_w / 2 + (point[1] - self.image_w//2) * zoom - pos_y = max_h / 2 + (point[0] - self.image_h//2) * zoom - 
gl_utils.draw_circle(center=np.array([pos_x, pos_y]), color=[1,0,0], radius=9 * rescale) - - for point, target in zip(self.drag_widget.points, self.drag_widget.targets): - t_x = self.pane_w + max_w / 2 + (target[1] - self.image_w//2) * zoom - t_y = max_h / 2 + (target[0] - self.image_h//2) * zoom - - p_x = self.pane_w + max_w / 2 + (point[1] - self.image_w//2) * zoom - p_y = max_h / 2 + (point[0] - self.image_h//2) * zoom - - gl_utils.draw_arrow(p_x, p_y, t_x, t_y, l=8 * rescale, width = 3 * rescale) - - imshow_w = int(self._tex_obj.width * zoom) - imshow_h = int(self._tex_obj.height * zoom) - self._image_area = [int(self.pane_w + max_w / 2 - imshow_w / 2), int(max_h / 2 - imshow_h / 2), imshow_w, imshow_h] - if 'error' in self.result: - self.print_error(self.result.error) - if 'message' not in self.result: - self.result.message = str(self.result.error) - if 'message' in self.result: - tex = text_utils.get_texture(self.result.message, size=self.font_size, max_width=max_w, max_height=max_h, outline=2) - tex.draw(pos=pos, align=0.5, rint=True, color=1) - - # End frame. - self._adjust_font_size() - imgui.end() - self.end_frame() - -#---------------------------------------------------------------------------- - -class AsyncRenderer: - def __init__(self): - self._closed = False - self._is_async = False - self._cur_args = None - self._cur_result = None - self._cur_stamp = 0 - self._renderer_obj = None - self._args_queue = None - self._result_queue = None - self._process = None - - def close(self): - self._closed = True - self._renderer_obj = None - if self._process is not None: - self._process.terminate() - self._process = None - self._args_queue = None - self._result_queue = None - - @property - def is_async(self): - return self._is_async - - def set_async(self, is_async): - self._is_async = is_async - - def set_args(self, **args): - assert not self._closed - args2 = args.copy() - args_mask = args2.pop('mask') - if self._cur_args: - _cur_args = self._cur_args.copy() - cur_args_mask = _cur_args.pop('mask') - else: - _cur_args = self._cur_args - # if args != self._cur_args: - if args2 != _cur_args: - if self._is_async: - self._set_args_async(**args) - else: - self._set_args_sync(**args) - self._cur_args = args - - def _set_args_async(self, **args): - if self._process is None: - self._args_queue = multiprocessing.Queue() - self._result_queue = multiprocessing.Queue() - try: - multiprocessing.set_start_method('spawn') - except RuntimeError: - pass - self._process = multiprocessing.Process(target=self._process_fn, args=(self._args_queue, self._result_queue), daemon=True) - self._process.start() - self._args_queue.put([args, self._cur_stamp]) - - def _set_args_sync(self, **args): - if self._renderer_obj is None: - self._renderer_obj = renderer.Renderer() - self._cur_result = self._renderer_obj.render(**args) - - def get_result(self): - assert not self._closed - if self._result_queue is not None: - while self._result_queue.qsize() > 0: - result, stamp = self._result_queue.get() - if stamp == self._cur_stamp: - self._cur_result = result - return self._cur_result - - def clear_result(self): - assert not self._closed - self._cur_args = None - self._cur_result = None - self._cur_stamp += 1 - - @staticmethod - def _process_fn(args_queue, result_queue): - renderer_obj = renderer.Renderer() - cur_args = None - cur_stamp = None - while True: - args, stamp = args_queue.get() - while args_queue.qsize() > 0: - args, stamp = args_queue.get() - if args != cur_args or stamp != cur_stamp: - result = 
renderer_obj.render(**args) - if 'error' in result: - result.error = renderer.CapturedException(result.error) - result_queue.put([result, stamp]) - cur_args = args - cur_stamp = stamp - -#---------------------------------------------------------------------------- - -@click.command() -@click.argument('pkls', metavar='PATH', nargs=-1) -@click.option('--capture-dir', help='Where to save screenshot captures', metavar='PATH', default=None) -@click.option('--browse-dir', help='Specify model path for the \'Browse...\' button', metavar='PATH') -def main( - pkls, - capture_dir, - browse_dir -): - """Interactive model visualizer. - - Optional PATH argument can be used specify which .pkl file to load. - """ - viz = Visualizer(capture_dir=capture_dir) - - if browse_dir is not None: - viz.pickle_widget.search_dirs = [browse_dir] - - # List pickles. - if len(pkls) > 0: - for pkl in pkls: - viz.add_recent_pickle(pkl) - viz.load_pickle(pkls[0]) - else: - pretrained = [ - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqcat-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqdog-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqv2-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqwild-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-brecahad-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-celebahq-256x256.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-cifar10-32x32.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-256x256.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhqu-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhqu-256x256.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-lsundog-256x256.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-metfaces-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-metfacesu-1024x1024.pkl' - ] - - # Populate recent pickles list with pretrained model URLs. - for url in pretrained: - viz.add_recent_pickle(url) - - # Run. - while not viz.should_close(): - viz.draw_frame() - viz.close() - -#---------------------------------------------------------------------------- - -if __name__ == "__main__": - main() - -#---------------------------------------------------------------------------- diff --git a/spaces/EPFL-VILAB/MultiMAE/mask2former/data/dataset_mappers/__init__.py b/spaces/EPFL-VILAB/MultiMAE/mask2former/data/dataset_mappers/__init__.py deleted file mode 100644 index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/mask2former/data/dataset_mappers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
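
The `AsyncRenderer` in the deleted `visualizer_drag.py` above offloads rendering to a separate multiprocessing worker: arguments go into one queue, results come back on another, and each request carries a "stamp" so results belonging to an outdated UI state can be discarded. Below is a minimal standalone sketch of that queue-and-stamp pattern, with a dummy `render` function standing in for `renderer.Renderer().render(**args)`; it is an illustration of the idea, not the DragGAN code itself.

import multiprocessing
import time


def render(**args):
    # Placeholder for the real (GPU-bound) rendering call.
    time.sleep(0.05)
    return {'image': f"frame for {args}"}


def worker(args_queue, result_queue):
    while True:
        args, stamp = args_queue.get()
        # Drain the queue: only the most recent request matters for an interactive UI.
        while not args_queue.empty():
            args, stamp = args_queue.get()
        result_queue.put((render(**args), stamp))


if __name__ == '__main__':
    args_q = multiprocessing.Queue()
    result_q = multiprocessing.Queue()
    proc = multiprocessing.Process(target=worker, args=(args_q, result_q), daemon=True)
    proc.start()

    cur_stamp = 0
    args_q.put(({'seed': 1}, cur_stamp))
    cur_stamp += 1                        # e.g. the UI state was cleared in the meantime
    args_q.put(({'seed': 2}, cur_stamp))

    time.sleep(0.5)
    while not result_q.empty():
        result, stamp = result_q.get()
        if stamp == cur_stamp:            # stale results are simply ignored
            print(result)
    proc.terminate()
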
diff --git a/spaces/EPFL-VILAB/MultiMAE/utils/datasets.py b/spaces/EPFL-VILAB/MultiMAE/utils/datasets.py deleted file mode 100644 index d8e273e57f14da62ae27c95273645441c4637247..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/utils/datasets.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (c) EPFL VILAB. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# -------------------------------------------------------- -# Based on BEiT, timm, DINO, DeiT and MAE-priv code bases -# https://github.com/microsoft/unilm/tree/master/beit -# https://github.com/rwightman/pytorch-image-models/tree/master/timm -# https://github.com/facebookresearch/deit -# https://github.com/facebookresearch/dino -# https://github.com/BUPT-PRIV/MAE-priv -# -------------------------------------------------------- - -import os -import random - -import numpy as np -import torch -import torchvision.transforms.functional as TF -from torchvision import datasets, transforms - -from utils import create_transform - -from .data_constants import (IMAGE_TASKS, IMAGENET_DEFAULT_MEAN, - IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, - IMAGENET_INCEPTION_STD) -from .dataset_folder import ImageFolder, MultiTaskImageFolder - - -def denormalize(img, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD): - return TF.normalize( - img.clone(), - mean= [-m/s for m, s in zip(mean, std)], - std= [1/s for s in std] - ) - - -class DataAugmentationForMAE(object): - def __init__(self, args): - imagenet_default_mean_and_std = args.imagenet_default_mean_and_std - mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN - std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD - - trans = [transforms.RandomResizedCrop(args.input_size)] - if args.hflip > 0.0: - trans.append(transforms.RandomHorizontalFlip(args.hflip)) - trans.extend([ - transforms.ToTensor(), - transforms.Normalize( - mean=torch.tensor(mean), - std=torch.tensor(std))]) - - self.transform = transforms.Compose(trans) - - def __call__(self, image): - return self.transform(image) - - def __repr__(self): - repr = "(DataAugmentationForBEiT,\n" - repr += " transform = %s,\n" % str(self.transform) - repr += ")" - return repr - - -class DataAugmentationForMultiMAE(object): - def __init__(self, args): - imagenet_default_mean_and_std = args.imagenet_default_mean_and_std - self.rgb_mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN - self.rgb_std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD - self.input_size = args.input_size - self.hflip = args.hflip - - def __call__(self, task_dict): - flip = random.random() < self.hflip # Stores whether to flip all images or not - ijhw = None # Stores crop coordinates used for all tasks - - # Crop and flip all tasks randomly, but consistently for all tasks - for task in task_dict: - if task not in IMAGE_TASKS: - continue - if ijhw is None: - # Official MAE code uses (0.2, 1.0) for scale and (0.75, 1.3333) for ratio - ijhw = transforms.RandomResizedCrop.get_params( - task_dict[task], scale=(0.2, 1.0), ratio=(0.75, 1.3333) - ) - i, j, h, w = ijhw - task_dict[task] = TF.crop(task_dict[task], i, j, h, w) - task_dict[task] = task_dict[task].resize((self.input_size, self.input_size)) - if flip: - task_dict[task] = TF.hflip(task_dict[task]) - - # Convert to Tensor - for task in task_dict: - 
if task in ['depth']: - img = torch.Tensor(np.array(task_dict[task]) / 2 ** 16) - img = img.unsqueeze(0) # 1 x H x W - elif task in ['rgb']: - img = TF.to_tensor(task_dict[task]) - img = TF.normalize(img, mean=self.rgb_mean, std=self.rgb_std) - elif task in ['semseg', 'semseg_coco']: - # TODO: add this to a config instead - # Rescale to 0.25x size (stride 4) - scale_factor = 0.25 - img = task_dict[task].resize((int(self.input_size * scale_factor), int(self.input_size * scale_factor))) - # Using pil_to_tensor keeps it in uint8, to_tensor converts it to float (rescaled to [0, 1]) - img = TF.pil_to_tensor(img).to(torch.long).squeeze(0) - - task_dict[task] = img - - return task_dict - - def __repr__(self): - repr = "(DataAugmentationForMultiMAE,\n" - #repr += " transform = %s,\n" % str(self.transform) - repr += ")" - return repr - -def build_pretraining_dataset(args): - transform = DataAugmentationForMAE(args) - print("Data Aug = %s" % str(transform)) - return ImageFolder(args.data_path, transform=transform) - -def build_multimae_pretraining_dataset(args): - transform = DataAugmentationForMultiMAE(args) - return MultiTaskImageFolder(args.data_path, args.all_domains, transform=transform) - -def build_dataset(is_train, args): - transform = build_transform(is_train, args) - - print("Transform = ") - if isinstance(transform, tuple): - for trans in transform: - print(" - - - - - - - - - - ") - for t in trans.transforms: - print(t) - else: - for t in transform.transforms: - print(t) - print("---------------------------") - - if args.data_set == 'CIFAR': - dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform) - nb_classes = 100 - elif args.data_set == 'IMNET': - # root = os.path.join(args.data_path, 'train' if is_train else 'val') - root = args.data_path if is_train else args.eval_data_path - dataset = datasets.ImageFolder(root, transform=transform) - nb_classes = 1000 - elif args.data_set == "image_folder": - root = args.data_path if is_train else args.eval_data_path - dataset = ImageFolder(root, transform=transform) - nb_classes = args.nb_classes - assert len(dataset.class_to_idx) == nb_classes - else: - raise NotImplementedError() - assert nb_classes == args.nb_classes - print("Number of the class = %d" % args.nb_classes) - - return dataset, nb_classes - - -def build_transform(is_train, args): - resize_im = args.input_size > 32 - imagenet_default_mean_and_std = args.imagenet_default_mean_and_std - mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN - std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD - - if is_train: - # this should always dispatch to transforms_imagenet_train - transform = create_transform( - input_size=args.input_size, - is_training=True, - color_jitter=args.color_jitter, - auto_augment=args.aa, - interpolation=args.train_interpolation, - re_prob=args.reprob, - re_mode=args.remode, - re_count=args.recount, - mean=mean, - std=std, - ) - if not resize_im: - # replace RandomResizedCropAndInterpolation with - # RandomCrop - transform.transforms[0] = transforms.RandomCrop( - args.input_size, padding=4) - return transform - - t = [] - if resize_im: - if args.crop_pct is None: - if args.input_size < 384: - args.crop_pct = 224 / 256 - else: - args.crop_pct = 1.0 - size = int(args.input_size / args.crop_pct) - t.append( - transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 
224 images - ) - t.append(transforms.CenterCrop(args.input_size)) - - t.append(transforms.ToTensor()) - t.append(transforms.Normalize(mean, std)) - return transforms.Compose(t) diff --git a/spaces/Edward-Ji/essentials-of-microeconomics/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/Edward-Ji/essentials-of-microeconomics/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index bbcbbe7d61558adde3cbfd0c7a63a67c27ed6d30..0000000000000000000000000000000000000000 --- a/spaces/Edward-Ji/essentials-of-microeconomics/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: '' -labels: '' -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" "b/spaces/Fengbinbin/gpt-academic/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" deleted file mode 100644 index 3da831fd07e361a532777c83bb02cff265b94abd..0000000000000000000000000000000000000000 --- "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" +++ /dev/null @@ -1,194 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file, get_conf -import re, requests, unicodedata, os -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -def download_arxiv_(url_pdf): - if 'arxiv.org' not in url_pdf: - if ('.' 
in url_pdf) and ('/' not in url_pdf): - new_url = 'https://arxiv.org/abs/'+url_pdf - print('下载编号:', url_pdf, '自动定位:', new_url) - # download_arxiv_(new_url) - return download_arxiv_(new_url) - else: - print('不能识别的URL!') - return None - if 'abs' in url_pdf: - url_pdf = url_pdf.replace('abs', 'pdf') - url_pdf = url_pdf + '.pdf' - - url_abs = url_pdf.replace('.pdf', '').replace('pdf', 'abs') - title, other_info = get_name(_url_=url_abs) - - paper_id = title.split()[0] # '[1712.00559]' - if '2' in other_info['year']: - title = other_info['year'] + ' ' + title - - known_conf = ['NeurIPS', 'NIPS', 'Nature', 'Science', 'ICLR', 'AAAI'] - for k in known_conf: - if k in other_info['comment']: - title = k + ' ' + title - - download_dir = './gpt_log/arxiv/' - os.makedirs(download_dir, exist_ok=True) - - title_str = title.replace('?', '?')\ - .replace(':', ':')\ - .replace('\"', '“')\ - .replace('\n', '')\ - .replace(' ', ' ')\ - .replace(' ', ' ') - - requests_pdf_url = url_pdf - file_path = download_dir+title_str - # if os.path.exists(file_path): - # print('返回缓存文件') - # return './gpt_log/arxiv/'+title_str - - print('下载中') - proxies, = get_conf('proxies') - r = requests.get(requests_pdf_url, proxies=proxies) - with open(file_path, 'wb+') as f: - f.write(r.content) - print('下载完成') - - # print('输出下载命令:','aria2c -o \"%s\" %s'%(title_str,url_pdf)) - # subprocess.call('aria2c --all-proxy=\"172.18.116.150:11084\" -o \"%s\" %s'%(download_dir+title_str,url_pdf), shell=True) - - x = "%s %s %s.bib" % (paper_id, other_info['year'], other_info['authors']) - x = x.replace('?', '?')\ - .replace(':', ':')\ - .replace('\"', '“')\ - .replace('\n', '')\ - .replace(' ', ' ')\ - .replace(' ', ' ') - return './gpt_log/arxiv/'+title_str, other_info - - -def get_name(_url_): - import os - from bs4 import BeautifulSoup - print('正在获取文献名!') - print(_url_) - - # arxiv_recall = {} - # if os.path.exists('./arxiv_recall.pkl'): - # with open('./arxiv_recall.pkl', 'rb') as f: - # arxiv_recall = pickle.load(f) - - # if _url_ in arxiv_recall: - # print('在缓存中') - # return arxiv_recall[_url_] - - proxies, = get_conf('proxies') - res = requests.get(_url_, proxies=proxies) - - bs = BeautifulSoup(res.text, 'html.parser') - other_details = {} - - # get year - try: - year = bs.find_all(class_='dateline')[0].text - year = re.search(r'(\d{4})', year, re.M | re.I).group(1) - other_details['year'] = year - abstract = bs.find_all(class_='abstract mathjax')[0].text - other_details['abstract'] = abstract - except: - other_details['year'] = '' - print('年份获取失败') - - # get author - try: - authors = bs.find_all(class_='authors')[0].text - authors = authors.split('Authors:')[1] - other_details['authors'] = authors - except: - other_details['authors'] = '' - print('authors获取失败') - - # get comment - try: - comment = bs.find_all(class_='metatable')[0].text - real_comment = None - for item in comment.replace('\n', ' ').split(' '): - if 'Comments' in item: - real_comment = item - if real_comment is not None: - other_details['comment'] = real_comment - else: - other_details['comment'] = '' - except: - other_details['comment'] = '' - print('年份获取失败') - - title_str = BeautifulSoup( - res.text, 'html.parser').find('title').contents[0] - print('获取成功:', title_str) - # arxiv_recall[_url_] = (title_str+'.pdf', other_details) - # with open('./arxiv_recall.pkl', 'wb') as f: - # pickle.dump(arxiv_recall, f) - - return title_str+'.pdf', other_details - - - -@CatchException -def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - - 
CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……" - import glob - import os - - # 基本信息:功能、贡献者 - chatbot.append(["函数插件功能?", CRAZY_FUNCTION_INFO]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import pdfminer, bs4 - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 提取摘要,下载PDF文档 - try: - pdf_path, info = download_arxiv_(txt) - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"下载pdf文件未成功") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 翻译摘要等 - i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}" - i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - msg = '正常' - # ** gpt request ** - # 单线,获取文章meta信息 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, history=[], - sys_prompt="Your job is to collect information from materials and translate to Chinese。", - ) - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - # 写入文件 - import shutil - # 重置文件的创建时间 - shutil.copyfile(pdf_path, f'./gpt_log/{os.path.basename(pdf_path)}'); os.remove(pdf_path) - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载")) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - diff --git a/spaces/Ferion/image-matting-app/ppmatting/core/val.py b/spaces/Ferion/image-matting-app/ppmatting/core/val.py deleted file mode 100644 index 3e3117725ab3792fc7a2344082ad45f26cb2cd28..0000000000000000000000000000000000000000 --- a/spaces/Ferion/image-matting-app/ppmatting/core/val.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -import cv2 -import numpy as np -import time -import paddle -import paddle.nn.functional as F -from paddleseg.utils import TimeAverager, calculate_eta, logger, progbar - -from ppmatting.metrics import metrics_class_dict - -np.set_printoptions(suppress=True) - - -def save_alpha_pred(alpha, path): - """ - The value of alpha is range [0, 1], shape should be [h,w] - """ - dirname = os.path.dirname(path) - if not os.path.exists(dirname): - os.makedirs(dirname) - - alpha = (alpha).astype('uint8') - cv2.imwrite(path, alpha) - - -def reverse_transform(alpha, trans_info): - """recover pred to origin shape""" - for item in trans_info[::-1]: - if item[0][0] == 'resize': - h, w = item[1][0], item[1][1] - alpha = F.interpolate(alpha, [h, w], mode='bilinear') - elif item[0][0] == 'padding': - h, w = item[1][0], item[1][1] - alpha = alpha[:, :, 0:h, 0:w] - else: - raise Exception("Unexpected info '{}' in im_info".format(item[0])) - return alpha - - -def evaluate(model, - eval_dataset, - num_workers=0, - print_detail=True, - save_dir='output/results', - save_results=True, - metrics='sad'): - model.eval() - nranks = paddle.distributed.ParallelEnv().nranks - local_rank = paddle.distributed.ParallelEnv().local_rank - if nranks > 1: - # Initialize parallel environment if not done. - if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized( - ): - paddle.distributed.init_parallel_env() - - loader = paddle.io.DataLoader( - eval_dataset, - batch_size=1, - drop_last=False, - num_workers=num_workers, - return_list=True, ) - - total_iters = len(loader) - # Get metric instances and data saving - metrics_ins = {} - metrics_data = {} - if isinstance(metrics, str): - metrics = [metrics] - elif not isinstance(metrics, list): - metrics = ['sad'] - for key in metrics: - key = key.lower() - metrics_ins[key] = metrics_class_dict[key]() - metrics_data[key] = None - - if print_detail: - logger.info("Start evaluating (total_samples: {}, total_iters: {})...". 
- format(len(eval_dataset), total_iters)) - progbar_val = progbar.Progbar( - target=total_iters, verbose=1 if nranks < 2 else 2) - reader_cost_averager = TimeAverager() - batch_cost_averager = TimeAverager() - batch_start = time.time() - - img_name = '' - i = 0 - with paddle.no_grad(): - for iter, data in enumerate(loader): - reader_cost_averager.record(time.time() - batch_start) - alpha_pred = model(data) - - alpha_pred = reverse_transform(alpha_pred, data['trans_info']) - alpha_pred = alpha_pred.numpy() - - alpha_gt = data['alpha'].numpy() * 255 - trimap = data.get('ori_trimap') - if trimap is not None: - trimap = trimap.numpy().astype('uint8') - alpha_pred = np.round(alpha_pred * 255) - for key in metrics_ins.keys(): - metrics_data[key] = metrics_ins[key].update(alpha_pred, - alpha_gt, trimap) - - if save_results: - alpha_pred_one = alpha_pred[0].squeeze() - if trimap is not None: - trimap = trimap.squeeze().astype('uint8') - alpha_pred_one[trimap == 255] = 255 - alpha_pred_one[trimap == 0] = 0 - - save_name = data['img_name'][0] - name, ext = os.path.splitext(save_name) - if save_name == img_name: - save_name = name + '_' + str(i) + ext - i += 1 - else: - img_name = save_name - save_name = name + '_' + str(i) + ext - i = 1 - - save_alpha_pred(alpha_pred_one, - os.path.join(save_dir, save_name)) - - batch_cost_averager.record( - time.time() - batch_start, num_samples=len(alpha_gt)) - batch_cost = batch_cost_averager.get_average() - reader_cost = reader_cost_averager.get_average() - - if local_rank == 0 and print_detail: - show_list = [(k, v) for k, v in metrics_data.items()] - show_list = show_list + [('batch_cost', batch_cost), - ('reader cost', reader_cost)] - progbar_val.update(iter + 1, show_list) - - reader_cost_averager.reset() - batch_cost_averager.reset() - batch_start = time.time() - - for key in metrics_ins.keys(): - metrics_data[key] = metrics_ins[key].evaluate() - log_str = '[EVAL] ' - for key, value in metrics_data.items(): - log_str = log_str + key + ': {:.4f}, '.format(value) - log_str = log_str[:-2] - - logger.info(log_str) - return metrics_data diff --git a/spaces/Flux9665/IMS-Toucan/Layers/__init__.py b/spaces/Flux9665/IMS-Toucan/Layers/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/FridaZuley/RVC_HFKawaii/infer/lib/uvr5_pack/lib_v5/model_param_init.py b/spaces/FridaZuley/RVC_HFKawaii/infer/lib/uvr5_pack/lib_v5/model_param_init.py deleted file mode 100644 index b995c0bfb1194746187692e2ab1c2a6dbaaaec6c..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/infer/lib/uvr5_pack/lib_v5/model_param_init.py +++ /dev/null @@ -1,69 +0,0 @@ -import json -import os -import pathlib - -default_param = {} -default_param["bins"] = 768 -default_param["unstable_bins"] = 9 # training only -default_param["reduction_bins"] = 762 # training only -default_param["sr"] = 44100 -default_param["pre_filter_start"] = 757 -default_param["pre_filter_stop"] = 768 -default_param["band"] = {} - - -default_param["band"][1] = { - "sr": 11025, - "hl": 128, - "n_fft": 960, - "crop_start": 0, - "crop_stop": 245, - "lpf_start": 61, # inference only - "res_type": "polyphase", -} - -default_param["band"][2] = { - "sr": 44100, - "hl": 512, - "n_fft": 1536, - "crop_start": 24, - "crop_stop": 547, - "hpf_start": 81, # inference only - "res_type": "sinc_best", -} - - -def int_keys(d): - r = {} - for k, v in d: - if k.isdigit(): - k = int(k) - r[k] = v - return r - - -class 
ModelParameters(object): - def __init__(self, config_path=""): - if ".pth" == pathlib.Path(config_path).suffix: - import zipfile - - with zipfile.ZipFile(config_path, "r") as zip: - self.param = json.loads( - zip.read("param.json"), object_pairs_hook=int_keys - ) - elif ".json" == pathlib.Path(config_path).suffix: - with open(config_path, "r") as f: - self.param = json.loads(f.read(), object_pairs_hook=int_keys) - else: - self.param = default_param - - for k in [ - "mid_side", - "mid_side_b", - "mid_side_b2", - "stereo_w", - "stereo_n", - "reverse", - ]: - if not k in self.param: - self.param[k] = False diff --git a/spaces/GAIR/Factool/factool/code/pipeline.py b/spaces/GAIR/Factool/factool/code/pipeline.py deleted file mode 100644 index b4de1de0dc6749e3991445aa65b9c5b5d29f0203..0000000000000000000000000000000000000000 --- a/spaces/GAIR/Factool/factool/code/pipeline.py +++ /dev/null @@ -1,274 +0,0 @@ -import logging -import copy -import pdb -import math -import os -import json -import yaml -import time -import re -from typing import List, Dict - -from factool.utils.base.pipeline import pipeline -from factool.code.helper.postprocess import PostProcessor -from factool.code.helper.execution import evaluate_test_cases_multi_solution -from factool.utils.utils_json import CustomJSONEncoder - -class code_pipeline(pipeline): - def __init__(self, foundation_model, multi_solution_cnt, testcases_input_cnt): - super().__init__('code', foundation_model) - - self.multi_solution_cnt = multi_solution_cnt - self.testcases_input_cnt = testcases_input_cnt - - with open(os.path.join(self.prompts_path, "query_generation.yaml"), 'r') as file: - data = yaml.load(file, Loader=yaml.FullLoader) - self.query_generation_prompt = data['code'] - - async def _testcases_input_generation(self, batch, testcases_input_cnt): - messages_list = [] - if self.company == 'openai': - messages_list = [ - [ - {"role": "system", "content": self.query_generation_prompt['system']}, - {"role": "user", - "content": - self.query_generation_prompt[ - 'user_testcases_' + str(testcases_input_cnt) - ].format(input_question=sample['prompt'], - entry_point=sample['entry_point']) - }, - ] - for sample in batch - ] - elif self.company == 'anthropic': - messages_list = [self.query_generation_prompt[ - 'user_testcases_' + str(testcases_input_cnt) - ].format(input_question=sample['prompt'], - entry_point=sample['entry_point']) - for sample in batch] - return await self.chat.async_run(messages_list, Dict) - - async def _multi_solution_generation(self, batch, multi_solution_cnt): - bsize = 15 - messages_list = [ - [ - {"role": "system", "content": self.query_generation_prompt['system']}, - {"role": "user", "content": self.query_generation_prompt[ - 'user_solutions'].format(input_question=sample['prompt'], - entry_point=sample['entry_point'])}, - ] - for sample in batch - ] - - final_messages_list = [copy.deepcopy(messages) - for messages in messages_list - for _ in range(multi_solution_cnt) - ] - - responses = [] - for i in range(0, len(final_messages_list), bsize): - batch = final_messages_list[i:i + bsize] - responses += await self.chat.async_run(batch, Dict) - - # Split the list into lists of length of multi_solution_cnt - responses_split = [responses[i:i + multi_solution_cnt] - for i in range(0, len(responses), - multi_solution_cnt)] - - # Transform each element in each list - multi_solutions = [] - for solutions in responses_split: - key_names = [f"python_solution_{i}" - for i in range(1, multi_solution_cnt + 1)] - new_element = {key: 
solutions[i]['python_solution'] - if solutions[i] != None else "None" for i, key in enumerate(key_names)} - multi_solutions.append(new_element) - - return multi_solutions - - async def run_with_tool_live(self, batch, batch_size): - testcases_input = await self._testcases_input_generation(batch, self.testcases_input_cnt) - multi_solutions = await self._multi_solution_generation(batch, self.multi_solution_cnt) - - if testcases_input == None or multi_solutions == None: - return None - - responses = [] - for i in range(batch_size): - response = {'testcases_input': [], - 'multi_solutions': [], 'with_tool_classification': "None"} - try: - response['testcases_input'] = list(testcases_input[i].values()) - # Append the solution to be verified to the LAST element - # of multi_solutions - response['multi_solutions']\ - = [multi_solutions[i][f'python_solution_{j}'] - for j in range(1, self.multi_solution_cnt + 1)] +\ - [batch[i]['completion']] - except: - response['testcases_input'] = ["None"] * self.testcases_input_cnt - response['multi_solutions'] = ["None"] * (self.multi_solution_cnt + 1) - - exec_result = evaluate_test_cases_multi_solution( - batch[i]['prompt'], response['testcases_input'], - response['multi_solutions'], timeout=0.1) - response['exec_result'] = exec_result - - response['with_tool_classification'] = True - # must pass all testcases to be classified as "True" - for testcase_result in exec_result: - # syntax or timeout error happening on the potential solution - if isinstance(testcase_result[-1], str) \ - and testcase_result[-1].startswith('FAILURE'): - response['with_tool_classification'] = False - # majority voting. Note that the last element - # is the solution to be verified. Also, multi solutions that return "FAILURE" are not counted and removed. 
- else: - failure_indices = [ - i for i, res in enumerate(testcase_result[:-1]) - if isinstance(res, str) and res.startswith('FAILURE')] - testcase_result = [ - res for i, res in enumerate(testcase_result) - if i not in failure_indices] - - try: - if testcase_result[:-1].count(testcase_result[-1]) \ - < math.ceil(len(testcase_result) / 2): - response['with_tool_classification'] = False - # sometimes numpy array is included in testcase_result, so this error will be raised - except: - response['with_tool_classification'] = False - - responses.append(response) - - return responses - - async def run_with_tool_api_call(self, prompts, responses, entry_points): - - # response preprocessing to extract the code snippet: - claims = [] - for i, response in enumerate(responses): - if "```python" in response: - match = re.search(r"```python\n(.*?)\n```", response, re.DOTALL) - if match: - claims.append(match.group(1)) - else: - claims.append("") - elif "```" in response: - match = re.search(r"```\n(.*?)\n```", response, re.DOTALL) - if match: - claims.append(match.group(1)) - else: - claims.append("") - else: - claims.append(response) - - batch_size = 5 - num_batches = math.ceil(len(prompts) / batch_size) - - self.sample_list = [ - {"prompt": prompt, "response": response, - "entry_point": entry_point, "completion": claim, - "category": 'code'} - for prompt, response, entry_point, claim - in zip(prompts, responses, entry_points, claims)] - - for i in range(num_batches): - print(i) - batch_start = i * batch_size - batch_end = min((i + 1) * batch_size, len(responses)) - - responses_returned = await self.run_with_tool_live(self.sample_list[batch_start: batch_end], batch_end - batch_start) - - for j, response_returned in enumerate(responses_returned): - index = batch_start + j - self.sample_list[index].update({ - 'claim': self.sample_list[index]['completion'], - 'testcases_queries': response_returned['testcases_input'], - 'potential_solutions_queries': response_returned['multi_solutions'], - 'exec_results': response_returned['exec_result'], - 'claim_level_factuality': response_returned['with_tool_classification'], - 'response_level_factuality': response_returned['with_tool_classification'] - }) - del self.sample_list[index]["completion"] - - return self.sample_list - - async def run_with_tool_dataset(self, annotated_dataset_path: str, with_tool_classified_dataset_path: str, rerun: bool = False, rerun_indices: list = []): - data_path = with_tool_classified_dataset_path if rerun else annotated_dataset_path - with open(data_path, 'r') as f: - data = [json.loads(line) for line in f] - self.sample_list = data - rerun_elements = self.sample_list if not rerun else [self.sample_list[i] for i in rerun_indices] - - batch_size = 5 - num_batches = math.ceil(len(rerun_elements) / batch_size) # 5 - - for i in range(num_batches): - print(i) - batch_start = i * batch_size - batch_end = min((i + 1) * batch_size, len(rerun_elements)) - - responses = await self.run_with_tool_live(rerun_elements[batch_start:batch_end], batch_end - batch_start) - - for j, response in enumerate(responses): - index = batch_start + j if not rerun else rerun_indices[batch_start + j] - self.sample_list[index]['with_tool_classification'] = response['with_tool_classification'] if response is not None else 'None' - if response is not None: - self.sample_list[index].update({ - 'testcases_input': response['testcases_input'], - 'multi_solutions': response['multi_solutions'], - 'exec_result': response['exec_result'] - }) - - # save everything after each 
batch to prevent data loss - with open(with_tool_classified_dataset_path, 'w') as f: - for item in self.sample_list: - try: - json_str = json.dumps(item, cls=CustomJSONEncoder) - except: - continue - f.write(json_str + '\n') - - async def run_self_check_live(self, fewshot, batch): - user_prompt_key = 'user_3_shot_CoT' if fewshot else 'user_zero_shot_CoT' - messages_list = [ - [ - {"role": "system", "content": self.self_check_prompt['system']}, - {"role": "user", "content": self.self_check_prompt[user_prompt_key].format(input_question=response['prompt'], input_solution=response['completion'])}, - ] - for response in batch - ] - return await self.chat.async_run(messages_list, Dict) - - async def run_self_check_dataset(self, annotated_dataset_path: str, self_check_classified_dataset_path: str, fewshot: bool = False, rerun: bool = False, rerun_indices: list = []): - if rerun == False: - with open(annotated_dataset_path, 'r') as f: - self.sample_list = [json.loads(line) for line in f] - rerun_elements = self.sample_list - else: - with open(self_check_classified_dataset_path, 'r') as f: - self.sample_list = [json.loads(line) for line in f] - rerun_elements = [self.sample_list[i] for i in rerun_indices] - - batch_size = 5 - num_batches = math.ceil(len(rerun_elements) / batch_size) - - for i in range(num_batches): - print(i) - batch_start = i * batch_size - batch_end = (i + 1) * batch_size - batch = rerun_elements[batch_start:batch_end] - - responses = await self.run_self_check_live(fewshot, batch) - for j, response in enumerate(responses): - index = batch_start + j if rerun == False else rerun_indices[batch_start + j] - self.sample_list[index]['self_check_classification'] = response.get('factuality', 'None') if response is not None else 'None' - self.sample_list[index]['self_check_reasoning'] = response.get('reasoning', 'None') if response is not None else 'None' - - # save everything after each batch to prevent data loss - with open(self_check_classified_dataset_path, 'w') as f: - for item in self.sample_list: - json_str = json.dumps(item) - f.write(json_str + '\n') \ No newline at end of file diff --git a/spaces/GIZ/SDSN-demo/utils/checkconfig.py b/spaces/GIZ/SDSN-demo/utils/checkconfig.py deleted file mode 100644 index 605670c7921a89f3730d6f146e6a481cb83da184..0000000000000000000000000000000000000000 --- a/spaces/GIZ/SDSN-demo/utils/checkconfig.py +++ /dev/null @@ -1,15 +0,0 @@ -import configparser -import logging - -def getconfig(configfile_path:str): - """ - configfile_path: file path of .cfg file - """ - - config = configparser.ConfigParser() - - try: - config.read_file(open(configfile_path)) - return config - except: - logging.warning("config file not found") \ No newline at end of file diff --git a/spaces/Gaeomg/Kaludi-chatgpt-gpt4-prompts-bart-large-cnn-samsum/app.py b/spaces/Gaeomg/Kaludi-chatgpt-gpt4-prompts-bart-large-cnn-samsum/app.py deleted file mode 100644 index eea338ea7e7f53cd332fc2610d2f81856ce23cb0..0000000000000000000000000000000000000000 --- a/spaces/Gaeomg/Kaludi-chatgpt-gpt4-prompts-bart-large-cnn-samsum/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum").launch() \ No newline at end of file diff --git a/spaces/Gators123/fusf_pdf_2023/directory_parser.py b/spaces/Gators123/fusf_pdf_2023/directory_parser.py deleted file mode 100644 index e23ba88363ef2c29902bf36ae574c0c716cc8ba0..0000000000000000000000000000000000000000 --- a/spaces/Gators123/fusf_pdf_2023/directory_parser.py +++ /dev/null 
@@ -1,306 +0,0 @@ -# Code takes in a provided directory containing files to be parsed and classified. Results returned in Excel sheet - -# 1. Ask user for API Key (unless they already provided it in the code), and file directory -# 2. Finds .pdf files in directory, runs read_string_text_from_file to obtain text from each pdf -# 3. Runs that text through other_info and fus_model to classify the pdfs, and add that information to lists -# 4. Runs write_excel to write inforomation from the lists into excel - -# Classifications are: fus/non-fus, ML type, treatment Cycle, medical indication, keywords - -# Before running, ensure API Key is entered, change directory with pdf files and directory for excel sheet accordingly - -import os -from datetime import date -from langchain.document_loaders import PyPDFLoader -from langchain.chat_models import ChatOpenAI -from joblib import load -import xlsxwriter -from langchain.chat_models import ChatOpenAI -from langchain.schema import ( - HumanMessage, - SystemMessage -) -from dotenv import load_dotenv - -# _________________________________________________________________ - -# Global variables -other_list = [] -fus_list = [] -ml_list = [] - - -# Sets API Key -api_key = os.environ['OPENAI_API_KEY'] = 'ENTER API KEY HERE' - -# Function to intake API key while running code (will not run if key is already assigned above) -def ask_api(): - api_key = (input("Please enter your API Key: ")) - os.environ['OPENAI_API_KEY'] = api_key - - -# Code to generate summary (no API) -def read_string_text_from_file(pdf_file): - - loader = PyPDFLoader(pdf_file) - - doc = loader.load_and_split() - - stringtxt = str(doc) - - - # Had to convert into summary from string to list in order to remove '\n' from text - mylist = [] - final_string_no_lines = '' # Final text that is returned - - for char in stringtxt: - mylist.append(char) - - # Finds the indices where '\n' is present - pop_index = [] - for word in range(0,len(mylist)): - if mylist[word] =='\\' and mylist[word+1]=='n': - pop_index.append(word) - pop_index.append(word+1) - - # Replaces those indices with an empty space - for word in pop_index: - mylist[word] = ' ' - - # Converts cleaned list back into string and returns - for i in mylist: - final_string_no_lines+=i - - return final_string_no_lines - - -# Uses API to classify pdf files -def other_info(pdf_file): - - loader = PyPDFLoader(pdf_file) - - doc = loader.load_and_split() - - stringtxt = str(doc) - - - # Had to convert into summary from string to list in order to remove '\n' from text - mylist = [] - final_string_no_lines = '' # Final text that is used for model - - for char in stringtxt: - mylist.append(char) - - # Finds the indices where '\n' is present - pop_index = [] - for word in range(0,len(mylist)): - if mylist[word] =='\\' and mylist[word+1]=='n': - pop_index.append(word) - pop_index.append(word+1) - - # Replaces those indices with an empty space - for word in pop_index: - mylist[word] = ' ' - - # Converts cleaned list back into string and returns - for i in mylist: - final_string_no_lines+=i - - load_dotenv() - - chat = ChatOpenAI(openai_api_key=api_key) - - ml_type_messages = [ - SystemMessage(content='''Classify the article into Supervised Machine Learning, Unsupervised Machine Learning, Both, or None, and include brackets around the answer. 
- On a new line, write a short blurb justifying why:'''), - HumanMessage(content=final_string_no_lines[0:4000]) # Note will only use first 4000 characters as classifier, due to limit on API - ] - - - treatment_cycle_messages = [ - SystemMessage(content='''Classify the article in one of the following treatment cycles, including brackets around the answer: - [Treatment Planning, Treatment Monitoring and Results Analysis, Patient Selection, Clinical Decision Support]. - On a new line, write a short blurb justifying why:'''), - HumanMessage(content=final_string_no_lines[0:4000]) - ] - - - medical_indication_messages = [ - SystemMessage(content='''Classify the article in one of the following medical indications, including brackets around the answer: - [Cardiovascular, Emerging Indications, Gynelogical, Neurological (blood-brain-barrier opening), Neurosurgery, Oncological, Urological (prostate), Veterinary, Other]. - On a new line, write a short blurb justifying why.'''), - HumanMessage(content=final_string_no_lines[0:4000]) - ] - - key_word_messages = [ - SystemMessage(content='''Pick some of the keywords, and ONLY KEY WORDS LISTED BELOW that you feel the article encompasses. Provide in a numbered list: - [Angular spectrum, Artificial intelligence, Artificial neural networks, Auto encoders, Bio-heat transfer, Cat Swarm Operation, Chaotic krill her algorithm (CKHA), CIVA HealthCare platform, Classification, - Coefficient based method, Computed tomography (CT), Computer architecture, Convolutional neural network (CNN), Decision trees, Deep CNN, Deep leaning, Diagnostic imaging,, Differential equation solver, - Encoder-decoder, Fourier transform, Functional mapping, Functional neurosurgery, FUS monitoring, Generative adversarial networks (GAN), Global convolutional networks, Harmonic motion imaging, - HIFU Artifact, Image filtering, Intelligent theranostics, Joint Mutual Information (JMI), K means clustering, Kapur entropy, K-nearest neighbor, Logistic regression, Magnetic resonance imaging (MRI), - Medical diagnostics, Metamodel, Multilayer Perception (MLP), Multistage neural network, Mutual Information Maximisation (MIM), Naive Bayes classifier, NDE, Neural network, Neuromodulation, - Numerical model, Partial dependence plots, Photon counting CT, Prediction, Preoperative prediction, Principal component analysis, Prognosis, Radiomics, Random forest, Rayleigh-Sommerfeld, Real-time lesion tracking, - Regression models (linear and logistic), Residual, Rule based decision tree method, Segmentation, Skull density ratio, Support vector classification (SVC) model, Support vector machines, SWOT, Temperature monitoring, Transfer learning, - Transformers, Ultrasonography, Ultrasound (US), U-net (CNN, Encoder, Decoder, Autoencoder), Unsupervised learning, VGG Net, Vision transformers (ViT), Wiener Filtering]. Remember to only use the keywords in the list above'''), - HumanMessage(content=final_string_no_lines[0:4000]) - - ] - - title = [ - SystemMessage(content='''What is the title of the article? 
Return nothing but the title'''), - HumanMessage(content=final_string_no_lines[0:4000]) - ] - - - return chat(ml_type_messages).content, chat(treatment_cycle_messages).content, chat(medical_indication_messages).content, chat(key_word_messages).content, chat(title).content - - - -# FUS/Non-fus classification -def fus_model(pdf_file): - - # Loads FUS model with Joblib - fus_model = load('fus_model.joblib') - - prediction = fus_model.predict_proba([read_string_text_from_file(pdf_file)]) - - percentage_pos = (prediction[0][0])*100 - percentage_neg = (prediction[0][1])*100 - - return 'Focused Ultrasound Related: ' + str((round(percentage_pos,1)))+'%'+' '+'Non-Fus: '+ str((round(percentage_neg,1)))+'%' - - -# _____________________________________________________________________ - -# Where the result excel file will be located - CHANGE -results_dir = "C:\\Users\\fuzhe\\OneDrive\\Documents\\2023 Summer Intern NLP Project 81123\\fusf_pdf_2023\\result_excel_files\\" - - -# Function to write all the info to excel -def write_excel(fus,other_list): - - today = date.today() - - xlsx_file = results_dir + 'Directory_data_' + str(today) + '.xlsx' - - workbook = xlsxwriter.Workbook(xlsx_file) - worksheet = workbook.add_worksheet('First Sheet') - - - - format_header = workbook.add_format({'font_color': 'blue', 'font_name': 'Arial', 'font_size': '10', 'valign': 'top'}) - wrap_format = workbook.add_format({'font_name': 'Arial', 'font_size': '10', 'valign': 'top', 'text_wrap': True}) - worksheet.set_row(0, None, format_header) - - - worksheet.set_column(0, 0, 40) - worksheet.set_column(1, 1, 30) - worksheet.set_column(2, 2, 30) - worksheet.set_column(3, 3, 50) - worksheet.set_column(4, 4, 50) - worksheet.set_column(5, 5, 30) - - worksheet.write(0,0,'Name') - worksheet.write(0,1,'Fus / NonFus') - worksheet.write(0,2,'ML Type') - worksheet.write(0,3,'Treatment Cycle') - worksheet.write(0,4,'Medical Indications') - worksheet.write(0,5,'Keyword(s)') - - - title_row_index=1 - for title in other_list: - - worksheet.set_row(title_row_index, 50) - - worksheet.write(title_row_index, 0, title[4], wrap_format) - - title_row_index+=1 - - - fus_row_index=1 - for fus in fus_list: - - worksheet.write(fus_row_index, 1 ,fus, wrap_format) - - fus_row_index+=1 - - - ml_row_index=1 - for ml in other_list: - - worksheet.write(ml_row_index, 2, ml[0], wrap_format) - - ml_row_index+=1 - - - cycle_row_index=1 - for cycle in other_list: - - worksheet.write(cycle_row_index, 3, cycle[1], wrap_format) - - cycle_row_index+=1 - - - indication_row_index=1 - for indication in other_list: - - worksheet.write(indication_row_index, 4, indication[2], wrap_format) - - indication_row_index+=1 - - keywords_row_index=1 - for keyword in other_list: - - worksheet.write(keywords_row_index, 5, keyword[3], wrap_format) - - keywords_row_index+=1 - - workbook.close() - -# Function to intake directory, output all the pdf files in that directory -def get_pdf_paths(directory): - pdf_paths = [] - for root, dirs, files in os.walk(directory): - for file in files: - if file.lower().endswith(".pdf"): - pdf_paths.append(os.path.join(root, file)) - return pdf_paths - -#___________________________________________________________________ - -# Prompts user to enter API key if one isn't already present -if 'OPENAI_API_KEY' not in os.environ: - ask_api() -else: - print('API Key Receieved...') - - -# Get pdf paths from the directory - CHANGE -pdf_paths = get_pdf_paths(r"C:\Users\fuzhe\OneDrive\Documents\2023 Summer Intern NLP Project 81123\PubMed_PDFs") - -# Gives user 
chance to enter directory while running the program -user_input = input("Enter the directory path where your PDFs are located (if you specified path in code press the enter key): ") - -if user_input != '': - pdf_paths = get_pdf_paths(user_input) - - -# Displays the PDFs found in the provided directory -print("\nPDF files found in provided directory:") -for path in pdf_paths: - print(path) - - -print('\nProcessing files and writing to excel sheet...') - - -for path in pdf_paths: - other_list.append(other_info(path)) - fus_list.append(fus_model(path)) - - -#Writes information to the Excel sheet -write_excel(fus_list,other_list) - -print(f'\nSucessfully written to: {str(results_dir)}') \ No newline at end of file diff --git a/spaces/GipAdonimus/Real-Time-Voice-Cloning/vocoder/distribution.py b/spaces/GipAdonimus/Real-Time-Voice-Cloning/vocoder/distribution.py deleted file mode 100644 index d3119a5ba1e77bc25a92d2664f83d366f12399c0..0000000000000000000000000000000000000000 --- a/spaces/GipAdonimus/Real-Time-Voice-Cloning/vocoder/distribution.py +++ /dev/null @@ -1,132 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F - - -def log_sum_exp(x): - """ numerically stable log_sum_exp implementation that prevents overflow """ - # TF ordering - axis = len(x.size()) - 1 - m, _ = torch.max(x, dim=axis) - m2, _ = torch.max(x, dim=axis, keepdim=True) - return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis)) - - -# It is adapted from https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py -def discretized_mix_logistic_loss(y_hat, y, num_classes=65536, - log_scale_min=None, reduce=True): - if log_scale_min is None: - log_scale_min = float(np.log(1e-14)) - y_hat = y_hat.permute(0,2,1) - assert y_hat.dim() == 3 - assert y_hat.size(1) % 3 == 0 - nr_mix = y_hat.size(1) // 3 - - # (B x T x C) - y_hat = y_hat.transpose(1, 2) - - # unpack parameters. (B, T, num_mixtures) x 3 - logit_probs = y_hat[:, :, :nr_mix] - means = y_hat[:, :, nr_mix:2 * nr_mix] - log_scales = torch.clamp(y_hat[:, :, 2 * nr_mix:3 * nr_mix], min=log_scale_min) - - # B x T x 1 -> B x T x num_mixtures - y = y.expand_as(means) - - centered_y = y - means - inv_stdv = torch.exp(-log_scales) - plus_in = inv_stdv * (centered_y + 1. / (num_classes - 1)) - cdf_plus = torch.sigmoid(plus_in) - min_in = inv_stdv * (centered_y - 1. / (num_classes - 1)) - cdf_min = torch.sigmoid(min_in) - - # log probability for edge case of 0 (before scaling) - # equivalent: torch.log(F.sigmoid(plus_in)) - log_cdf_plus = plus_in - F.softplus(plus_in) - - # log probability for edge case of 255 (before scaling) - # equivalent: (1 - F.sigmoid(min_in)).log() - log_one_minus_cdf_min = -F.softplus(min_in) - - # probability for all other cases - cdf_delta = cdf_plus - cdf_min - - mid_in = inv_stdv * centered_y - # log probability in the center of the bin, to be used in extreme cases - # (not actually used in our code) - log_pdf_mid = mid_in - log_scales - 2. * F.softplus(mid_in) - - # tf equivalent - """ - log_probs = tf.where(x < -0.999, log_cdf_plus, - tf.where(x > 0.999, log_one_minus_cdf_min, - tf.where(cdf_delta > 1e-5, - tf.log(tf.maximum(cdf_delta, 1e-12)), - log_pdf_mid - np.log(127.5)))) - """ - # TODO: cdf_delta <= 1e-5 actually can happen. How can we choose the value - # for num_classes=65536 case? 1e-7? not sure.. - inner_inner_cond = (cdf_delta > 1e-5).float() - - inner_inner_out = inner_inner_cond * \ - torch.log(torch.clamp(cdf_delta, min=1e-12)) + \ - (1. 
- inner_inner_cond) * (log_pdf_mid - np.log((num_classes - 1) / 2)) - inner_cond = (y > 0.999).float() - inner_out = inner_cond * log_one_minus_cdf_min + (1. - inner_cond) * inner_inner_out - cond = (y < -0.999).float() - log_probs = cond * log_cdf_plus + (1. - cond) * inner_out - - log_probs = log_probs + F.log_softmax(logit_probs, -1) - - if reduce: - return -torch.mean(log_sum_exp(log_probs)) - else: - return -log_sum_exp(log_probs).unsqueeze(-1) - - -def sample_from_discretized_mix_logistic(y, log_scale_min=None): - """ - Sample from discretized mixture of logistic distributions - Args: - y (Tensor): B x C x T - log_scale_min (float): Log scale minimum value - Returns: - Tensor: sample in range of [-1, 1]. - """ - if log_scale_min is None: - log_scale_min = float(np.log(1e-14)) - assert y.size(1) % 3 == 0 - nr_mix = y.size(1) // 3 - - # B x T x C - y = y.transpose(1, 2) - logit_probs = y[:, :, :nr_mix] - - # sample mixture indicator from softmax - temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5) - temp = logit_probs.data - torch.log(- torch.log(temp)) - _, argmax = temp.max(dim=-1) - - # (B, T) -> (B, T, nr_mix) - one_hot = to_one_hot(argmax, nr_mix) - # select logistic parameters - means = torch.sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, dim=-1) - log_scales = torch.clamp(torch.sum( - y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, dim=-1), min=log_scale_min) - # sample from logistic & clip to interval - # we don't actually round to the nearest 8bit value when sampling - u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5) - x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u)) - - x = torch.clamp(torch.clamp(x, min=-1.), max=1.) - - return x - - -def to_one_hot(tensor, n, fill_with=1.): - # we perform one hot encore with respect to the last axis - one_hot = torch.FloatTensor(tensor.size() + (n,)).zero_() - if tensor.is_cuda: - one_hot = one_hot.cuda() - one_hot.scatter_(len(tensor.size()), tensor.unsqueeze(-1), fill_with) - return one_hot diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py deleted file mode 100644 index b4b050dda5d2d752c0db3c83c434879c8765a272..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py', - '../_base_/default_runtime.py' -] -model = dict(bbox_head=dict(num_classes=20)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -# actual epoch = 3 * 3 = 9 -lr_config = dict(policy='step', step=[3]) -# runtime settings -runner = dict( - type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 1a1f49cf6b112afdadf1841571f51b98c010ddf8..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', 
'../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index fb2be22f8bc2e10cdfba4f58b2ad1ced913b4ea4..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 9cb7952cede58165d2ed0f35d2208ad1ffb65232..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context_59.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context_59.py deleted file mode 100644 index 02507ccb7e2f5f25014c451dcf9ba51c3a61dadc..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context_59.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50-d8.py', - '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=59), - auxiliary_head=dict(num_classes=59), - test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) -optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/spaces/Hallucinate/demo/ldm/util.py b/spaces/Hallucinate/demo/ldm/util.py deleted file mode 100644 index 8ba38853e7a07228cc2c187742b5c45d7359b3f9..0000000000000000000000000000000000000000 --- a/spaces/Hallucinate/demo/ldm/util.py +++ /dev/null @@ -1,203 +0,0 @@ -import importlib - -import torch -import numpy as np -from collections import abc -from einops import rearrange -from functools import partial - -import multiprocessing as mp -from threading import Thread -from queue import Queue - -from inspect import isfunction -from PIL import Image, ImageDraw, ImageFont - - -def log_txt_as_img(wh, xc, size=10): - # wh a tuple of (width, height) - # xc a list of captions to plot - b = len(xc) - txts = list() - for bi in range(b): - txt = Image.new("RGB", wh, color="white") - draw = ImageDraw.Draw(txt) - font = ImageFont.truetype('data/DejaVuSans.ttf', size=size) - nc = int(40 * (wh[0] / 256)) - lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) - - try: - draw.text((0, 0), lines, fill="black", font=font) - except UnicodeEncodeError: - print("Cant encode string for logging. 
Skipping.") - - txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 - txts.append(txt) - txts = np.stack(txts) - txts = torch.tensor(txts) - return txts - - -def ismap(x): - if not isinstance(x, torch.Tensor): - return False - return (len(x.shape) == 4) and (x.shape[1] > 3) - - -def isimage(x): - if not isinstance(x, torch.Tensor): - return False - return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) - - -def exists(x): - return x is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def mean_flat(tensor): - """ - https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def count_params(model, verbose=False): - total_params = sum(p.numel() for p in model.parameters()) - if verbose: - print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.") - return total_params - - -def instantiate_from_config(config): - if not "target" in config: - if config == '__is_first_stage__': - return None - elif config == "__is_unconditional__": - return None - raise KeyError("Expected key `target` to instantiate.") - return get_obj_from_str(config["target"])(**config.get("params", dict())) - - -def get_obj_from_str(string, reload=False): - module, cls = string.rsplit(".", 1) - if reload: - module_imp = importlib.import_module(module) - importlib.reload(module_imp) - return getattr(importlib.import_module(module, package=None), cls) - - -def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False): - # create dummy dataset instance - - # run prefetching - if idx_to_fn: - res = func(data, worker_id=idx) - else: - res = func(data) - Q.put([idx, res]) - Q.put("Done") - - -def parallel_data_prefetch( - func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True, use_worker_id=False -): - # if target_data_type not in ["ndarray", "list"]: - # raise ValueError( - # "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray." - # ) - if isinstance(data, np.ndarray) and target_data_type == "list": - raise ValueError("list expected but function got ndarray.") - elif isinstance(data, abc.Iterable): - if isinstance(data, dict): - print( - f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.' - ) - data = list(data.values()) - if target_data_type == "ndarray": - data = np.asarray(data) - else: - data = list(data) - else: - raise TypeError( - f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}." 
- ) - - if cpu_intensive: - Q = mp.Queue(1000) - proc = mp.Process - else: - Q = Queue(1000) - proc = Thread - # spawn processes - if target_data_type == "ndarray": - arguments = [ - [func, Q, part, i, use_worker_id] - for i, part in enumerate(np.array_split(data, n_proc)) - ] - else: - step = ( - int(len(data) / n_proc + 1) - if len(data) % n_proc != 0 - else int(len(data) / n_proc) - ) - arguments = [ - [func, Q, part, i, use_worker_id] - for i, part in enumerate( - [data[i: i + step] for i in range(0, len(data), step)] - ) - ] - processes = [] - for i in range(n_proc): - p = proc(target=_do_parallel_data_prefetch, args=arguments[i]) - processes += [p] - - # start processes - print(f"Start prefetching...") - import time - - start = time.time() - gather_res = [[] for _ in range(n_proc)] - try: - for p in processes: - p.start() - - k = 0 - while k < n_proc: - # get result - res = Q.get() - if res == "Done": - k += 1 - else: - gather_res[res[0]] = res[1] - - except Exception as e: - print("Exception: ", e) - for p in processes: - p.terminate() - - raise e - finally: - for p in processes: - p.join() - print(f"Prefetching complete. [{time.time() - start} sec.]") - - if target_data_type == 'ndarray': - if not isinstance(gather_res[0], np.ndarray): - return np.concatenate([np.asarray(r) for r in gather_res], axis=0) - - # order outputs - return np.concatenate(gather_res, axis=0) - elif target_data_type == 'list': - out = [] - for r in gather_res: - out.extend(r) - return out - else: - return gather_res diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/dynamicconv_layer/setup.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/dynamicconv_layer/setup.py deleted file mode 100644 index 6a21f7e2ee0840a3b251522275a0b32a856951d7..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/dynamicconv_layer/setup.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from setuptools import setup -from torch.utils.cpp_extension import BuildExtension, CUDAExtension - - -setup( - name="dynamicconv_layer", - ext_modules=[ - CUDAExtension( - name="dynamicconv_cuda", - sources=[ - "dynamicconv_cuda.cpp", - "dynamicconv_cuda_kernel.cu", - ], - ), - ], - cmdclass={"build_ext": BuildExtension}, -) diff --git a/spaces/Hexamind/GDOC/src/tools/llms.py b/spaces/Hexamind/GDOC/src/tools/llms.py deleted file mode 100644 index 0b205a75ef919b4a3d0e487b80add075d58f21aa..0000000000000000000000000000000000000000 --- a/spaces/Hexamind/GDOC/src/tools/llms.py +++ /dev/null @@ -1,22 +0,0 @@ -from langchain.llms import OpenAI -# from transformers import AutoTokenizer, AutoModelForCausalLM -import os - - -OpenAI_KEY = "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" -os.environ["OPENAI_API_KEY"] = OpenAI_KEY - -openai_llm = OpenAI(temperature=0) #CHAT GPT MODEL - -# llm_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf") #LAMA MODEL - -SERPAPI_API_KEY = "dba90c4ecfa942f37e2b9eb2e7c6600ef7fb5c02ab8bbfacef426773df14c06b" -os.environ["SERPAPI_API_KEY"] = SERPAPI_API_KEY - - -""" -HF_API_KEY = "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" -hf_llm = HuggingFaceHub(repo_id="google/flan-t5-small", - model_kwargs={"temperature": 0, "max_length": 1000}, - huggingfacehub_api_token=HF_API_KEY) -""" diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/clib/cuda/ngram_repeat_block_cuda.cpp b/spaces/ICML2022/OFA/fairseq/fairseq/clib/cuda/ngram_repeat_block_cuda.cpp deleted file mode 100644 index 707219105a17a691e43de1296a72bbaffa0c7fe9..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/clib/cuda/ngram_repeat_block_cuda.cpp +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright (c) Microsoft Corporation. -Licensed under the MIT License. 
-*/ - -#include -#include - -/* -CPP Binding for CUDA OP -*/ - -// CUDA forward declarations -torch::Tensor ngram_repeat_block_cuda_forward( - torch::Tensor tokens, - torch::Tensor lprobs, - int bsz, - int step, - int beam_size, - int no_repeat_ngram_size); - -#define CHECK_CUDA(x) \ - TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) \ - TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -// Input check and call to CUDA OP -// Backward method not required -torch::Tensor ngram_repeat_block_forward( - torch::Tensor tokens, - torch::Tensor lprobs, - int bsz, - int step, - int beam_size, - int no_repeat_ngram_size) { - CHECK_INPUT(tokens); - CHECK_INPUT(lprobs); - assert(bsz > 0); - assert(step >= 0); - assert(beam_size > 0); - assert(no_repeat_ngram_size > 0); - - return ngram_repeat_block_cuda_forward( - tokens, lprobs, bsz, step, beam_size, no_repeat_ngram_size); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def( - "forward", - &ngram_repeat_block_forward, - "No Repeat Ngram Block forward (CUDA)"); -} diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/test.py b/spaces/Iceclear/StableSR/StableSR/basicsr/test.py deleted file mode 100644 index 53cb3b7aa860c90518e15ba76e1a55fdf404bcc2..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/test.py +++ /dev/null @@ -1,45 +0,0 @@ -import logging -import torch -from os import path as osp - -from basicsr.data import build_dataloader, build_dataset -from basicsr.models import build_model -from basicsr.utils import get_env_info, get_root_logger, get_time_str, make_exp_dirs -from basicsr.utils.options import dict2str, parse_options - - -def test_pipeline(root_path): - # parse options, set distributed setting, set ramdom seed - opt, _ = parse_options(root_path, is_train=False) - - torch.backends.cudnn.benchmark = True - # torch.backends.cudnn.deterministic = True - - # mkdir and initialize loggers - make_exp_dirs(opt) - log_file = osp.join(opt['path']['log'], f"test_{opt['name']}_{get_time_str()}.log") - logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file) - logger.info(get_env_info()) - logger.info(dict2str(opt)) - - # create test dataset and dataloader - test_loaders = [] - for _, dataset_opt in sorted(opt['datasets'].items()): - test_set = build_dataset(dataset_opt) - test_loader = build_dataloader( - test_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed']) - logger.info(f"Number of test images in {dataset_opt['name']}: {len(test_set)}") - test_loaders.append(test_loader) - - # create model - model = build_model(opt) - - for test_loader in test_loaders: - test_set_name = test_loader.dataset.opt['name'] - logger.info(f'Testing {test_set_name}...') - model.validation(test_loader, current_iter=opt['name'], tb_logger=None, save_img=opt['val']['save_img']) - - -if __name__ == '__main__': - root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) - test_pipeline(root_path) diff --git a/spaces/Iceclear/StableSR/StableSR/ldm/modules/encoders/modules.py b/spaces/Iceclear/StableSR/StableSR/ldm/modules/encoders/modules.py deleted file mode 100644 index d2ac91a1205d6746e75ba173170080f2f37ce377..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/ldm/modules/encoders/modules.py +++ /dev/null @@ -1,484 +0,0 @@ -import torch -import torch.nn as nn -from functools import partial 
-import clip -from einops import rearrange, repeat -import transformers -from transformers import CLIPTokenizer, CLIPTextModel -import kornia - -from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test -from .transformer_utils import CLIPTextTransformer_M -import open_clip - - -class AbstractEncoder(nn.Module): - def __init__(self): - super().__init__() - - def encode(self, *args, **kwargs): - raise NotImplementedError - - - -class ClassEmbedder(nn.Module): - def __init__(self, embed_dim, n_classes=1000, key='class'): - super().__init__() - self.key = key - self.embedding = nn.Embedding(n_classes, embed_dim) - - def forward(self, batch, key=None): - if key is None: - key = self.key - # this is for use in crossattn - c = batch[key][:, None] - c = self.embedding(c) - return c - - -class TransformerEmbedder(AbstractEncoder): - """Some transformer encoder layers""" - def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): - super().__init__() - self.device = device - self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, - attn_layers=Encoder(dim=n_embed, depth=n_layer)) - - def forward(self, tokens): - tokens = tokens.to(self.device) # meh - z = self.transformer(tokens, return_embeddings=True) - return z - - def encode(self, x): - return self(x) - - -class BERTTokenizer(AbstractEncoder): - """ Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)""" - def __init__(self, device="cuda", vq_interface=True, max_length=77): - super().__init__() - from transformers import BertTokenizerFast # TODO: add to reuquirements - self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") - self.device = device - self.vq_interface = vq_interface - self.max_length = max_length - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - return tokens - - @torch.no_grad() - def encode(self, text): - tokens = self(text) - if not self.vq_interface: - return tokens - return None, None, [None, None, tokens] - - def decode(self, text): - return text - - -class BERTEmbedder(AbstractEncoder): - """Uses the BERT tokenizr model and add some transformer encoder layers""" - def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77, - device="cuda",use_tokenizer=True, embedding_dropout=0.0): - super().__init__() - self.use_tknz_fn = use_tokenizer - if self.use_tknz_fn: - self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len) - self.device = device - self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, - attn_layers=Encoder(dim=n_embed, depth=n_layer), - emb_dropout=embedding_dropout) - - def forward(self, text): - if self.use_tknz_fn: - tokens = self.tknz_fn(text)#.to(self.device) - else: - tokens = text - z = self.transformer(tokens, return_embeddings=True) - return z - - def encode(self, text): - # output of length 77 - return self(text) - - -class SpatialRescaler(nn.Module): - def __init__(self, - n_stages=1, - method='bilinear', - multiplier=0.5, - in_channels=3, - out_channels=None, - bias=False): - super().__init__() - self.n_stages = n_stages - assert self.n_stages >= 0 - assert method in ['nearest','linear','bilinear','trilinear','bicubic','area'] - 
self.multiplier = multiplier - self.interpolator = partial(torch.nn.functional.interpolate, mode=method) - self.remap_output = out_channels is not None - if self.remap_output: - print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.') - self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias) - - def forward(self,x): - for stage in range(self.n_stages): - x = self.interpolator(x, scale_factor=self.multiplier) - - - if self.remap_output: - x = self.channel_mapper(x) - return x - - def encode(self, x): - return self(x) - -class FrozenOpenCLIPEmbedder(AbstractEncoder): - """ - Uses the OpenCLIP transformer encoder for text - """ - LAYERS = [ - #"pooled", - "last", - "penultimate" - ] - def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77, - freeze=True, layer="last"): - super().__init__() - assert layer in self.LAYERS - model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version) - del model.visual - self.model = model - - self.device = device - self.max_length = max_length - if freeze: - self.freeze() - self.layer = layer - if self.layer == "last": - self.layer_idx = 0 - elif self.layer == "penultimate": - self.layer_idx = 1 - else: - raise NotImplementedError() - - def freeze(self): - self.model = self.model.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - tokens = open_clip.tokenize(text) - z = self.encode_with_transformer(tokens.to(self.device)) - return z - - def encode_with_transformer(self, text): - x = self.model.token_embedding(text) # [batch_size, n_ctx, d_model] - x = x + self.model.positional_embedding - x = x.permute(1, 0, 2) # NLD -> LND - x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask) - x = x.permute(1, 0, 2) # LND -> NLD - x = self.model.ln_final(x) - return x - - def text_transformer_forward(self, x: torch.Tensor, attn_mask = None): - for i, r in enumerate(self.model.transformer.resblocks): - if i == len(self.model.transformer.resblocks) - self.layer_idx: - break - if self.model.transformer.grad_checkpointing and not torch.jit.is_scripting(): - x = checkpoint(r, x, attn_mask) - else: - x = r(x, attn_mask=attn_mask) - return x - - def encode(self, text): - return self(text) - -class FrozenCLIPEmbedder(AbstractEncoder): - """Uses the CLIP transformer encoder for text (from Hugging Face)""" - def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): - super().__init__() - self.tokenizer = CLIPTokenizer.from_pretrained(version) - self.transformer = CLIPTextModel.from_pretrained(version) - self.device = device - self.max_length = max_length - self.freeze() - - def freeze(self): - self.transformer = self.transformer.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - outputs = self.transformer(input_ids=tokens) - - z = outputs.last_hidden_state - return z - - def encode(self, text): - return self(text) - -class FinetuningCLIPEmbedder(AbstractEncoder): - """Uses the CLIP transformer encoder for text (from Hugging Face)""" - def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): - super().__init__() - 
setattr(transformers.models.clip.modeling_clip,"CLIPTextTransformer", CLIPTextTransformer_M) - self.tokenizer = CLIPTokenizer.from_pretrained(version) - self.transformer = CLIPTextModel.from_pretrained(version) - self.device = device - self.max_length = max_length - # self.freeze() - - def freeze(self): - self.transformer = self.transformer.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - # batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - # return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - # tokens = batch_encoding["input_ids"].to(self.device) - outputs = self.transformer(text) - - z = outputs.last_hidden_state - return z - - def encode(self, text): - return self(text) - -class FrozenCLIPTextEmbedder(nn.Module): - """ - Uses the CLIP transformer encoder for text. - """ - def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True): - super().__init__() - self.model, _ = clip.load(version, jit=False, device="cpu") - self.device = device - self.max_length = max_length - self.n_repeat = n_repeat - self.normalize = normalize - - def freeze(self): - self.model = self.model.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - tokens = clip.tokenize(text).to(self.device) - z = self.model.encode_text(tokens) - if self.normalize: - z = z / torch.linalg.norm(z, dim=1, keepdim=True) - return z - - def encode(self, text): - z = self(text) - if z.ndim==2: - z = z[:, None, :] - z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat) - return z - -class FrozenClipImageEmbedder(nn.Module): - """ - Uses the CLIP image encoder. - """ - def __init__( - self, - model, - jit=False, - device='cuda' if torch.cuda.is_available() else 'cpu', - antialias=False, - ): - super().__init__() - self.model, _ = clip.load(name=model, device=device, jit=jit) - - self.antialias = antialias - - self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) - self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) - - def preprocess(self, x): - # normalize to [0,1] - x = kornia.geometry.resize(x, (224, 224), - interpolation='bicubic',align_corners=True, - antialias=self.antialias) - x = (x + 1.) / 2. - # renormalize according to clip - x = kornia.enhance.normalize(x, self.mean, self.std) - return x - - def forward(self, x): - # x is assumed to be in range [-1,1] - return self.model.encode_image(self.preprocess(x)) - -class FrozenClipImageEmbedderNew(nn.Module): - """ - Uses the CLIP image encoder. - """ - def __init__( - self, - model, - in_channels=1024, - output_channels=768, - jit=False, - device='cuda' if torch.cuda.is_available() else 'cpu', - antialias=False, - ): - super().__init__() - clip_model, _ = clip.load(name=model, device=device, jit=jit) - self.encoder = clip_model.visual - self.linear = nn.Linear(in_channels, output_channels) - - self.antialias = antialias - - self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) - self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) - - def preprocess(self, x): - # normalize to [0,1] - # x = kornia.geometry.resize(x, (224, 224), - # interpolation='bicubic',align_corners=True, - # antialias=self.antialias) - x = (x + 1.) / 2. 
- # renormalize according to clip - x = kornia.enhance.normalize(x, self.mean, self.std) - return x - - def forward(self, x): - # x is assumed to be in range [-1,1] - x = self.encoder(self.preprocess(x)).float() - x = self.linear(x) - return x - -class ClipImageEmbedder(nn.Module): - """ - Uses the CLIP image encoder. - """ - def __init__( - self, - vision_layers=[2,2,2,2], - embed_dim=768, - vision_heads=64, - input_resolution=224, - vision_width=64, - jit=False, - device='cuda' if torch.cuda.is_available() else 'cpu', - antialias=False, - input_dim=3 - ): - super().__init__() - from clip.model import ModifiedResNet - self.encoder = ModifiedResNet( - layers=vision_layers, - output_dim=embed_dim, - heads=vision_heads, - input_resolution=input_resolution, - width=vision_width, - input_dim=input_dim - ) - - # self.pixel_unshuffle = nn.PixelUnshuffle(2) - - # self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) - # self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) - - # def preprocess(self, x): - # # normalize to [0,1] - # x = (x + 1.) / 2. - # # renormalize according to clip - # x = kornia.enhance.normalize(x, self.mean, self.std) - # - # # return self.pixel_unshuffle(x) - # return x - - def forward(self, x): - # x is assumed to be in range [-1,1] - x = self.encoder(x).float() - return x - -class ClipImageEmbedderOri(nn.Module): - """ - Uses the CLIP image encoder. - """ - def __init__( - self, - model, - in_channels, - out_channels, - jit=False, - device='cuda' if torch.cuda.is_available() else 'cpu', - antialias=False, - ): - super().__init__() - self.model, _ = clip.load(name=model, device=device, jit=jit) - self.freeze() - - self.final_projector = nn.Linear(in_channels, out_channels) - - self.antialias = antialias - - self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) - self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) - - def preprocess(self, x): - # normalize to [0,1] - x = kornia.geometry.resize(x, (224, 224), - interpolation='bicubic',align_corners=True, - antialias=self.antialias) - x = (x + 1.) / 2. - # renormalize according to clip - x = kornia.enhance.normalize(x, self.mean, self.std) - return x - - def freeze(self): - self.model = self.model.eval() - for param in self.model.parameters(): - param.requires_grad = False - - def forward(self, x): - # x is assumed to be in range [-1,1] - clip_fea = self.model.encode_image(self.preprocess(x)).float() - clip_fea = self.final_projector(clip_fea) - return clip_fea - -class ClipImage2TextEmbedder(nn.Module): - """ - Uses the CLIP image encoder. - """ - def __init__( - self, - model, - jit=False, - device='cuda' if torch.cuda.is_available() else 'cpu', - antialias=False, - ): - super().__init__() - self.model, _ = clip.load(name=model, device=device, jit=jit) - - self.antialias = antialias - - self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) - self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) - - def preprocess(self, x): - # normalize to [0,1] - x = (x + 1.) / 2. 
- # renormalize according to clip - x = kornia.enhance.normalize(x, self.mean, self.std) - return x - - def forward(self, x): - # x is assumed to be in range [-1,1] - return self.model.encode_image(self.preprocess(x)) - - -if __name__ == "__main__": - from ldm.util import count_params - model = FrozenCLIPEmbedder() - count_params(model, verbose=True) diff --git a/spaces/Iceclear/StableSR/StableSR/taming/data/custom.py b/spaces/Iceclear/StableSR/StableSR/taming/data/custom.py deleted file mode 100644 index 33f302a4b55ba1e8ec282ec3292b6263c06dfb91..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/taming/data/custom.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import numpy as np -import albumentations -from torch.utils.data import Dataset - -from taming.data.base import ImagePaths, NumpyPaths, ConcatDatasetWithIndex - - -class CustomBase(Dataset): - def __init__(self, *args, **kwargs): - super().__init__() - self.data = None - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - example = self.data[i] - return example - - - -class CustomTrain(CustomBase): - def __init__(self, size, training_images_list_file): - super().__init__() - with open(training_images_list_file, "r") as f: - paths = f.read().splitlines() - self.data = ImagePaths(paths=paths, size=size, random_crop=False) - - -class CustomTest(CustomBase): - def __init__(self, size, test_images_list_file): - super().__init__() - with open(test_images_list_file, "r") as f: - paths = f.read().splitlines() - self.data = ImagePaths(paths=paths, size=size, random_crop=False) - - diff --git a/spaces/Illumotion/Koboldcpp/tests/test-rope.cpp b/spaces/Illumotion/Koboldcpp/tests/test-rope.cpp deleted file mode 100644 index 26c1f42dc0e956c88adf99fee795c6fb4b465e5e..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/tests/test-rope.cpp +++ /dev/null @@ -1,221 +0,0 @@ -#include "ggml.h" - -#include -#include -#include -#include -#include - -#if defined(_MSC_VER) -#pragma warning(disable: 4244 4267) // possible loss of data -#endif - -#if defined(__GNUC__) -#pragma GCC diagnostic ignored "-Wdouble-promotion" -#endif - -#define MAX_NARGS 3 - -#undef MIN -#undef MAX -#define MIN(a, b) ((a) < (b) ? (a) : (b)) -#define MAX(a, b) ((a) > (b) ? (a) : (b)) - -#define GGML_SILU_FP16 - -// -// logging -// - -#if (GGML_DEBUG >= 1) -#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG(...) -#endif - -#if (GGML_DEBUG >= 5) -#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG_5(...) -#endif - -#if (GGML_DEBUG >= 10) -#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG_10(...) -#endif - -#define GGML_PRINT(...) 
printf(__VA_ARGS__) - -static float frand(void) { - return (float)rand()/(float)RAND_MAX; -} - -static int irand(int n) { - if (n == 0) return 0; - return rand()%n; -} - -static void get_random_dims(int64_t * dims, int ndims) { - dims[0] = dims[1] = dims[2] = dims[3] = 1; - - for (int i = 0; i < ndims; i++) { - dims[i] = 1 + irand(4); - } -} - -static struct ggml_tensor * get_random_tensor_f32( - struct ggml_context * ctx0, - int ndims, - const int64_t ne[], - float fmin, - float fmax) { - struct ggml_tensor * result = ggml_new_tensor(ctx0, GGML_TYPE_F32, ndims, ne); - - switch (ndims) { - case 1: - for (int i0 = 0; i0 < ne[0]; i0++) { - ((float *)result->data)[i0] = frand()*(fmax - fmin) + fmin; - } - break; - case 2: - for (int i1 = 0; i1 < ne[1]; i1++) { - for (int i0 = 0; i0 < ne[0]; i0++) { - ((float *)result->data)[i1*ne[0] + i0] = frand()*(fmax - fmin) + fmin; - } - } - break; - case 3: - for (int i2 = 0; i2 < ne[2]; i2++) { - for (int i1 = 0; i1 < ne[1]; i1++) { - for (int i0 = 0; i0 < ne[0]; i0++) { - ((float *)result->data)[i2*ne[1]*ne[0] + i1*ne[0] + i0] = frand()*(fmax - fmin) + fmin; - } - } - } - break; - case 4: - for (int i3 = 0; i3 < ne[3]; i3++) { - for (int i2 = 0; i2 < ne[2]; i2++) { - for (int i1 = 0; i1 < ne[1]; i1++) { - for (int i0 = 0; i0 < ne[0]; i0++) { - ((float *)result->data)[i3*ne[2]*ne[1]*ne[0] + i2*ne[1]*ne[0] + i1*ne[0] + i0] = frand()*(fmax - fmin) + fmin; - } - } - } - } - break; - default: - assert(false); - }; - - return result; -} - -static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * graph, int n_threads) { - struct ggml_cplan plan = ggml_graph_plan(graph, n_threads); - - if (plan.work_size > 0) { - buf.resize(plan.work_size); - plan.work_data = buf.data(); - } - - ggml_graph_compute(graph, &plan); -} - -int main(int /*argc*/, const char ** /*argv*/) { - struct ggml_init_params params = { - /* .mem_size = */ 128*1024*1024, - /* .mem_buffer = */ NULL, - /* .no_alloc = */ false, - }; - - std::vector work_buffer; - - struct ggml_context * ctx0 = ggml_init(params); - - struct ggml_tensor * x; - - // rope f32 - for (int m = 0; m < 3; ++m) { - const int ndims = 4; - - const int64_t n_rot = 128; - const int64_t ne[4] = { 2*n_rot, 32, 73, 1 }; - - const int n_past_0 = 100; - const int n_past_2 = 33; - - struct ggml_tensor * p0 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2]); - struct ggml_tensor * p1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2]); - struct ggml_tensor * p2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2]); - - for (int i = 0; i < ne[2]; ++i) { - ((int32_t *) p0->data)[i] = n_past_0 + i; - ((int32_t *) p1->data)[i] = n_past_2 - n_past_0; - ((int32_t *) p2->data)[i] = n_past_2 + i; - } - - // test mode 0, 2, 4 (standard, GPT-NeoX, GLM) - const int mode = m == 0 ? 0 : m == 1 ? 2 : 4; - - x = get_random_tensor_f32(ctx0, ndims, ne, -1.0f, 1.0f); - - // 100, 101, 102, ..., 172 - struct ggml_tensor * r0 = ggml_rope(ctx0, x, p0, n_rot, mode, 1024); - // -67, -67, -67, ..., -67 - struct ggml_tensor * r1 = ggml_rope(ctx0, r0, p1, n_rot, mode, 1024); // "context swap", i.e. 
forget n_past_0 - n_past_2 tokens - - // 33, 34, 35, ..., 105 - struct ggml_tensor * r2 = ggml_rope(ctx0, x, p2, n_rot, mode, 1024); - - ggml_cgraph * gf = ggml_new_graph(ctx0); - - ggml_build_forward_expand(gf, r0); - ggml_build_forward_expand(gf, r1); - ggml_build_forward_expand(gf, r2); - - ggml_graph_compute_helper(work_buffer, gf, 4); - - // check that r1 and r2 are the same - { - double sum0 = 0.0f; - double sum1 = 0.0f; - double diff = 0.0f; - - const float * r1_data = (float *) r1->data; - const float * r2_data = (float *) r2->data; - - const int n_elements = ggml_nelements(r1); - - for (int i = 0; i < n_elements; ++i) { - sum0 += fabs(r1_data[i]); - sum1 += fabs(r2_data[i]); - diff += fabs(r1_data[i] - r2_data[i]); - //if (fabs(r1_data[i] - r2_data[i]) > 0.0001f) { - // printf("%d: %f %f\n", i, r1_data[i], r2_data[i]); - // printf("diff: %f\n", fabs(r1_data[i] - r2_data[i])); - //} - } - - //for (int i = 4096; i < 4096 + 128; ++i) { - // printf("%f %f\n", r1_data[i], r2_data[i]); - //} - - printf("mode: %d\n", mode); - printf("sum0: %f\n", sum0); - printf("sum1: %f\n", sum1); - printf("diff: %f\n", diff); - printf("rel err: %f\n", diff / sum0); - printf("rel err: %f\n", diff / sum1); - - GGML_ASSERT(diff / sum0 < 0.0001f); - GGML_ASSERT(diff / sum1 < 0.0001f); - } - } - - ggml_free(ctx0); - - return 0; -} - diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/models/ade20k/segm_lib/nn/modules/comm.py b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/models/ade20k/segm_lib/nn/modules/comm.py deleted file mode 100644 index b64bf6ba3b3e7abbab375c6dd4a87d8239e62138..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/models/ade20k/segm_lib/nn/modules/comm.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -# File : comm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import queue -import collections -import threading - -__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster'] - - -class FutureResult(object): - """A thread-safe future implementation. Used only as one-to-one pipe.""" - - def __init__(self): - self._result = None - self._lock = threading.Lock() - self._cond = threading.Condition(self._lock) - - def put(self, result): - with self._lock: - assert self._result is None, 'Previous result has\'t been fetched.' - self._result = result - self._cond.notify() - - def get(self): - with self._lock: - if self._result is None: - self._cond.wait() - - res = self._result - self._result = None - return res - - -_MasterRegistry = collections.namedtuple('MasterRegistry', ['result']) -_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result']) - - -class SlavePipe(_SlavePipeBase): - """Pipe for master-slave communication.""" - - def run_slave(self, msg): - self.queue.put((self.identifier, msg)) - ret = self.result.get() - self.queue.put(True) - return ret - - -class SyncMaster(object): - """An abstract `SyncMaster` object. - - - During the replication, as the data parallel will trigger an callback of each module, all slave devices should - call `register(id)` and obtain an `SlavePipe` to communicate with the master. - - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected, - and passed to a registered callback. 
- - After receiving the messages, the master device should gather the information and determine to message passed - back to each slave devices. - """ - - def __init__(self, master_callback): - """ - - Args: - master_callback: a callback to be invoked after having collected messages from slave devices. - """ - self._master_callback = master_callback - self._queue = queue.Queue() - self._registry = collections.OrderedDict() - self._activated = False - - def register_slave(self, identifier): - """ - Register an slave device. - - Args: - identifier: an identifier, usually is the device id. - - Returns: a `SlavePipe` object which can be used to communicate with the master device. - - """ - if self._activated: - assert self._queue.empty(), 'Queue is not clean before next initialization.' - self._activated = False - self._registry.clear() - future = FutureResult() - self._registry[identifier] = _MasterRegistry(future) - return SlavePipe(identifier, self._queue, future) - - def run_master(self, master_msg): - """ - Main entry for the master device in each forward pass. - The messages were first collected from each devices (including the master device), and then - an callback will be invoked to compute the message to be sent back to each devices - (including the master device). - - Args: - master_msg: the message that the master want to send to itself. This will be placed as the first - message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example. - - Returns: the message to be sent back to the master device. - - """ - self._activated = True - - intermediates = [(0, master_msg)] - for i in range(self.nr_slaves): - intermediates.append(self._queue.get()) - - results = self._master_callback(intermediates) - assert results[0][0] == 0, 'The first result should belongs to the master.' - - for i, res in results: - if i == 0: - continue - self._registry[i].result.put(res) - - for i in range(self.nr_slaves): - assert self._queue.get() is True - - return results[0][1] - - @property - def nr_slaves(self): - return len(self._registry) diff --git a/spaces/JUNGU/remove-bg-edit/app.py b/spaces/JUNGU/remove-bg-edit/app.py deleted file mode 100644 index f08452a5b7e1484c17e0ab0369452bdaba874826..0000000000000000000000000000000000000000 --- a/spaces/JUNGU/remove-bg-edit/app.py +++ /dev/null @@ -1,78 +0,0 @@ -import gradio as gr -import cv2 -import torch -import numpy as np -from torchvision import transforms - -title = "Remove Bg" -description = "Automatically remove the image background from a profile photo." -article = "
Blog | Github Repo
" - - -def make_transparent_foreground(pic, mask): - # split the image into channels - b, g, r = cv2.split(np.array(pic).astype('uint8')) - # add an alpha channel with and fill all with transparent pixels (max 255) - a = np.ones(mask.shape, dtype='uint8') * 255 - # merge the alpha channel back - alpha_im = cv2.merge([b, g, r, a], 4) - # create a transparent background - bg = np.zeros(alpha_im.shape) - # setup the new mask - new_mask = np.stack([mask, mask, mask, mask], axis=2) - # copy only the foreground color pixels from the original image where mask is set - foreground = np.where(new_mask, alpha_im, bg).astype(np.uint8) - - return foreground - - -def remove_background(input_image): - preprocess = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ]) - - input_tensor = preprocess(input_image) - input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model - - # move the input and model to GPU for speed if available - if torch.cuda.is_available(): - input_batch = input_batch.to('cuda') - model.to('cuda') - - with torch.no_grad(): - output = model(input_batch)['out'][0] - output_predictions = output.argmax(0) - - # create a binary (black and white) mask of the profile foreground - mask = output_predictions.byte().cpu().numpy() - background = np.zeros(mask.shape) - bin_mask = np.where(mask, 255, background).astype(np.uint8) - - foreground = make_transparent_foreground(input_image, bin_mask) - - return foreground, bin_mask - - -def inference(img): - foreground, _ = remove_background(img) - return foreground - - -torch.hub.download_url_to_file('https://pbs.twimg.com/profile_images/691700243809718272/z7XZUARB_400x400.jpg', - 'demis.jpg') -torch.hub.download_url_to_file('https://hai.stanford.edu/sites/default/files/styles/person_medium/public/2020-03/hai_1512feifei.png?itok=INFuLABp', - 'lifeifei.png') -model = torch.hub.load('pytorch/vision:v0.6.0', 'deeplabv3_resnet101', pretrained=True) -model.eval() - -gr.Interface( - inference, - gr.inputs.Image(type="pil", label="Input"), - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[['demis.jpg'], ['lifeifei.png']], - enable_queue=True -).launch(debug=False) diff --git a/spaces/Kedreamix/YoloGesture/nets/CSPdarknet.py b/spaces/Kedreamix/YoloGesture/nets/CSPdarknet.py deleted file mode 100644 index 1063ffdfc7d351f84892a6696015e37913588eac..0000000000000000000000000000000000000000 --- a/spaces/Kedreamix/YoloGesture/nets/CSPdarknet.py +++ /dev/null @@ -1,174 +0,0 @@ -import math -from collections import OrderedDict - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -#-------------------------------------------------# -# MISH激活函数 -#-------------------------------------------------# -class Mish(nn.Module): - def __init__(self): - super(Mish, self).__init__() - - def forward(self, x): - return x * torch.tanh(F.softplus(x)) - -#---------------------------------------------------# -# 卷积块 -> 卷积 + 标准化 + 激活函数 -# Conv2d + BatchNormalization + Mish -#---------------------------------------------------# -class BasicConv(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, stride=1): - super(BasicConv, self).__init__() - - self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, kernel_size//2, bias=False) - self.bn = nn.BatchNorm2d(out_channels) - self.activation = Mish() - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - x = 
self.activation(x) - return x - -#---------------------------------------------------# -# CSPdarknet的结构块的组成部分 -# 内部堆叠的残差块 -#---------------------------------------------------# -class Resblock(nn.Module): - def __init__(self, channels, hidden_channels=None): - super(Resblock, self).__init__() - - if hidden_channels is None: - hidden_channels = channels - - self.block = nn.Sequential( - BasicConv(channels, hidden_channels, 1), - BasicConv(hidden_channels, channels, 3) - ) - - def forward(self, x): - return x + self.block(x) - -#--------------------------------------------------------------------# -# CSPdarknet的结构块 -# 首先利用ZeroPadding2D和一个步长为2x2的卷积块进行高和宽的压缩 -# 然后建立一个大的残差边shortconv、这个大残差边绕过了很多的残差结构 -# 主干部分会对num_blocks进行循环,循环内部是残差结构。 -# 对于整个CSPdarknet的结构块,就是一个大残差块+内部多个小残差块 -#--------------------------------------------------------------------# -class Resblock_body(nn.Module): - def __init__(self, in_channels, out_channels, num_blocks, first): - super(Resblock_body, self).__init__() - #----------------------------------------------------------------# - # 利用一个步长为2x2的卷积块进行高和宽的压缩 - #----------------------------------------------------------------# - self.downsample_conv = BasicConv(in_channels, out_channels, 3, stride=2) - - if first: - #--------------------------------------------------------------------------# - # 然后建立一个大的残差边self.split_conv0、这个大残差边绕过了很多的残差结构 - #--------------------------------------------------------------------------# - self.split_conv0 = BasicConv(out_channels, out_channels, 1) - - #----------------------------------------------------------------# - # 主干部分会对num_blocks进行循环,循环内部是残差结构。 - #----------------------------------------------------------------# - self.split_conv1 = BasicConv(out_channels, out_channels, 1) - self.blocks_conv = nn.Sequential( - Resblock(channels=out_channels, hidden_channels=out_channels//2), - BasicConv(out_channels, out_channels, 1) - ) - - self.concat_conv = BasicConv(out_channels*2, out_channels, 1) - else: - #--------------------------------------------------------------------------# - # 然后建立一个大的残差边self.split_conv0、这个大残差边绕过了很多的残差结构 - #--------------------------------------------------------------------------# - self.split_conv0 = BasicConv(out_channels, out_channels//2, 1) - - #----------------------------------------------------------------# - # 主干部分会对num_blocks进行循环,循环内部是残差结构。 - #----------------------------------------------------------------# - self.split_conv1 = BasicConv(out_channels, out_channels//2, 1) - self.blocks_conv = nn.Sequential( - *[Resblock(out_channels//2) for _ in range(num_blocks)], - BasicConv(out_channels//2, out_channels//2, 1) - ) - - self.concat_conv = BasicConv(out_channels, out_channels, 1) - - def forward(self, x): - x = self.downsample_conv(x) - - x0 = self.split_conv0(x) - - x1 = self.split_conv1(x) - x1 = self.blocks_conv(x1) - - #------------------------------------# - # 将大残差边再堆叠回来 - #------------------------------------# - x = torch.cat([x1, x0], dim=1) - #------------------------------------# - # 最后对通道数进行整合 - #------------------------------------# - x = self.concat_conv(x) - - return x - -#---------------------------------------------------# -# CSPdarknet53 的主体部分 -# 输入为一张416x416x3的图片 -# 输出为三个有效特征层 -#---------------------------------------------------# -class CSPDarkNet(nn.Module): - def __init__(self, layers): - super(CSPDarkNet, self).__init__() - self.inplanes = 32 - # 416,416,3 -> 416,416,32 - self.conv1 = BasicConv(3, self.inplanes, kernel_size=3, stride=1) - self.feature_channels = [64, 128, 256, 512, 1024] - - 
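# Five CSP stages: each Resblock_body halves the spatial resolution with a stride-2 conv and doubles the channel width (32 -> 64 -> 128 -> 256 -> 512 -> 1024).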
self.stages = nn.ModuleList([ - # 416,416,32 -> 208,208,64 - Resblock_body(self.inplanes, self.feature_channels[0], layers[0], first=True), - # 208,208,64 -> 104,104,128 - Resblock_body(self.feature_channels[0], self.feature_channels[1], layers[1], first=False), - # 104,104,128 -> 52,52,256 - Resblock_body(self.feature_channels[1], self.feature_channels[2], layers[2], first=False), - # 52,52,256 -> 26,26,512 - Resblock_body(self.feature_channels[2], self.feature_channels[3], layers[3], first=False), - # 26,26,512 -> 13,13,1024 - Resblock_body(self.feature_channels[3], self.feature_channels[4], layers[4], first=False) - ]) - - self.num_features = 1 - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - - def forward(self, x): - x = self.conv1(x) - - x = self.stages[0](x) - x = self.stages[1](x) - out3 = self.stages[2](x) - out4 = self.stages[3](out3) - out5 = self.stages[4](out4) - - return out3, out4, out5 - -def darknet53(pretrained): - model = CSPDarkNet([1, 2, 8, 8, 4]) - if pretrained: - model.load_state_dict(torch.load("model_data/CSPdarknet53_backbone_weights.pth")) - return model diff --git a/spaces/Kevin676/AutoGPT/tests/milvus_memory_test.py b/spaces/Kevin676/AutoGPT/tests/milvus_memory_test.py deleted file mode 100644 index 84fd6e6d5006e781fa5e1065f949b2160537d913..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/tests/milvus_memory_test.py +++ /dev/null @@ -1,72 +0,0 @@ -# sourcery skip: snake-case-functions -"""Tests for the MilvusMemory class.""" -import os -import sys -import unittest - -try: - from autogpt.memory.milvus import MilvusMemory - - def mock_config() -> dict: - """Mock the Config class""" - return type( - "MockConfig", - (object,), - { - "debug_mode": False, - "continuous_mode": False, - "speak_mode": False, - "milvus_collection": "autogpt", - "milvus_addr": "localhost:19530", - }, - ) - - class TestMilvusMemory(unittest.TestCase): - """Tests for the MilvusMemory class.""" - - def setUp(self) -> None: - """Set up the test environment""" - self.cfg = mock_config() - self.memory = MilvusMemory(self.cfg) - - def test_add(self) -> None: - """Test adding a text to the cache""" - text = "Sample text" - self.memory.clear() - self.memory.add(text) - result = self.memory.get(text) - self.assertEqual([text], result) - - def test_clear(self) -> None: - """Test clearing the cache""" - self.memory.clear() - self.assertEqual(self.memory.collection.num_entities, 0) - - def test_get(self) -> None: - """Test getting a text from the cache""" - text = "Sample text" - self.memory.clear() - self.memory.add(text) - result = self.memory.get(text) - self.assertEqual(result, [text]) - - def test_get_relevant(self) -> None: - """Test getting relevant texts from the cache""" - text1 = "Sample text 1" - text2 = "Sample text 2" - self.memory.clear() - self.memory.add(text1) - self.memory.add(text2) - result = self.memory.get_relevant(text1, 1) - self.assertEqual(result, [text1]) - - def test_get_stats(self) -> None: - """Test getting the cache stats""" - text = "Sample text" - self.memory.clear() - self.memory.add(text) - stats = self.memory.get_stats() - self.assertEqual(15, len(stats)) - -except: - print("Milvus not installed, skipping tests") diff --git a/spaces/Kevin676/AutoGPT/tests/test_json_parser.py b/spaces/Kevin676/AutoGPT/tests/test_json_parser.py deleted file mode 
100644 index 41c90a6f66c0b0468f1443de80033cc4f268eca0..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/tests/test_json_parser.py +++ /dev/null @@ -1,111 +0,0 @@ -import unittest - -import tests.context -from autogpt.json_utils.json_fix_llm import fix_and_parse_json - - -class TestParseJson(unittest.TestCase): - def test_valid_json(self): - # Test that a valid JSON string is parsed correctly - json_str = '{"name": "John", "age": 30, "city": "New York"}' - obj = fix_and_parse_json(json_str) - self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) - - def test_invalid_json_minor(self): - # Test that an invalid JSON string can be fixed with gpt - json_str = '{"name": "John", "age": 30, "city": "New York",}' - with self.assertRaises(Exception): - fix_and_parse_json(json_str, try_to_fix_with_gpt=False) - - def test_invalid_json_major_with_gpt(self): - # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' - with self.assertRaises(Exception): - fix_and_parse_json(json_str, try_to_fix_with_gpt=False) - - def test_invalid_json_major_without_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' - # Assert that this raises an exception: - with self.assertRaises(Exception): - fix_and_parse_json(json_str, try_to_fix_with_gpt=False) - - def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = """I suggest we start by browsing the repository to find any issues that we can fix. - -{ - "command": { - "name": "browse_website", - "args":{ - "url": "https://github.com/Torantulino/Auto-GPT" - } - }, - "thoughts": - { - "text": "I suggest we start browsing the repository to find any issues that we can fix.", - "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", - "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", - "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", - "speak": "I will start browsing the repository to find any issues we can fix." 
- } -}""" - good_obj = { - "command": { - "name": "browse_website", - "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, - }, - "thoughts": { - "text": "I suggest we start browsing the repository to find any issues that we can fix.", - "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", - "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", - "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", - "speak": "I will start browsing the repository to find any issues we can fix.", - }, - } - # Assert that this raises an exception: - self.assertEqual( - fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj - ) - - def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. - -{ - "command": { - "name": "browse_website", - "args":{ - "url": "https://github.com/Torantulino/Auto-GPT" - } - }, - "thoughts": - { - "text": "Browsing the repository to identify potential bugs", - "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", - "plan": "- Analyze the repository for potential bugs and areas of improvement", - "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", - "speak": "I am browsing the repository to identify potential bugs." - } -}""" - good_obj = { - "command": { - "name": "browse_website", - "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, - }, - "thoughts": { - "text": "Browsing the repository to identify potential bugs", - "reasoning": "Before fixing bugs, I need to identify what needs fixing. 
I will use the 'browse_website' command to analyze the repository.", - "plan": "- Analyze the repository for potential bugs and areas of improvement", - "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", - "speak": "I am browsing the repository to identify potential bugs.", - }, - } - # Assert that this raises an exception: - self.assertEqual( - fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/KevinGeng/Laronix_voice_quality_checking_system_FILEIO/model.py b/spaces/KevinGeng/Laronix_voice_quality_checking_system_FILEIO/model.py deleted file mode 100644 index 281d100e39c73fa9b8541e9fc9fdfac2d390ae0e..0000000000000000000000000000000000000000 --- a/spaces/KevinGeng/Laronix_voice_quality_checking_system_FILEIO/model.py +++ /dev/null @@ -1,191 +0,0 @@ -import torch -import torch.nn as nn -import fairseq -import os -import hydra - -def load_ssl_model(cp_path): - ssl_model_type = cp_path.split("/")[-1] - wavlm = "WavLM" in ssl_model_type - if wavlm: - checkpoint = torch.load(cp_path) - cfg = WavLMConfig(checkpoint['cfg']) - ssl_model = WavLM(cfg) - ssl_model.load_state_dict(checkpoint['model']) - if 'Large' in ssl_model_type: - SSL_OUT_DIM = 1024 - else: - SSL_OUT_DIM = 768 - else: - if ssl_model_type == "wav2vec_small.pt": - SSL_OUT_DIM = 768 - elif ssl_model_type in ["w2v_large_lv_fsh_swbd_cv.pt", "xlsr_53_56k.pt"]: - SSL_OUT_DIM = 1024 - else: - print("*** ERROR *** SSL model type " + ssl_model_type + " not supported.") - exit() - model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task( - [cp_path] - ) - ssl_model = model[0] - ssl_model.remove_pretraining_modules() - return SSL_model(ssl_model, SSL_OUT_DIM, wavlm) - -class SSL_model(nn.Module): - def __init__(self,ssl_model,ssl_out_dim,wavlm) -> None: - super(SSL_model,self).__init__() - self.ssl_model, self.ssl_out_dim = ssl_model, ssl_out_dim - self.WavLM = wavlm - - def forward(self,batch): - wav = batch['wav'] - wav = wav.squeeze(1) # [batches, audio_len] - if self.WavLM: - x = self.ssl_model.extract_features(wav)[0] - else: - res = self.ssl_model(wav, mask=False, features_only=True) - x = res["x"] - return {"ssl-feature":x} - def get_output_dim(self): - return self.ssl_out_dim - - -class PhonemeEncoder(nn.Module): - ''' - PhonemeEncoder consists of an embedding layer, an LSTM layer, and a linear layer. 
- Args: - vocab_size: the size of the vocabulary - hidden_dim: the size of the hidden state of the LSTM - emb_dim: the size of the embedding layer - out_dim: the size of the output of the linear layer - n_lstm_layers: the number of LSTM layers - ''' - def __init__(self, vocab_size, hidden_dim, emb_dim, out_dim,n_lstm_layers,with_reference=True) -> None: - super().__init__() - self.with_reference = with_reference - self.embedding = nn.Embedding(vocab_size, emb_dim) - self.encoder = nn.LSTM(emb_dim, hidden_dim, - num_layers=n_lstm_layers, dropout=0.1, bidirectional=True) - self.linear = nn.Sequential( - nn.Linear(hidden_dim + hidden_dim*self.with_reference, out_dim), - nn.ReLU() - ) - self.out_dim = out_dim - - def forward(self,batch): - seq = batch['phonemes'] - lens = batch['phoneme_lens'] - reference_seq = batch['reference'] - reference_lens = batch['reference_lens'] - emb = self.embedding(seq) - emb = torch.nn.utils.rnn.pack_padded_sequence( - emb, lens, batch_first=True, enforce_sorted=False) - _, (ht, _) = self.encoder(emb) - feature = ht[-1] + ht[0] - if self.with_reference: - if reference_seq==None or reference_lens ==None: - raise ValueError("reference_batch and reference_lens should not be None when with_reference is True") - reference_emb = self.embedding(reference_seq) - reference_emb = torch.nn.utils.rnn.pack_padded_sequence( - reference_emb, reference_lens, batch_first=True, enforce_sorted=False) - _, (ht_ref, _) = self.encoder(emb) - reference_feature = ht_ref[-1] + ht_ref[0] - feature = self.linear(torch.cat([feature,reference_feature],1)) - else: - feature = self.linear(feature) - return {"phoneme-feature": feature} - def get_output_dim(self): - return self.out_dim - -class DomainEmbedding(nn.Module): - def __init__(self,n_domains,domain_dim) -> None: - super().__init__() - self.embedding = nn.Embedding(n_domains,domain_dim) - self.output_dim = domain_dim - def forward(self, batch): - return {"domain-feature": self.embedding(batch['domains'])} - def get_output_dim(self): - return self.output_dim - - -class LDConditioner(nn.Module): - ''' - Conditions ssl output by listener embedding - ''' - def __init__(self,input_dim, judge_dim, num_judges=None): - super().__init__() - self.input_dim = input_dim - self.judge_dim = judge_dim - self.num_judges = num_judges - assert num_judges !=None - self.judge_embedding = nn.Embedding(num_judges, self.judge_dim) - # concat [self.output_layer, phoneme features] - - self.decoder_rnn = nn.LSTM( - input_size = self.input_dim + self.judge_dim, - hidden_size = 512, - num_layers = 1, - batch_first = True, - bidirectional = True - ) # linear? 
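# The decoder LSTM is bidirectional, so its per-step output (and this module's output dim) is 2 * hidden_size.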
- self.out_dim = self.decoder_rnn.hidden_size*2 - - def get_output_dim(self): - return self.out_dim - - - def forward(self, x, batch): - judge_ids = batch['judge_id'] - if 'phoneme-feature' in x.keys(): - concatenated_feature = torch.cat((x['ssl-feature'], x['phoneme-feature'].unsqueeze(1).expand(-1,x['ssl-feature'].size(1) ,-1)),dim=2) - else: - concatenated_feature = x['ssl-feature'] - if 'domain-feature' in x.keys(): - concatenated_feature = torch.cat( - ( - concatenated_feature, - x['domain-feature'] - .unsqueeze(1) - .expand(-1, concatenated_feature.size(1), -1), - ), - dim=2, - ) - if judge_ids != None: - concatenated_feature = torch.cat( - ( - concatenated_feature, - self.judge_embedding(judge_ids) - .unsqueeze(1) - .expand(-1, concatenated_feature.size(1), -1), - ), - dim=2, - ) - decoder_output, (h, c) = self.decoder_rnn(concatenated_feature) - return decoder_output - -class Projection(nn.Module): - def __init__(self, input_dim, hidden_dim, activation, range_clipping=False): - super(Projection, self).__init__() - self.range_clipping = range_clipping - output_dim = 1 - if range_clipping: - self.proj = nn.Tanh() - - self.net = nn.Sequential( - nn.Linear(input_dim, hidden_dim), - activation, - nn.Dropout(0.3), - nn.Linear(hidden_dim, output_dim), - ) - self.output_dim = output_dim - - def forward(self, x, batch): - output = self.net(x) - - # range clipping - if self.range_clipping: - return self.proj(output) * 2.0 + 3 - else: - return output - def get_output_dim(self): - return self.output_dim diff --git a/spaces/Kwasiasomani/Streamlit-Sentimental-Analysis/README.md b/spaces/Kwasiasomani/Streamlit-Sentimental-Analysis/README.md deleted file mode 100644 index f4cae641602ca6fec1a3f8cd9bfff72bf57a4206..0000000000000000000000000000000000000000 --- a/spaces/Kwasiasomani/Streamlit-Sentimental-Analysis/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Streamlit Sentimental Analysis -emoji: 📊 -colorFrom: green -colorTo: green -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KyanChen/RSPrompter/mmpl/models/layers/__init__.py b/spaces/KyanChen/RSPrompter/mmpl/models/layers/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/LUOYE-123/QQsign/devices/device_8958.js b/spaces/LUOYE-123/QQsign/devices/device_8958.js deleted file mode 100644 index 455ddb0108b70276949e6539926481590a98e0d9..0000000000000000000000000000000000000000 --- a/spaces/LUOYE-123/QQsign/devices/device_8958.js +++ /dev/null @@ -1,344 +0,0 @@ -"use strict"; -var __importDefault = (this && this.__importDefault) || function (mod) { - return (mod && mod.__esModule) ? 
mod : { "default": mod }; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.getApkInfo = exports.Platform = exports.Device = exports.generateFullDevice = exports.generateShortDevice = void 0; -const crypto_1 = require("crypto"); -const constants_1 = require("./constants"); -const axios_1 = __importDefault(require("axios")); -const algo_1 = require("./algo"); -function generateImei() { - let imei = `86${(0, constants_1.randomString)(12, '0123456789')}`; - function calcSP(imei) { - let sum = 0; - for (let i = 0; i < imei.length; ++i) { - if (i % 2) { - let j = parseInt(imei[i]) * 2; - sum += j % 10 + Math.floor(j / 10); - } - else { - sum += parseInt(imei[i]); - } - } - return (100 - sum) % 10; - } - return imei + calcSP(imei); -} -/** 生成短设备信息 */ -function generateShortDevice() { - const randstr = (length, num = false) => { - const map = num ? '0123456789' : '0123456789abcdef'; - return (0, constants_1.randomString)(length, map); - }; - return { - "--begin--": "该设备为随机生成,丢失后不能得到原先配置", - product: `ILPP-${randstr(5).toUpperCase()}`, - device: `${randstr(5).toUpperCase()}`, - board: `${randstr(5).toUpperCase()}`, - brand: `${randstr(4).toUpperCase()}`, - model: `ICQQ ${randstr(4).toUpperCase()}`, - wifi_ssid: `HUAWEI-${randstr(7)}`, - bootloader: `U-boot`, - android_id: `IL.${randstr(7, true)}.${randstr(4, true)}`, - boot_id: `${randstr(8)}-${randstr(4)}-${randstr(4)}-${randstr(4)}-${randstr(12)}`, - proc_version: `Linux version 5.10.101-android12-${randstr(8)}`, - mac_address: `2D:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}`, - ip_address: `192.168.${randstr(2, true)}.${randstr(2, true)}`, - imei: `${generateImei()}`, - incremental: `${randstr(10, true).toUpperCase()}`, - "--end--": "修改后可能需要重新验证设备。" - }; -} -exports.generateShortDevice = generateShortDevice; -/** 生成完整设备信息 */ -function generateFullDevice(apk, d) { - if (!d) - d = generateShortDevice(); - return { - display: d.android_id, - product: d.product, - device: d.device, - board: d.board, - brand: d.brand, - model: d.model, - bootloader: d.bootloader, - fingerprint: `${d.brand}/${d.product}/${d.device}:10/${d.android_id}/${d.incremental}:user/release-keys`, - boot_id: d.boot_id, - proc_version: d.proc_version, - baseband: "", - sim: "T-Mobile", - os_type: "android", - mac_address: d.mac_address, - ip_address: d.ip_address, - wifi_bssid: d.mac_address, - wifi_ssid: d.wifi_ssid, - imei: d.imei, - android_id: (0, constants_1.md5)(d.android_id).toString("hex"), - apn: "wifi", - version: { - incremental: d.incremental, - release: "10", - codename: "REL", - sdk: 29, - }, - imsi: (0, crypto_1.randomBytes)(16), - guid: (0, constants_1.md5)(Buffer.concat([Buffer.from(d.imei), Buffer.from(d.mac_address)])), - }; -} -exports.generateFullDevice = generateFullDevice; -class Device { - constructor(apk, d) { - this.apk = apk; - this.secret = 'ZdJqM15EeO2zWc08'; - this.publicKey = `-----BEGIN PUBLIC KEY----- -MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDEIxgwoutfwoJxcGQeedgP7FG9 -qaIuS0qzfR8gWkrkTZKM2iWHn2ajQpBRZjMSoSf6+KJGvar2ORhBfpDXyVtZCKpq -LQ+FLkpncClKVIrBwv6PHyUvuCb0rIarmgDnzkfQAqVufEtR64iazGDKatvJ9y6B -9NMbHddGSAUmRTCrHQIDAQAB ------END PUBLIC KEY-----`; - if (!d) - d = generateShortDevice(); - Object.assign(this, generateFullDevice(apk, d)); - } - async getQIMEI() { - if (this.apk.app_key === "") { - return; - } - const k = (0, constants_1.randomString)(16); - const key = (0, algo_1.encryptPKCS1)(this.publicKey, k); - const time = 
Date.now(); - const nonce = (0, constants_1.randomString)(16); - const payload = this.genRandomPayloadByDevice(); - const params = (0, algo_1.aesEncrypt)(JSON.stringify(payload), k).toString('base64'); - try { - const { data } = await axios_1.default.post("https://snowflake.qq.com/ola/android", { - key, - params, - time, nonce, - sign: (0, constants_1.md5)(key + params + time + nonce + this.secret).toString("hex"), - extra: '' - }, { - headers: { - 'User-Agent': `Dalvik/2.1.0 (Linux; U; Android ${this.version.release}; PCRT00 Build/N2G48H)`, - 'Content-Type': "application/json" - } - }); - if (data?.code !== 0) { - return; - } - const { q16, q36 } = JSON.parse((0, algo_1.aesDecrypt)(data.data, k)); - this.qImei16 = q16; - this.qImei36 = q36; - } - catch { - } - } - genRandomPayloadByDevice() { - const fixedRand = (max = 1, min = 0) => { - if (max < min) - [max, min] = [min, max]; - const diff = max - min; - return Math.floor(Math.random() * diff) + min; - }; - const reserved = { - "harmony": "0", - "clone": Math.random() > 0.5 ? "1" : "0", - "containe": "", - "oz": "", - "oo": "", - "kelong": Math.random() > 0.5 ? "1" : "0", - "uptimes": (0, constants_1.formatTime)(new Date()), - "multiUser": Math.random() > 0.5 ? "1" : "0", - "bod": this.board, - "brd": this.brand, - "dv": this.device, - "firstLevel": "", - "manufact": this.brand, - "name": this.model, - "host": "se.infra", - "kernel": this.fingerprint - }; - const timestamp = Date.now(); - this.mtime = this.mtime || Date.now(); - const mtime1 = new Date(this.mtime || Date.now()); - const dateFormat = (fmt, time = Date.now()) => (0, constants_1.formatTime)(time, fmt); - const mtimeStr1 = dateFormat("YYYY-mm-ddHHMMSS", mtime1) + "." + this.imei.slice(2, 11); - const mtime2 = new Date(this.mtime - parseInt(this.imei.slice(2, 4))); - const mtimeStr2 = dateFormat("YYYY-mm-ddHHMMSS", mtime2) + "." 
+ this.imei.slice(5, 14); - let beaconIdArr = [ - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - mtimeStr1, - '0000000000000000', - (0, constants_1.md5)(this.android_id + this.imei).toString("hex").slice(0, 16), - ...new Array(4).fill(false).map((_) => fixedRand(10000000, 1000000)), - this.boot_id, - '1', - fixedRand(5, 0), - fixedRand(5, 0), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(5, 0), - fixedRand(100, 10), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(50000, 10000), - fixedRand(100, 10), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - mtimeStr2, - fixedRand(10000, 1000), - fixedRand(5, 0), - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((10 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((11 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - fixedRand(10000, 1000), - fixedRand(100, 10), - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((11 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((11 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - fixedRand(10000, 1000), - fixedRand(5, 0), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(5, 0), - fixedRand(100, 10), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(5, 0), - fixedRand(5, 0), - ].map((str, idx) => `k${idx + 1}:${str}`); - return { - "androidId": this.android_id, - "platformId": 1, - "appKey": this.apk.app_key, - "appVersion": this.apk.version, - "beaconIdSrc": beaconIdArr.join(';'), - "brand": this.brand, - "channelId": "2017", - "cid": "", - "imei": this.imei, - "imsi": this.imsi.toString("hex"), - "mac": this.mac_address, - "model": this.model, - "networkType": "unknown", - "oaid": "", - "osVersion": `Android ${this.version.release},level ${this.version.sdk}`, - "qimei": "", - "qimei36": "", - "sdkVersion": "1.2.13.6", - "targetSdkVersion": "26", - "audit": "", - "userId": "{}", - "packageId": this.apk.id, - "deviceType": this.display, - "sdkName": "", - "reserved": JSON.stringify(reserved), - }; - } -} -exports.Device = Device; -/** 支持的登录设备平台 */ -var Platform; -(function (Platform) { - Platform[Platform["Android"] = 1] = "Android"; - Platform[Platform["aPad"] = 2] = "aPad"; - Platform[Platform["Watch"] = 3] = "Watch"; - Platform[Platform["iMac"] = 4] = "iMac"; - Platform[Platform["iPad"] = 5] = "iPad"; - Platform[Platform["Tim"] = 6] = "Tim"; -})(Platform = exports.Platform || (exports.Platform = {})); -const mobile = { - 
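// Baseline APK profile for the Android QQ client (package id, version strings, signing hash, protocol sub-ids); the other platform entries below reuse these fields via object spread.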
id: "com.tencent.mobileqq", - app_key: '0S200MNJT807V3GE', - name: "A8.9.58.11175", - version: "8.9.58.11175", - ver: "8.9.58", - sign: Buffer.from('A6 B7 45 BF 24 A2 C2 77 52 77 16 F6 F3 6E B6 8D'.split(' ').map(s => parseInt(s, 16))), - buildtime: 1684467300, - appid: 16, - subid: 537163194, - bitmap: 150470524, - main_sig_map: 16724722, - sub_sig_map: 0x10400, - sdkver: "6.0.0.2545", - display: "Android_8.9.58", - qua: 'V1_AND_SQ_8.9.58_4108_YYB_D', - ssover: 20, -}; -const tim = { - id: "com.tencent.tim", - app_key: '0S200MNJT807V3GE', - name: "A3.5.1.3168", - version: "3.5.1.3168", - ver: "3.5.1", - sign: Buffer.from('775e696d09856872fdd8ab4f3f06b1e0', 'hex'), - buildtime: 1630062176, - appid: 16, - subid: 537150355, - bitmap: 150470524, - main_sig_map: 16724722, - sub_sig_map: 0x10400, - sdkver: "6.0.0.2484", - display: "Tim", - qua: "V1_AND_SQ_8.3.9_351_TIM_D", - ssover: 18, -}; -const watch = { - id: "com.tencent.qqlite", - app_key: '0S200MNJT807V3GE', - name: "A2.0.8", - version: "2.0.8", - ver: "2.0.8", - sign: Buffer.from('A6 B7 45 BF 24 A2 C2 77 52 77 16 F6 F3 6E B6 8D'.split(' ').map(s => parseInt(s, 16))), - buildtime: 1559564731, - appid: 16, - subid: 537065138, - bitmap: 16252796, - main_sig_map: 16724722, - sub_sig_map: 0x10400, - sdkver: "6.0.0.2365", - display: "Watch", - qua: '', - ssover: 5 -}; -const hd = { - id: "com.tencent.minihd.qq", - app_key: '0S200MNJT807V3GE', - name: "A5.9.3.3468", - version: "5.9.3.3468", - ver: "5.9.3", - sign: Buffer.from('AA 39 78 F4 1F D9 6F F9 91 4A 66 9E 18 64 74 C7'.split(' ').map(s => parseInt(s, 16))), - buildtime: 1637427966, - appid: 16, - subid: 537128930, - bitmap: 150470524, - main_sig_map: 1970400, - sub_sig_map: 66560, - sdkver: "6.0.0.2433", - display: "iMac", - qua: '', - ssover: 12 -}; -const apklist = { - [Platform.Android]: mobile, - [Platform.Tim]: tim, - [Platform.aPad]: { - ...mobile, - subid: 537163242, - display: 'aPad_8.9.58' - }, - [Platform.Watch]: watch, - [Platform.iMac]: { ...hd }, - [Platform.iPad]: { - ...mobile, - subid: 537155074, - sign: hd.sign, - name: '8.9.50.611', - ver: '8.9.50', - sdkver: '6.0.0.2535', - qua: 'V1_AND_SQ_8.9.50_3898_YYB_D', - display: 'iPad' - }, -}; -function getApkInfo(p) { - return apklist[p] || apklist[Platform.Android]; -} -exports.getApkInfo = getApkInfo; diff --git a/spaces/LanQian/ChatChuanHu/README.md b/spaces/LanQian/ChatChuanHu/README.md deleted file mode 100644 index d1ae83f73ac14888dedce02615afaaaea7f3d7d5..0000000000000000000000000000000000000000 --- a/spaces/LanQian/ChatChuanHu/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChuanhuChatGPT -emoji: 🐠 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: mit -duplicated_from: JohnSmith9982/ChuanhuChatGPT ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Laronix/Laronix_ASR_TTS_VC/app.py b/spaces/Laronix/Laronix_ASR_TTS_VC/app.py deleted file mode 100644 index 037618843840f8695ebc7c40bf3d2441d613df18..0000000000000000000000000000000000000000 --- a/spaces/Laronix/Laronix_ASR_TTS_VC/app.py +++ /dev/null @@ -1,510 +0,0 @@ -""" -TODO: - + [x] Load Configuration - + [ ] Checking - + [ ] Better saving directory -""" -import numpy as np -from pathlib import Path -import torch.nn as nn -import torch -import torchaudio -from transformers import pipeline -from pathlib import Path -import datetime - -import pdb -# local import -import sys -from espnet2.bin.tts_inference import Text2Speech -from 
transformers import AutoTokenizer, AutoFeatureExtractor, AutoModelForCTC -device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - -sys.path.append("src") - -import gradio as gr -from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq - -processor = AutoProcessor.from_pretrained("KevinGeng/whipser_medium_en_PAL300_step25") - -model = AutoModelForSpeechSeq2Seq.from_pretrained("KevinGeng/whipser_medium_en_PAL300_step25") - -transcriber = pipeline("automatic-speech-recognition", model="KevinGeng/whipser_medium_en_PAL300_step25") - -# Text2Mel models -# @title English multi-speaker pretrained model { run: "auto" } -lang = "English" -vits_tag = "kan-bayashi/libritts_xvector_vits" -ft2_tag = "kan-bayashi/libritts_xvector_conformer_fastspeech2" -transformer_tag = "kan-bayashi/libritts_xvector_transformer" - -# !!! vits needs no vocoder !!! -# Local Text2Mel models - -vits_config_local = "TTS_models/libritts_xvector_vits/config.yaml" -vits_model_local = "TTS_models/libritts_xvector_vits/train.total_count.ave_10best.pth" - -# TODO -ft2_config_local = "" -ft2_model_local= "" -transformer_config_local = "" -transformer_config_local = "" - -# Vocoders -vocoder_tag = "parallel_wavegan/vctk_parallel_wavegan.v1.long" # @param ["none", "parallel_wavegan/vctk_parallel_wavegan.v1.long", "parallel_wavegan/vctk_multi_band_melgan.v2", "parallel_wavegan/vctk_style_melgan.v1", "parallel_wavegan/vctk_hifigan.v1", "parallel_wavegan/libritts_parallel_wavegan.v1.long", "parallel_wavegan/libritts_multi_band_melgan.v2", "parallel_wavegan/libritts_hifigan.v1", "parallel_wavegan/libritts_style_melgan.v1"] {type:"string"} -hifigan_vocoder_tag = "parallel_wavegan/parallel_wavegan/libritts_hifigan.v1" # @param ["none", "parallel_wavegan/vctk_parallel_wavegan.v1.long", "parallel_wavegan/vctk_multi_band_melgan.v2", "parallel_wavegan/vctk_style_melgan.v1", "parallel_wavegan/vctk_hifigan.v1", "parallel_wavegan/libritts_parallel_wavegan.v1.long", "parallel_wavegan/libritts_multi_band_melgan.v2", "parallel_wavegan/libritts_hifigan.v1", "parallel_wavegan/libritts_style_melgan.v1"] {type:"string"} - -# Local Vocoders -## Make sure the use parallel_wavegan as prefix (PWG feature) -vocoder_tag_local = "parallel_wavegan/vctk_parallel_wavegan.v1.long" -hifigan_vocoder_tag_local = "parallel_wavegan/libritts_hifigan.v1" - -from espnet2.bin.tts_inference import Text2Speech -from espnet2.utils.types import str_or_none - -# local import -text2speech = Text2Speech.from_pretrained( - train_config = vits_config_local, - model_file=vits_model_local, - device="cuda", - use_att_constraint=False, - backward_window=1, - forward_window=3, - speed_control_alpha=1.0, -) - -# # Fastspeech2 -# ft2_text2speech = Text2Speech.from_pretrained( -# model_tag=ft2_tag, -# vocoder_tag=str_or_none(vocoder_tag_local), -# device="cuda", -# use_att_constraint=False, -# backward_window=1, -# forward_window=3, -# speed_control_alpha=1.0, -# ) - -# # Fastspeech2 + hifigan -# ft2_text2speech_hifi = Text2Speech.from_pretrained( -# model_tag=ft2_tag, -# vocoder_tag=str_or_none(hifigan_vocoder_tag_local), -# device="cuda", -# use_att_constraint=False, -# backward_window=1, -# forward_window=3, -# speed_control_alpha=1.0, -# ) - -# # transformer tag -# transformer_text2speech = Text2Speech.from_pretrained( -# model_tag=transformer_tag, -# vocoder_tag=str_or_none(vocoder_tag_local), -# device="cuda", -# use_att_constraint=False, -# backward_window=1, -# forward_window=3, -# speed_control_alpha=1.0, -# ) - -import glob -import os -import numpy as 
np -import kaldiio - -# Get model directory path -# from espnet_model_zoo.downloader import ModelDownloader - -# d = ModelDownloader() -# model_dir = os.path.dirname(d.download_and_unpack(tag)["train_config"]) - -# Speaker x-vector selection - -xvector_ark = [ - p - for p in glob.glob( - f"xvector/test-clean/spk_xvector.ark", recursive=True - ) - if "test" in p -][0] -xvectors = {k: v for k, v in kaldiio.load_ark(xvector_ark)} -spks = list(xvectors.keys()) - -male_spks = { - "Male1": "260_123286", - "Male2": "1320_122612", - "Male3": "672_122797" -} - -female_spks = {"Female1": "5683_32865", - "Female2": "121_121726", - "Female3": "8463_287645"} - -spks = dict(male_spks, **female_spks) -spk_names = sorted(spks.keys()) - -def ASRTTS(audio_file, spk_name, ref_text=""): - spk = spks[spk_name] - spembs = xvectors[spk] - if ref_text == "": - reg_text = transcriber(audio_file)["text"] - else: - reg_text = ref_text - - speech, sr = torchaudio.load( - audio_file, channels_first=True - ) # Mono channel - wav_tensor_spembs = text2speech( - text=reg_text, speech=speech, spembs=spembs - )["wav"] - wav_numpy = wav_tensor_spembs.unsqueeze(1).to("cpu") - sample_rate = 22050 - save_id = ( - "./wav/" + Path(audio_file).stem + "_" + spk_name + "_spkembs.wav" - ) - torchaudio.save( - save_id, - src=wav_tensor_spembs.unsqueeze(0).to("cpu"), - sample_rate=22050, - ) - - return save_id, reg_text - - -def ASRTTS_clean(audio_file, spk_name): - spk = spks[spk_name] - spembs = xvectors[spk] - - reg_text = transcriber(audio_file)["text"] - - speech, sr = torchaudio.load( - audio_file, channels_first=True - ) # Mono channel - wav_tensor_spembs = text2speech( - text=reg_text, speech=speech, spembs=spembs - )["wav"] - wav_numpy = wav_tensor_spembs.unsqueeze(1).to("cpu") - sample_rate = 22050 - - # create another saving id wav file with current time stamp in YYYYMMDD_HHMMSS format - save_id = ( - "./wav/" + str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) + ".wav" - ) - - torchaudio.save( - save_id, - src=wav_tensor_spembs.unsqueeze(0).to("cpu"), - sample_rate=22050, - ) - return save_id - - -def ft2_ASRTTS_clean(audio_file, spk_name): - spk = spks[spk_name] - spembs = xvectors[spk] - - reg_text = transcriber(audio_file)["text"] - - speech, sr = torchaudio.load( - audio_file, channels_first=True - ) # Mono channel - wav_tensor_spembs = ft2_text2speech( - text=reg_text, speech=speech, spembs=spembs - )["wav"] - wav_numpy = wav_tensor_spembs.unsqueeze(1).to("cpu") - sample_rate = 22050 - save_id = ( - "./wav/" + Path(audio_file).stem + "_fs2_" + spk_name + "_spkembs.wav" - ) - torchaudio.save( - save_id, - src=wav_tensor_spembs.unsqueeze(0).to("cpu"), - sample_rate=22050, - ) - return save_id - -def ft2_ASRTTS_clean_hifi(audio_file, spk_name): - spk = spks[spk_name] - spembs = xvectors[spk] - - reg_text = transcriber(audio_file)["text"] - - speech, sr = torchaudio.load( - audio_file, channels_first=True - ) # Mono channel - wav_tensor_spembs = ft2_text2speech_hifi( - text=reg_text, speech=speech, spembs=spembs - )["wav"] - wav_numpy = wav_tensor_spembs.unsqueeze(1).to("cpu") - sample_rate = 22050 - save_id = ( - "./wav/" + Path(audio_file).stem + "_fs2_hifi_" + spk_name + "_spkembs.wav" - ) - torchaudio.save( - save_id, - src=wav_tensor_spembs.unsqueeze(0).to("cpu"), - sample_rate=22050, - ) - return save_id - -def transformer_ASRTTS_clean(audio_file, spk_name): - spk = spks[spk_name] - spembs = xvectors[spk] - - reg_text = transcriber(audio_file)["text"] - - speech, sr = torchaudio.load( - audio_file, 
channels_first=True - ) # Mono channel - wav_tensor_spembs = transformer_text2speech( - text=reg_text, speech=speech, spembs=spembs - )["wav"] - wav_numpy = wav_tensor_spembs.unsqueeze(1).to("cpu") - sample_rate = 22050 - save_id = ( - "./wav/" + Path(audio_file).stem + "_transformer_" + spk_name + "_spkembs.wav" - ) - torchaudio.save( - save_id, - src=wav_tensor_spembs.unsqueeze(0).to("cpu"), - sample_rate=22050, - ) - return save_id - -# def google_ASRTTS_clean(audio_file, spk_name): -# spk = spks[spk_name] -# spembs = xvectors[spk] - -# reg_text = transcriber(audio_file)["text"] -# # pdb.set_trace() -# synthesis_input = texttospeech.SynthesisInput(text=reg_text) -# voice = texttospeech.VoiceSelectionParams( -# language_code="en-US", ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL -# ) -# audio_config = texttospeech.AudioConfig( -# audio_encoding=texttospeech.AudioEncoding.MP3 -# ) -# response = Google_TTS_client.synthesize_speech( -# input=synthesis_input, voice=voice, audio_config=audio_config -# ) - -# save_id = ( -# "./wav/" + Path(audio_file).stem + "_google_" + spk_name + "_spkembs.wav" - -# ) -# with open(save_id, "wb") as out_file: -# out_file.write(response.audio_content) - -# return save_id - - -reference_textbox = gr.Textbox( - value="", - placeholder="Input reference here", - label="Reference", -) - -recognization_textbox = gr.Textbox( - value="", - placeholder="Output recognization here", - label="recognization_textbox", -) - -speaker_option = gr.Radio(choices=spk_names, label="Speaker") - -input_audio = gr.Audio( - source="upload", type="filepath", label="Audio_to_Evaluate" -) -output_audio = gr.Audio( - source="upload", file="filepath", label="Synthesized Audio" -) -examples = [ - ["./samples/001.wav", "M1", ""], - ["./samples/002.wav", "M2", ""], - ["./samples/003.wav", "F1", ""], - ["./samples/004.wav", "F2", ""], -] - -def change_audiobox(choice): - if choice == "upload": - input_audio = gr.Audio(source="upload", visible=True) - elif choice == "microphone": - input_audio = gr.Audio(source="microphone", visible=True) - else: - input_audio = gr.Audio(visible=False) - return input_audio - - -def show_icon(choice): - if choice == "Male1": - spk_icon = gr.Image.update(value="speaker_icons/male1.png", visible=True) - elif choice == "Male2": - spk_icon = gr.Image.update(value="speaker_icons/male2.png", visible=True) - elif choice == "Male3": - spk_icon = gr.Image.update(value="speaker_icons/male3.png", visible=True) - elif choice == "Female1": - spk_icon = gr.Image.update(value="speaker_icons/female1.png", visible=True) - elif choice == "Female2": - spk_icon = gr.Image.update(value="speaker_icons/female2.png", visible=True) - elif choice == "Female3": - spk_icon = gr.Image.update(value="speaker_icons/female3.png", visible=True) - return spk_icon - -def get_download_file(audio_file=None): - if audio_file == None: - output_audio_file = gr.File.update(visible=False) - else: - output_audio_file = gr.File.update(visible=True) - return output_audio_file - -def download_file(audio_file): - return gr.File(value=audio_file) -# pdb.set_trace() - -with gr.Blocks( - analytics_enabled=False, - css=".gradio-container {background-color: #78BD91}", -) as demo: - # Public Version - with gr.Tab("Open Version"): - with gr.Column(elem_id="Column"): - input_format = gr.Radio( - choices=["microphone", "upload"], label="Choose your input format", elem_id="input_format" - ) - input_audio = gr.Audio( - source="microphone", - type="filepath", - label="Input Audio", - interactive=True, - visible=False, 
- elem_id="input_audio" - ) - input_format.change( - fn=change_audiobox, inputs=input_format, outputs=input_audio - ) - - speaker_option = gr.Radio(choices=spk_names, value="Male1", label="Choose your voice profile") - spk_icon = gr.Image(value="speaker_icons/male1.png", - type="filepath", - image_mode="RGB", - source="upload", - shape=[50, 50], - interactive=True, - visible=True) - speaker_option.change( - fn=show_icon, inputs=speaker_option, outputs=spk_icon - ) - - b = gr.Button("Convert") - - output_audio = gr.Audio( - source="upload", file="filepath", label="Converted Audio", interactive=False - ) - - b.click( - ASRTTS_clean, - inputs=[input_audio, speaker_option], - outputs=output_audio, - api_name="convert" - ) - - # # Tab selection: - # with gr.Tab("Test Version: Multi TTS model"): - # with gr.Column(elem_id="Column"): - # input_format = gr.Radio( - # choices=["microphone", "upload"], label="Choose your input format", elem_id="input_format" - # ) - # input_audio = gr.Audio( - # source="microphone", - # type="filepath", - # label="Input Audio", - # interactive=True, - # visible=False, - # elem_id="input_audio" - # ) - # input_format.change( - # fn=change_audiobox, inputs=input_format, outputs=input_audio - # ) - - # speaker_option = gr.Radio(choices=spk_names, value="Male1", label="Choose your voice profile") - # spk_icon = gr.Image(value="speaker_icons/male1.png", - # type="filepath", - # image_mode="RGB", - # source="upload", - # shape=[50, 50], - # interactive=True, - # visible=True) - # speaker_option.change( - # fn=show_icon, inputs=speaker_option, outputs=spk_icon - # ) - # with gr.Column(): - # with gr.Row(): - # b2 = gr.Button("Convert") - - # output_audio = gr.Audio( - # source="upload", file="filepath", label="Converted Audio", interactive=False - # ) - - # b2.click( - # ASRTTS_clean, - # inputs=[input_audio, speaker_option], - # outputs=output_audio, - # api_name="convert_" - # ) - # with gr.Row(): - # # Fastspeech2 + PWG [under construction] - # b_ft2 = gr.Button("Convert_fastspeech2") - - # output_audio_ft2= gr.Audio( - # source="upload", file="filepath", label="Converted Audio", interactive=False - # ) - - # b_ft2.click( - # ft2_ASRTTS_clean, - # inputs=[input_audio, speaker_option], - # outputs=output_audio_ft2, - # api_name="convert_ft2" - # ) - # with gr.Row(): - # # Fastspeech2 + hifigan [under construction] - # b_ft2_hifi = gr.Button("Convert_fastspeech2+HifiGAN") - - # output_audio_ft2_hifi= gr.Audio( - # source="upload", file="filepath", label="Converted Audio", interactive=False - # ) - - # b_ft2_hifi.click( - # ft2_ASRTTS_clean_hifi, - # inputs=[input_audio, speaker_option], - # outputs=output_audio_ft2_hifi, - # api_name="convert_ft2_hifi" - # ) - # with gr.Row(): - # # transformer [TODO] - # b_transformer = gr.Button("Convert_transformer") - - # output_audio_transformer= gr.Audio( - # source="upload", file="filepath", label="Converted Audio", interactive=False - # ) - - # b_transformer.click( - # transformer_ASRTTS_clean, - # inputs=[input_audio, speaker_option], - # outputs=output_audio_transformer, - # api_name="convert_trans" - # ) - - # google tts [TODO] - # b_google = gr.Button("Convert_googleTTS") - - # output_audio_google= gr.Audio( - # source="upload", file="filepath", label="Converted Audio", interactive=False - # ) - - # b_google.click( - # google_ASRTTS_clean, - # inputs=[input_audio, speaker_option], - # outputs=output_audio_google, - # api_name="convert" - # ) - -demo.launch(share=False) \ No newline at end of file diff --git 
a/spaces/Learner/jax-diffuser-event-battlemaps/README.md b/spaces/Learner/jax-diffuser-event-battlemaps/README.md deleted file mode 100644 index 5fbdaeb3c89faf71591f2ff77444eaf2fa8a82c3..0000000000000000000000000000000000000000 --- a/spaces/Learner/jax-diffuser-event-battlemaps/README.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Jax Diffuser Event Battlemaps -emoji: 🎮 -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.28.0 -app_file: app.py -pinned: false -models: - - Learner/jax-diffuser-event - - runwayml/stable-diffusion-v1-5 -tags: - - jax-diffusers-event ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Lianjd/stock_dashboard/backtrader/analyzers/sharpe.py b/spaces/Lianjd/stock_dashboard/backtrader/analyzers/sharpe.py deleted file mode 100644 index baaeab1db4e45728bb5ba34dee06552e2d533a16..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/analyzers/sharpe.py +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import math - -from backtrader.utils.py3 import itervalues - -from backtrader import Analyzer, TimeFrame -from backtrader.mathsupport import average, standarddev -from backtrader.analyzers import TimeReturn, AnnualReturn - - -class SharpeRatio(Analyzer): - '''This analyzer calculates the SharpeRatio of a strategy using a risk free - asset which is simply an interest rate - - See also: - - - https://en.wikipedia.org/wiki/Sharpe_ratio - - Params: - - - ``timeframe``: (default: ``TimeFrame.Years``) - - - ``compression`` (default: ``1``) - - Only used for sub-day timeframes to for example work on an hourly - timeframe by specifying "TimeFrame.Minutes" and 60 as compression - - - ``riskfreerate`` (default: 0.01 -> 1%) - - Expressed in annual terms (see ``convertrate`` below) - - - ``convertrate`` (default: ``True``) - - Convert the ``riskfreerate`` from annual to monthly, weekly or daily - rate. Sub-day conversions are not supported - - - ``factor`` (default: ``None``) - - If ``None``, the conversion factor for the riskfree rate from *annual* - to the chosen timeframe will be chosen from a predefined table - - Days: 252, Weeks: 52, Months: 12, Years: 1 - - Else the specified value will be used - - - ``annualize`` (default: ``False``) - - If ``convertrate`` is ``True``, the *SharpeRatio* will be delivered in - the ``timeframe`` of choice. - - In most occasions the SharpeRatio is delivered in annualized form. - Convert the ``riskfreerate`` from annual to monthly, weekly or daily - rate. 
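The ``convertrate``/``annualize`` behavior described in these parameter notes boils down to two small steps in ``stop()`` below; a minimal illustrative sketch (values are hypothetical, not part of backtrader):

```python
# Illustrative only: convert a 1% annual risk-free rate to a daily rate using the
# default Days factor (252) and annualize a per-period Sharpe ratio by sqrt(factor).
annual_rate = 0.01
factor = 252                                                # RATEFACTORS[TimeFrame.Days]
daily_rate = (1.0 + annual_rate) ** (1.0 / factor) - 1.0    # ~3.95e-05
period_sharpe = 0.05                                        # hypothetical per-period ratio
annualized_sharpe = (factor ** 0.5) * period_sharpe         # ~0.79
```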
Sub-day conversions are not supported - - - ``stddev_sample`` (default: ``False``) - - If this is set to ``True`` the *standard deviation* will be calculated - decreasing the denominator in the mean by ``1``. This is used when - calculating the *standard deviation* if it's considered that not all - samples are used for the calculation. This is known as the *Bessels' - correction* - - - ``daysfactor`` (default: ``None``) - - Old naming for ``factor``. If set to anything else than ``None`` and - the ``timeframe`` is ``TimeFrame.Days`` it will be assumed this is old - code and the value will be used - - - ``legacyannual`` (default: ``False``) - - Use the ``AnnualReturn`` return analyzer, which as the name implies - only works on years - - - ``fund`` (default: ``None``) - - If ``None`` the actual mode of the broker (fundmode - True/False) will - be autodetected to decide if the returns are based on the total net - asset value or on the fund value. See ``set_fundmode`` in the broker - documentation - - Set it to ``True`` or ``False`` for a specific behavior - - Methods: - - - get_analysis - - Returns a dictionary with key "sharperatio" holding the ratio - - ''' - params = ( - ('timeframe', TimeFrame.Years), - ('compression', 1), - ('riskfreerate', 0.01), - ('factor', None), - ('convertrate', True), - ('annualize', False), - ('stddev_sample', False), - - # old behavior - ('daysfactor', None), - ('legacyannual', False), - ('fund', None), - ) - - RATEFACTORS = { - TimeFrame.Days: 252, - TimeFrame.Weeks: 52, - TimeFrame.Months: 12, - TimeFrame.Years: 1, - } - - def __init__(self): - if self.p.legacyannual: - self.anret = AnnualReturn() - else: - self.timereturn = TimeReturn( - timeframe=self.p.timeframe, - compression=self.p.compression, - fund=self.p.fund) - - def stop(self): - super(SharpeRatio, self).stop() - if self.p.legacyannual: - rate = self.p.riskfreerate - retavg = average([r - rate for r in self.anret.rets]) - retdev = standarddev(self.anret.rets) - - self.ratio = retavg / retdev - else: - # Get the returns from the subanalyzer - returns = list(itervalues(self.timereturn.get_analysis())) - - rate = self.p.riskfreerate # - - factor = None - - # Hack to identify old code - if self.p.timeframe == TimeFrame.Days and \ - self.p.daysfactor is not None: - - factor = self.p.daysfactor - - else: - if self.p.factor is not None: - factor = self.p.factor # user specified factor - elif self.p.timeframe in self.RATEFACTORS: - # Get the conversion factor from the default table - factor = self.RATEFACTORS[self.p.timeframe] - - if factor is not None: - # A factor was found - - if self.p.convertrate: - # Standard: downgrade annual returns to timeframe factor - rate = pow(1.0 + rate, 1.0 / factor) - 1.0 - else: - # Else upgrade returns to yearly returns - returns = [pow(1.0 + x, factor) - 1.0 for x in returns] - - lrets = len(returns) - self.p.stddev_sample - # Check if the ratio can be calculated - if lrets: - # Get the excess returns - arithmetic mean - original sharpe - ret_free = [r - rate for r in returns] - ret_free_avg = average(ret_free) - retdev = standarddev(ret_free, avgx=ret_free_avg, - bessel=self.p.stddev_sample) - - try: - ratio = ret_free_avg / retdev - - if factor is not None and \ - self.p.convertrate and self.p.annualize: - - ratio = math.sqrt(factor) * ratio - except (ValueError, TypeError, ZeroDivisionError): - ratio = None - else: - # no returns or stddev_sample was active and 1 return - ratio = None - - self.ratio = ratio - - self.rets['sharperatio'] = self.ratio - - -class 
SharpeRatio_A(SharpeRatio): - '''Extension of the SharpeRatio which returns the Sharpe Ratio directly in - annualized form - - The following param has been changed from ``SharpeRatio`` - - - ``annualize`` (default: ``True``) - - ''' - - params = ( - ('annualize', True), - ) diff --git a/spaces/LightChen2333/OpenSLU/model/decoder/interaction/__init__.py b/spaces/LightChen2333/OpenSLU/model/decoder/interaction/__init__.py deleted file mode 100644 index 4e411af2bcfac5aeda51c746e5371f84300781e4..0000000000000000000000000000000000000000 --- a/spaces/LightChen2333/OpenSLU/model/decoder/interaction/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from model.decoder.interaction.agif_interaction import AGIFInteraction -from model.decoder.interaction.base_interaction import BaseInteraction -from model.decoder.interaction.bi_model_interaction import BiModelInteraction, BiModelWithoutDecoderInteraction -from model.decoder.interaction.dca_net_interaction import DCANetInteraction -from model.decoder.interaction.gl_gin_interaction import GLGINInteraction -from model.decoder.interaction.slot_gated_interaction import SlotGatedInteraction -from model.decoder.interaction.stack_interaction import StackInteraction - -__all__ = ["BaseInteraction", "BiModelInteraction", "BiModelWithoutDecoderInteraction", "DCANetInteraction", - "StackInteraction", "SlotGatedInteraction", "AGIFInteraction", "GLGINInteraction"] diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/tps/crnn_tps_academic_dataset.py b/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/tps/crnn_tps_academic_dataset.py deleted file mode 100644 index 15607538d0c31de2e4baadf0b30d781f534b99bb..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/tps/crnn_tps_academic_dataset.py +++ /dev/null @@ -1,33 +0,0 @@ -_base_ = [ - '../../_base_/default_runtime.py', '../../_base_/recog_models/crnn_tps.py', - '../../_base_/recog_pipelines/crnn_tps_pipeline.py', - '../../_base_/recog_datasets/MJ_train.py', - '../../_base_/recog_datasets/academic_test.py', - '../../_base_/schedules/schedule_adadelta_5e.py' -] - -train_list = {{_base_.train_list}} -test_list = {{_base_.test_list}} - -train_pipeline = {{_base_.train_pipeline}} -test_pipeline = {{_base_.test_pipeline}} - -data = dict( - samples_per_gpu=64, - workers_per_gpu=4, - train=dict( - type='UniformConcatDataset', - datasets=train_list, - pipeline=train_pipeline), - val=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline), - test=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline)) - -evaluation = dict(interval=1, metric='acc') - -cudnn_benchmark = True diff --git a/spaces/LucasCodeBreak/MusicGen/tests/common_utils/temp_utils.py b/spaces/LucasCodeBreak/MusicGen/tests/common_utils/temp_utils.py deleted file mode 100644 index d1e0367e979c8b9fea65472c373916d956ad5aaa..0000000000000000000000000000000000000000 --- a/spaces/LucasCodeBreak/MusicGen/tests/common_utils/temp_utils.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import os -import tempfile - - -class TempDirMixin: - """Mixin to provide easy access to temp dir. - """ - - temp_dir_ = None - - @classmethod - def get_base_temp_dir(cls): - # If AUDIOCRAFT_TEST_DIR is set, use it instead of temporary directory. - # this is handy for debugging. 
- key = "AUDIOCRAFT_TEST_DIR" - if key in os.environ: - return os.environ[key] - if cls.temp_dir_ is None: - cls.temp_dir_ = tempfile.TemporaryDirectory() - return cls.temp_dir_.name - - @classmethod - def tearDownClass(cls): - if cls.temp_dir_ is not None: - try: - cls.temp_dir_.cleanup() - cls.temp_dir_ = None - except PermissionError: - # On Windows there is a know issue with `shutil.rmtree`, - # which fails intermittenly. - # https://github.com/python/cpython/issues/74168 - # Following the above thread, we ignore it. - pass - super().tearDownClass() - - @property - def id(self): - return self.__class__.__name__ - - def get_temp_path(self, *paths): - temp_dir = os.path.join(self.get_base_temp_dir(), self.id) - path = os.path.join(temp_dir, *paths) - os.makedirs(os.path.dirname(path), exist_ok=True) - return path - - def get_temp_dir(self, *paths): - temp_dir = os.path.join(self.get_base_temp_dir(), self.id) - path = os.path.join(temp_dir, *paths) - os.makedirs(path, exist_ok=True) - return path diff --git a/spaces/LucasCodeBreak/MusicGen/tests/quantization/test_vq.py b/spaces/LucasCodeBreak/MusicGen/tests/quantization/test_vq.py deleted file mode 100644 index c215099fedacae35c6798fdd9b8420a447aa16bb..0000000000000000000000000000000000000000 --- a/spaces/LucasCodeBreak/MusicGen/tests/quantization/test_vq.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from audiocraft.quantization.vq import ResidualVectorQuantizer - - -class TestResidualVectorQuantizer: - - def test_rvq(self): - x = torch.randn(1, 16, 2048) - vq = ResidualVectorQuantizer(n_q=8, dimension=16, bins=8) - res = vq(x, 1.) - assert res.x.shape == torch.Size([1, 16, 2048]) diff --git a/spaces/MRroboto/Loacker_app/model.py b/spaces/MRroboto/Loacker_app/model.py deleted file mode 100644 index 8a72c3a22746a9ce2ed9f45080f5a521bb25896b..0000000000000000000000000000000000000000 --- a/spaces/MRroboto/Loacker_app/model.py +++ /dev/null @@ -1,148 +0,0 @@ -"""Pytorch implementation of AESc model architecture""" - -__author__ = "Jonas Rabensteiner" - -import torch -from torch import nn -from torchvision.transforms.functional import crop - -# create model - -def resize_layer(conv, deconv): - """If needed resize the feature map of the deconvolution to the size of the corresponding feature map from the encoder part. 
- - Args: - conv (tensor): feature map of the encoder part - deconv (tensor): corresponding feature map of the decoder part - - Returns: - tensor: resized decoder feature map - """ - height = deconv.shape[2] - width= deconv.shape[3] - if deconv.shape[2] > conv.shape[2]: - deconv = crop(deconv, top=0, left=0, height=height-1, width=width) - #nn.Cropping2D(cropping=((0, 1), (0, 0)))(deconv) - if deconv.shape[3] > conv.shape[3]: - deconv = crop(deconv, top=0, left=0, height=height, width=width-1) - #nn.Cropping2D(cropping=((0, 0), (0, 1)))(deconv) - return deconv - - -class conv_block(nn.Module): - """Layer "block" of 2D (de)convolution, batch normalization and activation""" - def __init__(self, in_c, out_c, kernel_size=5, stride=True, activation=nn.LeakyReLU()): - super().__init__() - if not stride: - self.conv = nn.Conv2d(in_c, out_c, kernel_size=kernel_size, padding="same") - else: - self.conv = nn.Conv2d(in_c, out_c, kernel_size=kernel_size, stride=2, padding=2) - self.bn = nn.BatchNorm2d(out_c) - self.activation = activation - - def forward(self, inputs): - x = self.conv(inputs) - x = self.bn(x) - x = self.activation(x) - return x - -class encoder_block(nn.Module): - """Layer "block" of conv_block and dropout, which corresponds to one "step" of the encoder""" - def __init__(self, in_c, out_c, kernel_size=5, activation=nn.LeakyReLU(), dropout_rate=0.0): - super().__init__() - self.conv = conv_block(in_c, out_c, kernel_size, activation) - self.dropout = nn.Dropout(dropout_rate) - - def forward(self, inputs): - x = self.conv(inputs) - x = self.dropout(x) - return x - - -class decoder_block(nn.Module): - """Layer "block" of upsampling, conv_block, dropout and skip connections which corresponds to one "step" of the decoder""" - def __init__(self, in_c, out_c, kernel_size=5, activation=nn.LeakyReLU(),dropout_rate=0.0, skip_connections=True): - super().__init__() - self.up = nn.UpsamplingNearest2d(scale_factor=2) - self.conv = conv_block(in_c, out_c, kernel_size, stride=False) - self.dropout = nn.Dropout(dropout_rate) - self.skip_connections = skip_connections - - def forward(self, inputs, skip): - #print(inputs.shape) - x = self.conv(inputs) - #print("skip shape", skip.shape) - x = resize_layer(skip, x) - #print("x resized shape", x.shape) - if self.skip_connections: - x = skip + x - x = self.up(x) - x = self.dropout(x) - #print(x.shape) - return x - - -class AESc(nn.Module): - """Autoencoder with skip connections (AESc) according to the original paper by Anne-Sophie Collin.""" - - def __init__(self, cmap = "rgb", kernel_size=5, activation=nn.LeakyReLU(), dropout_rate=0.0): - """Instantiates the model layers - - Args: - cmap (str, optional): color map to use for the model. Either "rgb" or "gray". Defaults to "rgb". - kernel_size (int, optional): kernel size. Defaults to 5. - activation (nn.Module, optional): activation function. Defaults to nn.LeakyReLU(). - dropout_rate (float, optional): dropout rate. Defaults to 0.0. 
- """ - super().__init__() - - #Encoder - if cmap == "gray": - self.e1 = encoder_block(1, 16, kernel_size, activation, dropout_rate) - else: - self.e1 = encoder_block(3, 16, kernel_size, activation, dropout_rate) - self.e2 = encoder_block(16, 32, kernel_size, activation, dropout_rate) - self.e3 = encoder_block(32, 64, kernel_size, activation, dropout_rate) - self.e4 = encoder_block(64, 128, kernel_size, activation, dropout_rate) - self.e5 = encoder_block(128, 256, kernel_size, activation, dropout_rate) - self.e6 = encoder_block(256, 512, kernel_size, activation, dropout_rate) - - #Decoder - self.d1 = nn.UpsamplingNearest2d(scale_factor=2) - self.d2 = decoder_block(512, 256, kernel_size, activation, dropout_rate) - self.d3 = decoder_block(256, 128, kernel_size, activation, dropout_rate) - self.d4 = decoder_block(128, 64, kernel_size, activation, dropout_rate) - self.d5 = decoder_block(64, 32, kernel_size, activation, dropout_rate) - self.d6 = decoder_block(32, 16, kernel_size, activation, dropout_rate) - - #Output - if cmap == "gray": - self.outputs = conv_block(16, 1, stride=False, activation=nn.Identity()) - else: - self.outputs = conv_block(16, 3, stride=False, activation=nn.Identity()) - self.sigmoid = nn.Sigmoid() - - - def forward(self, inputs): - #Encoder - p1 = self.e1(inputs) - p2 = self.e2(p1) - p3 = self.e3(p2) - p4 = self.e4(p3) - p5 = self.e5(p4) - p6 = self.e6(p5) - - - #Decoder - d1 = self.d1(p6) - d2 = self.d2(d1, p5) - d3 = self.d3(d2, p4) - d4 = self.d4(d3, p3) - d5 = self.d5(d4, p2) - d6 = self.d6(d5, p1) - - #Output - outputs = self.outputs(d6) - outputs = self.sigmoid(outputs) - outputs = resize_layer(inputs, outputs) - return outputs diff --git a/spaces/Mahiruoshi/BangDream-Bert-VITS2/text/japanese_bert.py b/spaces/Mahiruoshi/BangDream-Bert-VITS2/text/japanese_bert.py deleted file mode 100644 index 5dd196483da4355746383253879190ce538b9df9..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/BangDream-Bert-VITS2/text/japanese_bert.py +++ /dev/null @@ -1,38 +0,0 @@ -import torch -from transformers import AutoTokenizer, AutoModelForMaskedLM -import sys - -tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3") - -models = dict() - - -def get_bert_feature(text, word2ph, device=None): - if ( - sys.platform == "darwin" - and torch.backends.mps.is_available() - and device == "cpu" - ): - device = "mps" - if not device: - device = "cuda" - if device not in models.keys(): - models[device] = AutoModelForMaskedLM.from_pretrained( - "./bert/bert-base-japanese-v3" - ).to(device) - with torch.no_grad(): - inputs = tokenizer(text, return_tensors="pt") - for i in inputs: - inputs[i] = inputs[i].to(device) - res = models[device](**inputs, output_hidden_states=True) - res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu() - assert inputs["input_ids"].shape[-1] == len(word2ph) - word2phone = word2ph - phone_level_feature = [] - for i in range(len(word2phone)): - repeat_feature = res[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - - return phone_level_feature.T diff --git a/spaces/Mahiruoshi/MyGO_VIts-bert/bert/chinese-roberta-wwm-ext-large/README.md b/spaces/Mahiruoshi/MyGO_VIts-bert/bert/chinese-roberta-wwm-ext-large/README.md deleted file mode 100644 index 7bce039b7f81ee328fdf8efe3f14409200aacbef..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/MyGO_VIts-bert/bert/chinese-roberta-wwm-ext-large/README.md +++ /dev/null @@ -1,57 +0,0 @@ ---- 
-language: -- zh -tags: -- bert -license: "apache-2.0" ---- - -# Please use 'Bert' related functions to load this model! - -## Chinese BERT with Whole Word Masking -For further accelerating Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**. - -**[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)** -Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu - -This repository is developed based on:https://github.com/google-research/bert - -You may also interested in, -- Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm -- Chinese MacBERT: https://github.com/ymcui/MacBERT -- Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA -- Chinese XLNet: https://github.com/ymcui/Chinese-XLNet -- Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer - -More resources by HFL: https://github.com/ymcui/HFL-Anthology - -## Citation -If you find the technical report or resource is useful, please cite the following technical report in your paper. -- Primary: https://arxiv.org/abs/2004.13922 -``` -@inproceedings{cui-etal-2020-revisiting, - title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing", - author = "Cui, Yiming and - Che, Wanxiang and - Liu, Ting and - Qin, Bing and - Wang, Shijin and - Hu, Guoping", - booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings", - month = nov, - year = "2020", - address = "Online", - publisher = "Association for Computational Linguistics", - url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58", - pages = "657--668", -} -``` -- Secondary: https://arxiv.org/abs/1906.08101 -``` -@article{chinese-bert-wwm, - title={Pre-Training with Whole Word Masking for Chinese BERT}, - author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping}, - journal={arXiv preprint arXiv:1906.08101}, - year={2019} - } -``` \ No newline at end of file diff --git a/spaces/Mashir0/pximg/middleware/illust.js b/spaces/Mashir0/pximg/middleware/illust.js deleted file mode 100644 index 54fbeb975f4c503fe89c121023beebf51f601ad4..0000000000000000000000000000000000000000 --- a/spaces/Mashir0/pximg/middleware/illust.js +++ /dev/null @@ -1,37 +0,0 @@ -const NodeCache = require('node-cache'); -const { pixivReverseProxy, getIllustPages, convertPage, getPixivErrorMsg } = require('../utils/pixiv'); - -const illustCache = new NodeCache({ stdTTL: 3600, checkperiod: 60, useClones: false }); - -/** - * @type {import('koa-router').IMiddleware} - */ -module.exports = async ctx => { - const { 0: size = 'original', 1: pid, 2: p = 0 } = ctx.params; - let urls = illustCache.get(pid); - - if (!urls) { - try { - urls = await getIllustPages(pid, { language: ctx.headers['accept-language'] }); - illustCache.set(pid, urls); - } catch (error) { - ctx.body = getPixivErrorMsg(error); - ctx.status = 502; - ctx.set('cache-control', 'no-cache'); - return; - } - } - - try { - if (!urls[p]) { - ctx.status = 404; - return; - } - const path = new URL(convertPage(urls[p], size)).pathname; - const paths = path.split('/'); - const filename = paths[paths.length - 1]; - return pixivReverseProxy(ctx, path, () => ctx.set('content-disposition', `filename="${filename}"`)); - } catch { - ctx.status = 404; - } -}; diff --git a/spaces/MathysL/AutoGPT4/autogpt/json_utils/json_fix_llm.py b/spaces/MathysL/AutoGPT4/autogpt/json_utils/json_fix_llm.py 
deleted file mode 100644 index 869aed125cfb8cd7a69ed02eeb389cc72a3e296b..0000000000000000000000000000000000000000 --- a/spaces/MathysL/AutoGPT4/autogpt/json_utils/json_fix_llm.py +++ /dev/null @@ -1,220 +0,0 @@ -"""This module contains functions to fix JSON strings generated by LLM models, such as ChatGPT, using the assistance -of the ChatGPT API or LLM models.""" -from __future__ import annotations - -import contextlib -import json -from typing import Any, Dict - -from colorama import Fore -from regex import regex - -from autogpt.config import Config -from autogpt.json_utils.json_fix_general import correct_json -from autogpt.llm_utils import call_ai_function -from autogpt.logs import logger -from autogpt.speech import say_text - -JSON_SCHEMA = """ -{ - "command": { - "name": "command name", - "args": { - "arg name": "value" - } - }, - "thoughts": - { - "text": "thought", - "reasoning": "reasoning", - "plan": "- short bulleted\n- list that conveys\n- long-term plan", - "criticism": "constructive self-criticism", - "speak": "thoughts summary to say to user" - } -} -""" - -CFG = Config() - - -def auto_fix_json(json_string: str, schema: str) -> str: - """Fix the given JSON string to make it parseable and fully compliant with - the provided schema using GPT-3. - - Args: - json_string (str): The JSON string to fix. - schema (str): The schema to use to fix the JSON. - Returns: - str: The fixed JSON string. - """ - # Try to fix the JSON using GPT: - function_string = "def fix_json(json_string: str, schema:str=None) -> str:" - args = [f"'''{json_string}'''", f"'''{schema}'''"] - description_string = ( - "This function takes a JSON string and ensures that it" - " is parseable and fully compliant with the provided schema. If an object" - " or field specified in the schema isn't contained within the correct JSON," - " it is omitted. The function also escapes any double quotes within JSON" - " string values to ensure that they are valid. If the JSON string contains" - " any None or NaN values, they are replaced with null before being parsed." - ) - - # If it doesn't already start with a "`", add one: - if not json_string.startswith("`"): - json_string = "```json\n" + json_string + "\n```" - result_string = call_ai_function( - function_string, args, description_string, model=CFG.fast_llm_model - ) - logger.debug("------------ JSON FIX ATTEMPT ---------------") - logger.debug(f"Original JSON: {json_string}") - logger.debug("-----------") - logger.debug(f"Fixed JSON: {result_string}") - logger.debug("----------- END OF FIX ATTEMPT ----------------") - - try: - json.loads(result_string) # just check the validity - return result_string - except json.JSONDecodeError: # noqa: E722 - # Get the call stack: - # import traceback - # call_stack = traceback.format_exc() - # print(f"Failed to fix JSON: '{json_string}' "+call_stack) - return "failed" - - -def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: - """Fix the given JSON string to make it parseable and fully compliant with two techniques. - - Args: - json_string (str): The JSON string to fix. - - Returns: - str: The fixed JSON string. 
- """ - - # Parse and print Assistant response - assistant_reply_json = fix_and_parse_json(assistant_reply) - if assistant_reply_json == {}: - assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply - ) - - if assistant_reply_json != {}: - return assistant_reply_json - - logger.error( - "Error: The following AI output couldn't be converted to a JSON:\n", - assistant_reply, - ) - if CFG.speak_mode: - say_text("I have received an invalid JSON response from the OpenAI API.") - - return {} - - -def fix_and_parse_json( - json_to_load: str, try_to_fix_with_gpt: bool = True -) -> Dict[Any, Any]: - """Fix and parse JSON string - - Args: - json_to_load (str): The JSON string. - try_to_fix_with_gpt (bool, optional): Try to fix the JSON with GPT. - Defaults to True. - - Returns: - str or dict[Any, Any]: The parsed JSON. - """ - - with contextlib.suppress(json.JSONDecodeError): - json_to_load = json_to_load.replace("\t", "") - return json.loads(json_to_load) - - with contextlib.suppress(json.JSONDecodeError): - json_to_load = correct_json(json_to_load) - return json.loads(json_to_load) - # Let's do something manually: - # sometimes GPT responds with something BEFORE the braces: - # "I'm sorry, I don't understand. Please try again." - # {"text": "I'm sorry, I don't understand. Please try again.", - # "confidence": 0.0} - # So let's try to find the first brace and then parse the rest - # of the string - try: - brace_index = json_to_load.index("{") - maybe_fixed_json = json_to_load[brace_index:] - last_brace_index = maybe_fixed_json.rindex("}") - maybe_fixed_json = maybe_fixed_json[: last_brace_index + 1] - return json.loads(maybe_fixed_json) - except (json.JSONDecodeError, ValueError) as e: - return try_ai_fix(try_to_fix_with_gpt, e, json_to_load) - - -def try_ai_fix( - try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str -) -> Dict[Any, Any]: - """Try to fix the JSON with the AI - - Args: - try_to_fix_with_gpt (bool): Whether to try to fix the JSON with the AI. - exception (Exception): The exception that was raised. - json_to_load (str): The JSON string to load. - - Raises: - exception: If try_to_fix_with_gpt is False. - - Returns: - str or dict[Any, Any]: The JSON string or dictionary. - """ - if not try_to_fix_with_gpt: - raise exception - if CFG.debug_mode: - logger.warn( - "Warning: Failed to parse AI output, attempting to fix." - "\n If you see this warning frequently, it's likely that" - " your prompt is confusing the AI. Try changing it up" - " slightly." - ) - # Now try to fix this up using the ai_functions - ai_fixed_json = auto_fix_json(json_to_load, JSON_SCHEMA) - - if ai_fixed_json != "failed": - return json.loads(ai_fixed_json) - # This allows the AI to react to the error message, - # which usually results in it correcting its ways. - # logger.error("Failed to fix AI output, telling the AI.") - return {} - - -def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str): - if CFG.speak_mode and CFG.debug_mode: - say_text( - "I have received an invalid JSON response from the OpenAI API. " - "Trying to fix it now." 
- ) - logger.error("Attempting to fix JSON by finding outermost brackets\n") - - try: - json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}") - json_match = json_pattern.search(json_string) - - if json_match: - # Extract the valid JSON object from the string - json_string = json_match.group(0) - logger.typewriter_log( - title="Apparently json was fixed.", title_color=Fore.GREEN - ) - if CFG.speak_mode and CFG.debug_mode: - say_text("Apparently json was fixed.") - else: - return {} - - except (json.JSONDecodeError, ValueError): - if CFG.debug_mode: - logger.error(f"Error: Invalid JSON: {json_string}\n") - if CFG.speak_mode: - say_text("Didn't work. I will have to ignore this response then.") - logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") - json_string = {} - - return fix_and_parse_json(json_string) diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/tutorial_train.py b/spaces/Mellow-ai/PhotoAI_Mellow/tutorial_train.py deleted file mode 100644 index 393d7addb164c32eff9c3d675e4f32fb555868f0..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/tutorial_train.py +++ /dev/null @@ -1,35 +0,0 @@ -from share import * - -import pytorch_lightning as pl -from torch.utils.data import DataLoader -from tutorial_dataset import MyDataset -from cldm.logger import ImageLogger -from cldm.model import create_model, load_state_dict - - -# Configs -resume_path = './models/control_sd15_ini.ckpt' -batch_size = 4 -logger_freq = 300 -learning_rate = 1e-5 -sd_locked = True -only_mid_control = False - - -# First use cpu to load models. Pytorch Lightning will automatically move it to GPUs. -model = create_model('./models/cldm_v15.yaml').cpu() -model.load_state_dict(load_state_dict(resume_path, location='cpu')) -model.learning_rate = learning_rate -model.sd_locked = sd_locked -model.only_mid_control = only_mid_control - - -# Misc -dataset = MyDataset() -dataloader = DataLoader(dataset, num_workers=0, batch_size=batch_size, shuffle=True) -logger = ImageLogger(batch_frequency=logger_freq) -trainer = pl.Trainer(gpus=1, precision=32, callbacks=[logger]) - - -# Train! 
-trainer.fit(model, dataloader) diff --git a/spaces/MichaelT8093/Mandarin-TTS/bert/ProsodyModel.py b/spaces/MichaelT8093/Mandarin-TTS/bert/ProsodyModel.py deleted file mode 100644 index 5f305b41894a4a8cec05c23dcdd29a9b939b748b..0000000000000000000000000000000000000000 --- a/spaces/MichaelT8093/Mandarin-TTS/bert/ProsodyModel.py +++ /dev/null @@ -1,75 +0,0 @@ -import os -import torch -import torch.nn as nn -import torch.nn.functional as F - -from transformers import BertModel, BertConfig, BertTokenizer - - -class CharEmbedding(nn.Module): - def __init__(self, model_dir): - super().__init__() - self.tokenizer = BertTokenizer.from_pretrained(model_dir) - self.bert_config = BertConfig.from_pretrained(model_dir) - self.hidden_size = self.bert_config.hidden_size - self.bert = BertModel(self.bert_config) - self.proj = nn.Linear(self.hidden_size, 256) - self.linear = nn.Linear(256, 3) - - def text2Token(self, text): - token = self.tokenizer.tokenize(text) - txtid = self.tokenizer.convert_tokens_to_ids(token) - return txtid - - def forward(self, inputs_ids, inputs_masks, tokens_type_ids): - out_seq = self.bert(input_ids=inputs_ids, - attention_mask=inputs_masks, - token_type_ids=tokens_type_ids)[0] - out_seq = self.proj(out_seq) - return out_seq - - -class TTSProsody(object): - def __init__(self, path, device): - self.device = device - self.char_model = CharEmbedding(path) - self.char_model.load_state_dict( - torch.load( - os.path.join(path, 'prosody_model.pt'), - map_location="cpu" - ), - strict=False - ) - self.char_model.eval() - self.char_model.to(self.device) - - def get_char_embeds(self, text): - input_ids = self.char_model.text2Token(text) - input_masks = [1] * len(input_ids) - type_ids = [0] * len(input_ids) - input_ids = torch.LongTensor([input_ids]).to(self.device) - input_masks = torch.LongTensor([input_masks]).to(self.device) - type_ids = torch.LongTensor([type_ids]).to(self.device) - - with torch.no_grad(): - char_embeds = self.char_model( - input_ids, input_masks, type_ids).squeeze(0).cpu() - return char_embeds - - def expand_for_phone(self, char_embeds, length): # length of phones for char - assert char_embeds.size(0) == len(length) - expand_vecs = list() - for vec, leng in zip(char_embeds, length): - vec = vec.expand(leng, -1) - expand_vecs.append(vec) - expand_embeds = torch.cat(expand_vecs, 0) - assert expand_embeds.size(0) == sum(length) - return expand_embeds.numpy() - - -if __name__ == "__main__": - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - prosody = TTSProsody('./bert/', device) - while True: - text = input("请输入文本:") - prosody.get_char_embeds(text) diff --git a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/README.md b/spaces/MisterZee/PIFu-Clothed-Human-Digitization/README.md deleted file mode 100644 index 53e34df08da55169377741bd2e7676843237d810..0000000000000000000000000000000000000000 --- a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: PIFu Clothed Human Digitization -emoji: "🧍🏽‍♀️🧍🏻🧍🏽‍♂️\_" -colorFrom: pink -colorTo: green -sdk: gradio -sdk_version: 3.0.2 -app_file: ./PIFu/spaces.py -pinned: false -python_version: 3.7.13 -duplicated_from: radames/PIFu-Clothed-Human-Digitization ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/__init__.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/__init__.py deleted file mode 100644 index 
abea668b3d52be16b5fe41ab20e3494885bba297..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .common import * # NOQA -from .kie import * # NOQA -from .textdet import * # NOQA -from .textrecog import * # NOQA diff --git a/spaces/MrBodean/VoiceClone/synthesizer_preprocess_embeds.py b/spaces/MrBodean/VoiceClone/synthesizer_preprocess_embeds.py deleted file mode 100644 index 94f864d5d3c36c6177b211f5818e7c920a41cd8c..0000000000000000000000000000000000000000 --- a/spaces/MrBodean/VoiceClone/synthesizer_preprocess_embeds.py +++ /dev/null @@ -1,25 +0,0 @@ -from synthesizer.preprocess import create_embeddings -from utils.argutils import print_args -from pathlib import Path -import argparse - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Creates embeddings for the synthesizer from the LibriSpeech utterances.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument("synthesizer_root", type=Path, help=\ - "Path to the synthesizer training data that contains the audios and the train.txt file. " - "If you let everything as default, it should be /SV2TTS/synthesizer/.") - parser.add_argument("-e", "--encoder_model_fpath", type=Path, - default="encoder/saved_models/pretrained.pt", help=\ - "Path your trained encoder model.") - parser.add_argument("-n", "--n_processes", type=int, default=4, help= \ - "Number of parallel processes. An encoder is created for each, so you may need to lower " - "this value on GPUs with low memory. Set it to 1 if CUDA is unhappy.") - args = parser.parse_args() - - # Preprocess the dataset - print_args(args, parser) - create_embeddings(**vars(args)) diff --git a/spaces/Nyashi/rvc-models-epic/app.py b/spaces/Nyashi/rvc-models-epic/app.py deleted file mode 100644 index e9764db6d391656a90e71f3b69ac5dc71a78b23b..0000000000000000000000000000000000000000 --- a/spaces/Nyashi/rvc-models-epic/app.py +++ /dev/null @@ -1,188 +0,0 @@ -import os -import glob -import json -import argparse -import traceback -import logging -import gradio as gr -import numpy as np -import librosa -import torch -import asyncio -import edge_tts -from datetime import datetime -from fairseq import checkpoint_utils -from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono -from vc_infer_pipeline import VC -from config import Config -config = Config() -logging.getLogger("numba").setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces - -def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index): - def vc_fn( - input_audio, - f0_up_key, - f0_method, - index_rate, - tts_mode, - tts_text, - tts_voice - ): - try: - if tts_mode: - if len(tts_text) > 100 and limitation: - return "Text is too long", None - if tts_text is None or tts_voice is None: - return "You need to enter text and select a voice", None - asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3")) - audio, sr = librosa.load("tts.mp3", sr=16000, mono=True) - else: - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if duration > 20 and limitation: - return "Please upload an audio file that is less than 20 seconds. 
If you need to generate a longer audio file, please use Colab.", None - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - times = [0, 0, 0] - f0_up_key = int(f0_up_key) - audio_opt = vc.pipeline( - hubert_model, - net_g, - 0, - audio, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - if_f0, - f0_file=None, - ) - print( - f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s" - ) - return "Perfecto", (tgt_sr, audio_opt) - except: - info = traceback.format_exc() - print(info) - return info, (None, None) - return vc_fn - -def load_hubert(): - global hubert_model - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - -def change_to_tts_mode(tts_mode): - if tts_mode: - return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True) - else: - return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False) - -if __name__ == '__main__': - load_hubert() - models = [] - tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) - voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list] - if True: - folder_path = "weights" - for name in os.listdir(folder_path): - print("Comprobando carpeta: " + name) - if name.startswith("."): break - if name.endswith(".json"): - continue - - - cover_path = glob.glob(f"{folder_path}/{name}/*.png") + glob.glob(f"{folder_path}/{name}/*.jpg") - index_path = glob.glob(f"{folder_path}/{name}/*.index") - checkpoint_path = glob.glob(f"{folder_path}/{name}/*.pth") - title = name - author = "" - if cover_path: - cover = cover_path[0] - else: - cover = "" - index = index_path[0] - cpt = torch.load(checkpoint_path[0], map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) # 不加这一行清不干净, 真奇葩 - net_g.eval().to(config.device) - if config.is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, config) - models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index))) - with gr.Blocks() as app: - gr.Markdown( - "#
 RVC GURA Model (Latest Update)\n" - "##
 The input audio must be clean: no background music, poor quality, noise, or static\n" - "###
 [I recommend using Google Colab for better functionality, but Gura will not be available there](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing) \n" - #"####
 Please regenerate your model with the latest RVC to fully apply this new RVC.\n" - "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing)\n\n" - "[![Original Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)" - ) - with gr.Tabs(): - for (name, title, author, cover, vc_fn) in models: - with gr.TabItem(name): - with gr.Row(): - gr.Markdown( - '
' - f'
{title}
\n'+ - (f'
Model author: {author}
' if author else "")+ - (f'' if cover else "")+ - '
' - ) - with gr.Row(): - with gr.Column(): - vc_input = gr.Audio(label="Audio de entrada"+' (menos de 20 segundos)' if limitation else '') - vc_transpose = gr.Number(label="Transpose", value=0) - vc_f0method = gr.Radio( - label="Pitch extraction algorithm, PM is fast but Harvest is better for low frequencies /Algoritmo de extracción de tono, PM es rápido pero Harvest es mejor para frecuencias bajas", - choices=["pm", "harvest"], - value="pm", - interactive=True, - ) - vc_index_ratio = gr.Slider( - minimum=0, - maximum=1, - label="Relacion respecto al audio", - value=0.6, - interactive=True, - ) - tts_mode = gr.Checkbox(label="Texto a Voz (usa edge-tts como entrada)", value=False) - tts_text = gr.Textbox(visible=False,label="Texto (100 palabras limite)" if limitation else "Texto TTS") - tts_voice = gr.Dropdown(label="Hablante Edge-tts", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female") - vc_submit = gr.Button("Generar", variant="primary") - with gr.Column(): - vc_output1 = gr.Textbox(label="Mensaje de salida") - vc_output2 = gr.Audio(label="Audio de salida") - vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2],api_name=f"generar") - tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice]) - gr.Markdown('#
Changelog 2023.05.15') - gr.Markdown('- Added support for direct upload to gradio') - gr.Markdown('- Added Gura,Haachama,Selena') - gr.Markdown('- Minor fix and adjustment') - app.queue(concurrency_count=1, max_size=20, api_open=config.api).launch(share=config.colab) \ No newline at end of file diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/data/mm_data/caption_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/data/mm_data/caption_dataset.py deleted file mode 100644 index 2109b19ec0958b5a84429b412d4f62052324147c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/data/mm_data/caption_dataset.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -from io import BytesIO - -import logging -import warnings -import string - -import numpy as np -import torch -import base64 -from torchvision import transforms - -from PIL import Image, ImageFile - -from data import data_utils -from data.ofa_dataset import OFADataset - -ImageFile.LOAD_TRUNCATED_IMAGES = True -ImageFile.MAX_IMAGE_PIXELS = None -Image.MAX_IMAGE_PIXELS = None - -logger = logging.getLogger(__name__) -warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning) - -IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) -IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) - - -def collate(samples, pad_idx, eos_idx): - if len(samples) == 0: - return {} - - def merge(key): - return data_utils.collate_tokens( - [s[key] for s in samples], - pad_idx, - eos_idx=eos_idx, - ) - - id = np.array([s["id"] for s in samples]) - src_tokens = merge("source") - src_lengths = torch.LongTensor([s["source"].ne(pad_idx).long().sum() for s in samples]) - - patch_images = torch.stack([sample['patch_image'] for sample in samples], dim=0) - patch_masks = torch.cat([sample['patch_mask'] for sample in samples]) - - prev_output_tokens = None - target = None - if samples[0].get("target", None) is not None: - target = merge("target") - tgt_lengths = torch.LongTensor([s["target"].ne(pad_idx).long().sum() for s in samples]) - ntokens = tgt_lengths.sum().item() - - if samples[0].get("prev_output_tokens", None) is not None: - prev_output_tokens = merge("prev_output_tokens") - else: - ntokens = src_lengths.sum().item() - - batch = { - "id": id, - "nsentences": len(samples), - "ntokens": ntokens, - "net_input": { - "src_tokens": src_tokens, - "src_lengths": src_lengths, - "patch_images": patch_images, - "patch_masks": patch_masks, - "prev_output_tokens": prev_output_tokens - }, - "target": target, - } - - return batch - - -class CaptionDataset(OFADataset): - def __init__( - self, - split, - dataset, - bpe, - src_dict, - tgt_dict=None, - max_src_length=128, - max_tgt_length=30, - patch_image_size=224, - imagenet_default_mean_and_std=False, - scst=False - ): - super().__init__(split, dataset, bpe, src_dict, tgt_dict) - self.max_src_length = max_src_length - self.max_tgt_length = max_tgt_length - self.patch_image_size = patch_image_size - self.scst = scst - - self.transtab = str.maketrans({key: None for key in string.punctuation}) - - if imagenet_default_mean_and_std: - mean = IMAGENET_DEFAULT_MEAN - std = IMAGENET_DEFAULT_STD - else: - mean = [0.5, 0.5, 0.5] - std = [0.5, 0.5, 0.5] - - self.patch_resize_transform = transforms.Compose([ - lambda image: image.convert("RGB"), - transforms.Resize((patch_image_size, patch_image_size), interpolation=Image.BICUBIC), - 
transforms.ToTensor(), - transforms.Normalize(mean=mean, std=std), - ]) - - def __getitem__(self, index): - uniq_id, image, caption = self.dataset[index] - - image = Image.open(BytesIO(base64.urlsafe_b64decode(image))) - patch_image = self.patch_resize_transform(image) - patch_mask = torch.tensor([True]) - - if self.split == 'train' and not self.scst: - caption = caption.translate(self.transtab).strip() - caption_token_list = caption.strip().split() - tgt_caption = ' '.join(caption_token_list[:self.max_tgt_length]) - else: - caption = ' '.join(caption.strip().split()) - caption_list = [cap.translate(self.transtab).strip() for cap in caption.strip().split('&&')] - tgt_caption = '&&'.join(caption_list) - src_item = self.encode_text(" what does the image describe?") - tgt_item = self.encode_text(" {}".format(tgt_caption)) - - src_item = torch.cat([self.bos_item, src_item, self.eos_item]) - target_item = torch.cat([tgt_item, self.eos_item]) - prev_output_item = torch.cat([self.bos_item, tgt_item]) - - example = { - "id": uniq_id, - "source": src_item, - "patch_image": patch_image, - "patch_mask": patch_mask, - "target": target_item, - "prev_output_tokens": prev_output_item - } - return example - - def collater(self, samples, pad_to_length=None): - """Merge a list of samples to form a mini-batch. - Args: - samples (List[dict]): samples to collate - Returns: - dict: a mini-batch with the following keys: - """ - return collate(samples, pad_idx=self.pad, eos_idx=self.eos) \ No newline at end of file diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/read_binarized.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/read_binarized.py deleted file mode 100644 index a414095d03fb022a6753e816fc8bfd80e11db24d..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/read_binarized.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse - -from fairseq.data import Dictionary, data_utils, indexed_dataset - - -def get_parser(): - parser = argparse.ArgumentParser( - description="writes text from binarized file to stdout" - ) - # fmt: off - parser.add_argument('--dataset-impl', help='dataset implementation', - choices=indexed_dataset.get_available_dataset_impl()) - parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None) - parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read') - # fmt: on - - return parser - - -def main(): - parser = get_parser() - args = parser.parse_args() - - dictionary = Dictionary.load(args.dict) if args.dict is not None else None - dataset = data_utils.load_indexed_dataset( - args.input, - dictionary, - dataset_impl=args.dataset_impl, - default="lazy", - ) - - for tensor_line in dataset: - if dictionary is None: - line = " ".join([str(int(x)) for x in tensor_line]) - else: - line = dictionary.string(tensor_line) - - print(line) - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/noisychannel/rerank_tune.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/noisychannel/rerank_tune.py deleted file mode 100644 index b2e8b7594a370b2462f77252d54d7ef80e290f7c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/noisychannel/rerank_tune.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import random - -import numpy as np -from fairseq import options - -from examples.noisychannel import rerank, rerank_options - - -def random_search(args): - param_values = [] - tuneable_parameters = ["lenpen", "weight1", "weight2", "weight3"] - initial_params = [args.lenpen, args.weight1, args.weight2, args.weight3] - for i, elem in enumerate(initial_params): - if type(elem) is not list: - initial_params[i] = [elem] - else: - initial_params[i] = elem - - tune_parameters = args.tune_param.copy() - for i in range(len(args.tune_param)): - assert args.upper_bound[i] >= args.lower_bound[i] - index = tuneable_parameters.index(args.tune_param[i]) - del tuneable_parameters[index] - del initial_params[index] - - tune_parameters += tuneable_parameters - param_values += initial_params - random.seed(args.seed) - - random_params = np.array( - [ - [ - random.uniform(args.lower_bound[i], args.upper_bound[i]) - for i in range(len(args.tune_param)) - ] - for k in range(args.num_trials) - ] - ) - set_params = np.array( - [ - [initial_params[i][0] for i in range(len(tuneable_parameters))] - for k in range(args.num_trials) - ] - ) - random_params = np.concatenate((random_params, set_params), 1) - - rerank_args = vars(args).copy() - if args.nbest_list: - rerank_args["gen_subset"] = "test" - else: - rerank_args["gen_subset"] = args.tune_subset - - for k in range(len(tune_parameters)): - rerank_args[tune_parameters[k]] = list(random_params[:, k]) - - if args.share_weights: - k = tune_parameters.index("weight2") - rerank_args["weight3"] = list(random_params[:, k]) - - rerank_args = argparse.Namespace(**rerank_args) - best_lenpen, best_weight1, best_weight2, best_weight3, best_score = rerank.rerank( - rerank_args - ) - rerank_args = vars(args).copy() - rerank_args["lenpen"] = [best_lenpen] - rerank_args["weight1"] = [best_weight1] - rerank_args["weight2"] = [best_weight2] - 
rerank_args["weight3"] = [best_weight3] - - # write the hypothesis from the valid set from the best trial - - if args.gen_subset != "valid": - rerank_args["gen_subset"] = "valid" - rerank_args = argparse.Namespace(**rerank_args) - rerank.rerank(rerank_args) - - # test with the best hyperparameters on gen subset - rerank_args = vars(args).copy() - rerank_args["gen_subset"] = args.gen_subset - rerank_args["lenpen"] = [best_lenpen] - rerank_args["weight1"] = [best_weight1] - rerank_args["weight2"] = [best_weight2] - rerank_args["weight3"] = [best_weight3] - rerank_args = argparse.Namespace(**rerank_args) - rerank.rerank(rerank_args) - - -def cli_main(): - parser = rerank_options.get_tuning_parser() - args = options.parse_args_and_arch(parser) - - random_search(args) - - -if __name__ == "__main__": - cli_main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/simultaneous_translation/utils/monotonic_attention.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/simultaneous_translation/utils/monotonic_attention.py deleted file mode 100644 index 61dbb112bfd5ea7b92f2739f046910f486bb0153..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/simultaneous_translation/utils/monotonic_attention.py +++ /dev/null @@ -1,198 +0,0 @@ -from typing import Optional -import torch -from torch import Tensor - -from examples.simultaneous_translation.utils.functions import ( - exclusive_cumprod, - prob_check, - moving_sum, -) - - -def expected_alignment_from_p_choose( - p_choose: Tensor, - padding_mask: Optional[Tensor] = None, - eps: float = 1e-6 -): - """ - Calculating expected alignment for from stepwise probability - - Reference: - Online and Linear-Time Attention by Enforcing Monotonic Alignments - https://arxiv.org/pdf/1704.00784.pdf - - q_ij = (1 − p_{ij−1})q_{ij−1} + a+{i−1j} - a_ij = p_ij q_ij - - Parallel solution: - ai = p_i * cumprod(1 − pi) * cumsum(a_i / cumprod(1 − pi)) - - ============================================================ - Expected input size - p_choose: bsz, tgt_len, src_len - """ - prob_check(p_choose) - - # p_choose: bsz, tgt_len, src_len - bsz, tgt_len, src_len = p_choose.size() - dtype = p_choose.dtype - - p_choose = p_choose.float() - - if padding_mask is not None: - p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0.0) - - # cumprod_1mp : bsz, tgt_len, src_len - cumprod_1mp = exclusive_cumprod(1 - p_choose, dim=2, eps=eps) - cumprod_1mp_clamp = torch.clamp(cumprod_1mp, eps, 1.0) - - alpha_0 = p_choose.new_zeros([bsz, 1, src_len]) - alpha_0[:, :, 0] = 1.0 - - previous_alpha = [alpha_0] - - for i in range(tgt_len): - # p_choose: bsz , tgt_len, src_len - # cumprod_1mp_clamp : bsz, tgt_len, src_len - # previous_alpha[i]: bsz, 1, src_len - # alpha_i: bsz, src_len - alpha_i = ( - p_choose[:, i] - * cumprod_1mp[:, i] - * torch.cumsum( - previous_alpha[i][:, 0] / cumprod_1mp_clamp[:, i], dim=1 - ) - ).clamp(0, 1.0) - - previous_alpha.append(alpha_i.unsqueeze(1)) - - # alpha: bsz * num_heads, tgt_len, src_len - alpha = torch.cat(previous_alpha[1:], dim=1) - - # Mix precision to prevent overflow for fp16 - alpha = alpha.type(dtype) - - prob_check(alpha) - - return alpha - - -def expected_soft_attention( - alpha: Tensor, - soft_energy: Tensor, - padding_mask: Optional[Tensor] = None, - chunk_size: Optional[int] = None, - eps: float = 1e-10 -): - """ - Function to compute expected soft attention for - monotonic infinite lookback attention from - expected alignment and soft energy. 
- - Reference: - Monotonic Chunkwise Attention - https://arxiv.org/abs/1712.05382 - - Monotonic Infinite Lookback Attention for Simultaneous Machine Translation - https://arxiv.org/abs/1906.05218 - - alpha: bsz, tgt_len, src_len - soft_energy: bsz, tgt_len, src_len - padding_mask: bsz, src_len - left_padding: bool - """ - if padding_mask is not None: - alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0) - soft_energy = soft_energy.masked_fill( - padding_mask.unsqueeze(1), -float("inf") - ) - - prob_check(alpha) - - dtype = alpha.dtype - - alpha = alpha.float() - soft_energy = soft_energy.float() - - soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0] - exp_soft_energy = torch.exp(soft_energy) + eps - - if chunk_size is not None: - # Chunkwise - beta = ( - exp_soft_energy - * moving_sum( - alpha / (eps + moving_sum(exp_soft_energy, chunk_size, 1)), - 1, chunk_size - ) - ) - else: - # Infinite lookback - # Notice that infinite lookback is a special case of chunkwise - # where chunksize = inf - inner_items = alpha / (eps + torch.cumsum(exp_soft_energy, dim=2)) - - beta = ( - exp_soft_energy - * torch.cumsum(inner_items.flip(dims=[2]), dim=2) - .flip(dims=[2]) - ) - - if padding_mask is not None: - beta = beta.masked_fill( - padding_mask.unsqueeze(1).to(torch.bool), 0.0) - - # Mix precision to prevent overflow for fp16 - beta = beta.type(dtype) - - beta = beta.clamp(0, 1) - - prob_check(beta) - - return beta - - -def mass_preservation( - alpha: Tensor, - padding_mask: Optional[Tensor] = None, - left_padding: bool = False -): - """ - Function to compute the mass perservation for alpha. - This means that the residual weights of alpha will be assigned - to the last token. - - Reference: - Monotonic Infinite Lookback Attention for Simultaneous Machine Translation - https://arxiv.org/abs/1906.05218 - - alpha: bsz, tgt_len, src_len - padding_mask: bsz, src_len - left_padding: bool - """ - - prob_check(alpha) - - if padding_mask is not None: - if not left_padding: - assert not padding_mask[:, 0].any(), ( - "Find padding on the beginning of the sequence." - ) - alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0) - - if left_padding or padding_mask is None: - residuals = 1 - alpha[:, :, :-1].sum(dim=-1).clamp(0, 1) - alpha[:, :, -1] = residuals - else: - # right padding - _, tgt_len, src_len = alpha.size() - residuals = 1 - alpha.sum(dim=-1, keepdim=True).clamp(0, 1) - src_lens = src_len - padding_mask.sum(dim=1, keepdim=True) - src_lens = src_lens.expand(-1, tgt_len).contiguous() - # add back the last value - residuals += alpha.gather(2, src_lens.unsqueeze(2) - 1) - alpha = alpha.scatter(2, src_lens.unsqueeze(2) - 1, residuals) - - prob_check(alpha) - - return alpha diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/fairseq_incremental_decoder.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/fairseq_incremental_decoder.py deleted file mode 100644 index cc72a0f8f3da238a8ce846240e5008d91ce1bc1a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/fairseq_incremental_decoder.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging -from typing import Dict, Optional - -from fairseq.incremental_decoding_utils import with_incremental_state -from fairseq.models import FairseqDecoder -from torch import Tensor - - -logger = logging.getLogger(__name__) - - -@with_incremental_state -class FairseqIncrementalDecoder(FairseqDecoder): - """Base class for incremental decoders. - - Incremental decoding is a special mode at inference time where the Model - only receives a single timestep of input corresponding to the previous - output token (for teacher forcing) and must produce the next output - *incrementally*. Thus the model must cache any long-term state that is - needed about the sequence, e.g., hidden states, convolutional states, etc. - - Compared to the standard :class:`FairseqDecoder` interface, the incremental - decoder interface allows :func:`forward` functions to take an extra keyword - argument (*incremental_state*) that can be used to cache state across - time-steps. - - The :class:`FairseqIncrementalDecoder` interface also defines the - :func:`reorder_incremental_state` method, which is used during beam search - to select and reorder the incremental state based on the selection of beams. - - To learn more about how incremental decoding works, refer to `this blog - `_. - """ - - def __init__(self, dictionary): - super().__init__(dictionary) - - def forward( - self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs - ): - """ - Args: - prev_output_tokens (LongTensor): shifted output tokens of shape - `(batch, tgt_len)`, for teacher forcing - encoder_out (dict, optional): output from the encoder, used for - encoder-side attention - incremental_state (dict, optional): dictionary used for storing - state during :ref:`Incremental decoding` - - Returns: - tuple: - - the decoder's output of shape `(batch, tgt_len, vocab)` - - a dictionary with any model-specific outputs - """ - raise NotImplementedError - - def extract_features( - self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs - ): - """ - Returns: - tuple: - - the decoder's features of shape `(batch, tgt_len, embed_dim)` - - a dictionary with any model-specific outputs - """ - raise NotImplementedError - - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - new_order: Tensor, - ): - """Reorder incremental state. - - This will be called when the order of the input has changed from the - previous time step. A typical use case is beam search, where the input - order changes between time steps based on the selection of beams. - """ - pass - - def reorder_incremental_state_scripting( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - new_order: Tensor, - ): - """Main entry point for reordering the incremental state. - - Due to limitations in TorchScript, we call this function in - :class:`fairseq.sequence_generator.SequenceGenerator` instead of - calling :func:`reorder_incremental_state` directly. 
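        In practice this simply walks ``self.modules()`` and hands *new_order*
        (a LongTensor of selected beam indices along the flattened
        ``batch * beam`` dimension) to every submodule that implements
        ``reorder_incremental_state``, so that, for example, attention layers
        can reindex their cached key/value buffers along the batch dimension.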
- """ - for module in self.modules(): - if hasattr(module, "reorder_incremental_state"): - result = module.reorder_incremental_state(incremental_state, new_order) - if result is not None: - incremental_state = result - - def set_beam_size(self, beam_size): - """Sets the beam size in the decoder and all children.""" - if getattr(self, "_beam_size", -1) != beam_size: - seen = set() - - def apply_set_beam_size(module): - if ( - module != self - and hasattr(module, "set_beam_size") - and module not in seen - ): - seen.add(module) - module.set_beam_size(beam_size) - - self.apply(apply_set_beam_size) - self._beam_size = beam_size diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/joint_alignment_translation/README.md b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/joint_alignment_translation/README.md deleted file mode 100644 index cd9c0ea65f5292198296a8f427b42e01b584e2d9..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/joint_alignment_translation/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# Jointly Learning to Align and Translate with Transformer Models (Garg et al., 2019) - -This page includes instructions for training models described in [Jointly Learning to Align and Translate with Transformer Models (Garg et al., 2019)](https://arxiv.org/abs/1909.02074). - -## Training a joint alignment-translation model on WMT'18 En-De - -##### 1. Extract and preprocess the WMT'18 En-De data -```bash -./prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh -``` - -##### 2. Generate alignments from statistical alignment toolkits e.g. Giza++/FastAlign. -In this example, we use FastAlign. -```bash -git clone git@github.com:clab/fast_align.git -pushd fast_align -mkdir build -cd build -cmake .. -make -popd -ALIGN=fast_align/build/fast_align -paste bpe.32k/train.en bpe.32k/train.de | awk -F '\t' '{print $1 " ||| " $2}' > bpe.32k/train.en-de -$ALIGN -i bpe.32k/train.en-de -d -o -v > bpe.32k/train.align -``` - -##### 3. Preprocess the dataset with the above generated alignments. -```bash -fairseq-preprocess \ - --source-lang en --target-lang de \ - --trainpref bpe.32k/train \ - --validpref bpe.32k/valid \ - --testpref bpe.32k/test \ - --align-suffix align \ - --destdir binarized/ \ - --joined-dictionary \ - --workers 32 -``` - -##### 4. Train a model -```bash -fairseq-train \ - binarized \ - --arch transformer_wmt_en_de_big_align --share-all-embeddings \ - --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --activation-fn relu\ - --lr 0.0002 --lr-scheduler inverse_sqrt --warmup-updates 4000 --warmup-init-lr 1e-07 \ - --dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \ - --max-tokens 3500 --label-smoothing 0.1 \ - --save-dir ./checkpoints --log-interval 1000 --max-update 60000 \ - --keep-interval-updates -1 --save-interval-updates 0 \ - --load-alignments --criterion label_smoothed_cross_entropy_with_alignment \ - --fp16 -``` - -Note that the `--fp16` flag requires you have CUDA 9.1 or greater and a Volta GPU or newer. - -If you want to train the above model with big batches (assuming your machine has 8 GPUs): -- add `--update-freq 8` to simulate training on 8x8=64 GPUs -- increase the learning rate; 0.0007 works well for big batches - -##### 5. Evaluate and generate the alignments (BPE level) -```bash -fairseq-generate \ - binarized --gen-subset test --print-alignment \ - --source-lang en --target-lang de \ - --path checkpoints/checkpoint_best.pt --beam 5 --nbest 1 -``` - -##### 6. Other resources. -The code for: -1. 
preparing alignment test sets -2. converting BPE level alignments to token level alignments -3. symmetrizing bidirectional alignments -4. evaluating alignments using AER metric -can be found [here](https://github.com/lilt/alignment-scripts) - -## Citation - -```bibtex -@inproceedings{garg2019jointly, - title = {Jointly Learning to Align and Translate with Transformer Models}, - author = {Garg, Sarthak and Peitz, Stephan and Nallasamy, Udhyakumar and Paulik, Matthias}, - booktitle = {Conference on Empirical Methods in Natural Language Processing (EMNLP)}, - address = {Hong Kong}, - month = {November}, - url = {https://arxiv.org/abs/1909.02074}, - year = {2019}, -} -``` diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quant_noise.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quant_noise.py deleted file mode 100644 index d777dfbb6c1bf6a9b769dfdaec35d5ef084c8a8b..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quant_noise.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn - - -def quant_noise(module, p, block_size): - """ - Wraps modules and applies quantization noise to the weights for - subsequent quantization with Iterative Product Quantization as - described in "Training with Quantization Noise for Extreme Model Compression" - - Args: - - module: nn.Module - - p: amount of Quantization Noise - - block_size: size of the blocks for subsequent quantization with iPQ - - Remarks: - - Module weights must have the right sizes wrt the block size - - Only Linear, Embedding and Conv2d modules are supported for the moment - - For more detail on how to quantize by blocks with convolutional weights, - see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - - We implement the simplest form of noise here as stated in the paper - which consists in randomly dropping blocks - """ - - # if no quantization noise, don't register hook - if p <= 0: - return module - - # supported modules - assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)) - - # test whether module.weight has the right sizes wrt block_size - is_conv = module.weight.ndim == 4 - - # 2D matrix - if not is_conv: - assert ( - module.weight.size(1) % block_size == 0 - ), "Input features must be a multiple of block sizes" - - # 4D matrix - else: - # 1x1 convolutions - if module.kernel_size == (1, 1): - assert ( - module.in_channels % block_size == 0 - ), "Input channels must be a multiple of block sizes" - # regular convolutions - else: - k = module.kernel_size[0] * module.kernel_size[1] - assert k % block_size == 0, "Kernel size must be a multiple of block size" - - def _forward_pre_hook(mod, input): - # no noise for evaluation - if mod.training: - if not is_conv: - # gather weight and sizes - weight = mod.weight - in_features = weight.size(1) - out_features = weight.size(0) - - # split weight matrix into blocks and randomly drop selected blocks - mask = torch.zeros( - in_features // block_size * out_features, device=weight.device - ) - mask.bernoulli_(p) - mask = mask.repeat_interleave(block_size, -1).view(-1, in_features) - - else: - # gather weight and sizes - weight = mod.weight - in_channels = mod.in_channels - out_channels = mod.out_channels - - # split weight matrix into blocks and randomly drop selected blocks - if mod.kernel_size == (1, 1): - 
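                    # a 1x1 convolution's weight is in effect an [out_channels, in_channels]
                    # matrix, so blocks are drawn along the input-channel dimension exactly as
                    # in the Linear branch above: each Bernoulli draw zeroes one block of
                    # block_size input channels for one output channel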
mask = torch.zeros( - int(in_channels // block_size * out_channels), - device=weight.device, - ) - mask.bernoulli_(p) - mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels) - else: - mask = torch.zeros( - weight.size(0), weight.size(1), device=weight.device - ) - mask.bernoulli_(p) - mask = ( - mask.unsqueeze(2) - .unsqueeze(3) - .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1]) - ) - - # scale weights and apply mask - mask = mask.to( - torch.bool - ) # x.bool() is not currently supported in TorchScript - s = 1 / (1 - p) - mod.weight.data = s * weight.masked_fill(mask, 0) - - module.register_forward_pre_hook(_forward_pre_hook) - return module diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py deleted file mode 100644 index ac6340fa0744a08d2b527972dfc669573fb4e1c3..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from argparse import Namespace - -from fairseq.dataclass.utils import gen_parser_from_dataclass -from fairseq.optim import FairseqOptimizer - - -class FairseqLRScheduler(object): - def __init__(self, cfg, optimizer): - super().__init__() - if optimizer is not None and not isinstance(optimizer, FairseqOptimizer): - raise ValueError("optimizer must be an instance of FairseqOptimizer") - self.cfg = cfg - self.optimizer = optimizer - self.best = None - - @classmethod - def add_args(cls, parser): - """Add arguments to the parser for this LR scheduler.""" - dc = getattr(cls, "__dataclass", None) - if dc is not None: - gen_parser_from_dataclass(parser, dc()) - - def state_dict(self): - """Return the LR scheduler state dict.""" - return {"best": self.best} - - def load_state_dict(self, state_dict): - """Load an LR scheduler state dict.""" - self.best = state_dict["best"] - - def step_begin_epoch(self, epoch): - """Update the learning rate at the beginning of the given epoch.""" - pass - - def step(self, epoch, val_loss=None): - """Update the learning rate at the end of the given epoch.""" - if val_loss is not None: - if self.best is None: - self.best = val_loss - else: - self.best = min(self.best, val_loss) - - def step_update(self, num_updates): - """Update the learning rate after each update.""" - return self.optimizer.get_lr() - - def reinit(self, total_num_update, num_updates): - pass - - -class LegacyFairseqLRScheduler(FairseqLRScheduler): - def __init__(self, args: Namespace, optimizer): - if not isinstance(optimizer, FairseqOptimizer): - raise ValueError("optimizer must be an instance of FairseqOptimizer") - self.args = args - self.optimizer = optimizer - self.best = None diff --git a/spaces/Open-Orca/LlongOrca-7B-16k/README.md b/spaces/Open-Orca/LlongOrca-7B-16k/README.md deleted file mode 100644 index a693b72565da3176c2f7bf1887931d81e08e0a53..0000000000000000000000000000000000000000 --- a/spaces/Open-Orca/LlongOrca-7B-16k/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: LlongOrca-7B-16k -emoji: 🐳 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -duplicated_from: Open-Orca/OpenOrcaxOpenChat-Preview2-13B ---- - -Check out the configuration reference at 
https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h deleted file mode 100644 index 3bf383b8ed9b358b5313d433a9682c294dfb77e4..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -#pragma once -#include - -namespace detectron2 { - -at::Tensor box_iou_rotated_cpu( - const at::Tensor& boxes1, - const at::Tensor& boxes2); - -#if defined(WITH_CUDA) || defined(WITH_HIP) -at::Tensor box_iou_rotated_cuda( - const at::Tensor& boxes1, - const at::Tensor& boxes2); -#endif - -// Interface for Python -// inline is needed to prevent multiple function definitions when this header is -// included by different cpps -inline at::Tensor box_iou_rotated( - const at::Tensor& boxes1, - const at::Tensor& boxes2) { - assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); - if (boxes1.device().is_cuda()) { -#if defined(WITH_CUDA) || defined(WITH_HIP) - return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous()); -#else - AT_ERROR("Detectron2 is not compiled with GPU support!"); -#endif - } - - return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous()); -} - -} // namespace detectron2 diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/evaluation/masks/countless/countless2d.py b/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/evaluation/masks/countless/countless2d.py deleted file mode 100644 index dc27b73affa20ab1a8a199542469a10aaf1f555a..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/evaluation/masks/countless/countless2d.py +++ /dev/null @@ -1,529 +0,0 @@ -from __future__ import print_function, division - -""" -COUNTLESS performance test in Python. - -python countless2d.py ./images/NAMEOFIMAGE -""" - -import six -from six.moves import range -from collections import defaultdict -from functools import reduce -import operator -import io -import os -from PIL import Image -import math -import numpy as np -import random -import sys -import time -from tqdm import tqdm -from scipy import ndimage - -def simplest_countless(data): - """ - Vectorized implementation of downsampling a 2D - image by 2 on each side using the COUNTLESS algorithm. - - data is a 2D numpy array with even dimensions. - """ - sections = [] - - # This loop splits the 2D array apart into four arrays that are - # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), - # and (1,1) representing the A, B, C, and D positions from Figure 1. - factor = (2,2) - for offset in np.ndindex(factor): - part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] - sections.append(part) - - a, b, c, d = sections - - ab = a * (a == b) # PICK(A,B) - ac = a * (a == c) # PICK(A,C) - bc = b * (b == c) # PICK(B,C) - - a = ab | ac | bc # Bitwise OR, safe b/c non-matches are zeroed - - return a + (a == 0) * d # AB || AC || BC || D - -def quick_countless(data): - """ - Vectorized implementation of downsampling a 2D - image by 2 on each side using the COUNTLESS algorithm. - - data is a 2D numpy array with even dimensions. 
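    As a reading aid (same convention as simplest_countless above): PICK(X, Y)
    keeps X where X == Y and is zero elsewhere, so whenever at least two of the
    four pixels A, B, C, D agree, the bitwise OR of the pairwise picks recovers
    one of the agreeing values, and D is returned only when every comparison
    fails. A genuine label of 0 needs the zero_corrected variants further down,
    since a zero result is indistinguishable from "no match".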
- """ - sections = [] - - # This loop splits the 2D array apart into four arrays that are - # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), - # and (1,1) representing the A, B, C, and D positions from Figure 1. - factor = (2,2) - for offset in np.ndindex(factor): - part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] - sections.append(part) - - a, b, c, d = sections - - ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization - bc = b * (b == c) # PICK(B,C) - - a = ab_ac | bc # (PICK(A,B) || PICK(A,C)) or PICK(B,C) - return a + (a == 0) * d # AB || AC || BC || D - -def quickest_countless(data): - """ - Vectorized implementation of downsampling a 2D - image by 2 on each side using the COUNTLESS algorithm. - - data is a 2D numpy array with even dimensions. - """ - sections = [] - - # This loop splits the 2D array apart into four arrays that are - # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), - # and (1,1) representing the A, B, C, and D positions from Figure 1. - factor = (2,2) - for offset in np.ndindex(factor): - part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] - sections.append(part) - - a, b, c, d = sections - - ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization - ab_ac |= b * (b == c) # PICK(B,C) - return ab_ac + (ab_ac == 0) * d # AB || AC || BC || D - -def quick_countless_xor(data): - """ - Vectorized implementation of downsampling a 2D - image by 2 on each side using the COUNTLESS algorithm. - - data is a 2D numpy array with even dimensions. - """ - sections = [] - - # This loop splits the 2D array apart into four arrays that are - # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), - # and (1,1) representing the A, B, C, and D positions from Figure 1. - factor = (2,2) - for offset in np.ndindex(factor): - part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] - sections.append(part) - - a, b, c, d = sections - - ab = a ^ (a ^ b) # a or b - ab += (ab != a) * ((ab ^ (ab ^ c)) - b) # b or c - ab += (ab == c) * ((ab ^ (ab ^ d)) - c) # c or d - return ab - -def stippled_countless(data): - """ - Vectorized implementation of downsampling a 2D - image by 2 on each side using the COUNTLESS algorithm - that treats zero as "background" and inflates lone - pixels. - - data is a 2D numpy array with even dimensions. - """ - sections = [] - - # This loop splits the 2D array apart into four arrays that are - # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), - # and (1,1) representing the A, B, C, and D positions from Figure 1. - factor = (2,2) - for offset in np.ndindex(factor): - part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] - sections.append(part) - - a, b, c, d = sections - - ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization - ab_ac |= b * (b == c) # PICK(B,C) - - nonzero = a + (a == 0) * (b + (b == 0) * c) - return ab_ac + (ab_ac == 0) * (d + (d == 0) * nonzero) # AB || AC || BC || D - -def zero_corrected_countless(data): - """ - Vectorized implementation of downsampling a 2D - image by 2 on each side using the COUNTLESS algorithm. - - data is a 2D numpy array with even dimensions. - """ - # allows us to prevent losing 1/2 a bit of information - # at the top end by using a bigger type. Without this 255 is handled incorrectly. - data, upgraded = upgrade_type(data) - - # offset from zero, raw countless doesn't handle 0 correctly - # we'll remove the extra 1 at the end. 
- data += 1 - - sections = [] - - # This loop splits the 2D array apart into four arrays that are - # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), - # and (1,1) representing the A, B, C, and D positions from Figure 1. - factor = (2,2) - for offset in np.ndindex(factor): - part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] - sections.append(part) - - a, b, c, d = sections - - ab = a * (a == b) # PICK(A,B) - ac = a * (a == c) # PICK(A,C) - bc = b * (b == c) # PICK(B,C) - - a = ab | ac | bc # Bitwise OR, safe b/c non-matches are zeroed - - result = a + (a == 0) * d - 1 # a or d - 1 - - if upgraded: - return downgrade_type(result) - - # only need to reset data if we weren't upgraded - # b/c no copy was made in that case - data -= 1 - - return result - -def countless_extreme(data): - nonzeros = np.count_nonzero(data) - # print("nonzeros", nonzeros) - - N = reduce(operator.mul, data.shape) - - if nonzeros == N: - print("quick") - return quick_countless(data) - elif np.count_nonzero(data + 1) == N: - print("quick") - # print("upper", nonzeros) - return quick_countless(data) - else: - return countless(data) - - -def countless(data): - """ - Vectorized implementation of downsampling a 2D - image by 2 on each side using the COUNTLESS algorithm. - - data is a 2D numpy array with even dimensions. - """ - # allows us to prevent losing 1/2 a bit of information - # at the top end by using a bigger type. Without this 255 is handled incorrectly. - data, upgraded = upgrade_type(data) - - # offset from zero, raw countless doesn't handle 0 correctly - # we'll remove the extra 1 at the end. - data += 1 - - sections = [] - - # This loop splits the 2D array apart into four arrays that are - # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), - # and (1,1) representing the A, B, C, and D positions from Figure 1. - factor = (2,2) - for offset in np.ndindex(factor): - part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] - sections.append(part) - - a, b, c, d = sections - - ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization - ab_ac |= b * (b == c) # PICK(B,C) - result = ab_ac + (ab_ac == 0) * d - 1 # (matches or d) - 1 - - if upgraded: - return downgrade_type(result) - - # only need to reset data if we weren't upgraded - # b/c no copy was made in that case - data -= 1 - - return result - -def upgrade_type(arr): - dtype = arr.dtype - - if dtype == np.uint8: - return arr.astype(np.uint16), True - elif dtype == np.uint16: - return arr.astype(np.uint32), True - elif dtype == np.uint32: - return arr.astype(np.uint64), True - - return arr, False - -def downgrade_type(arr): - dtype = arr.dtype - - if dtype == np.uint64: - return arr.astype(np.uint32) - elif dtype == np.uint32: - return arr.astype(np.uint16) - elif dtype == np.uint16: - return arr.astype(np.uint8) - - return arr - -def odd_to_even(image): - """ - To facilitate 2x2 downsampling segmentation, change an odd sized image into an even sized one. - Works by mirroring the starting 1 pixel edge of the image on odd shaped sides. - - e.g. turn a 3x3x5 image into a 4x4x5 (the x and y are what are getting downsampled) - - For example: [ 3, 2, 4 ] => [ 3, 3, 2, 4 ] which is now easy to downsample. - - """ - shape = np.array(image.shape) - - offset = (shape % 2)[:2] # x,y offset - - # detect if we're dealing with an even - # image. if so it's fine, just return. 
- if not np.any(offset): - return image - - oddshape = image.shape[:2] + offset - oddshape = np.append(oddshape, shape[2:]) - oddshape = oddshape.astype(int) - - newimg = np.empty(shape=oddshape, dtype=image.dtype) - - ox,oy = offset - sx,sy = oddshape - - newimg[0,0] = image[0,0] # corner - newimg[ox:sx,0] = image[:,0] # x axis line - newimg[0,oy:sy] = image[0,:] # y axis line - - return newimg - -def counting(array): - factor = (2, 2, 1) - shape = array.shape - - while len(shape) < 4: - array = np.expand_dims(array, axis=-1) - shape = array.shape - - output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(shape, factor)) - output = np.zeros(output_shape, dtype=array.dtype) - - for chan in range(0, shape[3]): - for z in range(0, shape[2]): - for x in range(0, shape[0], 2): - for y in range(0, shape[1], 2): - block = array[ x:x+2, y:y+2, z, chan ] # 2x2 block - - hashtable = defaultdict(int) - for subx, suby in np.ndindex(block.shape[0], block.shape[1]): - hashtable[block[subx, suby]] += 1 - - best = (0, 0) - for segid, val in six.iteritems(hashtable): - if best[1] < val: - best = (segid, val) - - output[ x // 2, y // 2, chan ] = best[0] - - return output - -def ndzoom(array): - if len(array.shape) == 3: - ratio = ( 1 / 2.0, 1 / 2.0, 1.0 ) - else: - ratio = ( 1 / 2.0, 1 / 2.0) - return ndimage.interpolation.zoom(array, ratio, order=1) - -def countless_if(array): - factor = (2, 2, 1) - shape = array.shape - - if len(shape) < 3: - array = array[ :,:, np.newaxis ] - shape = array.shape - - output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(shape, factor)) - output = np.zeros(output_shape, dtype=array.dtype) - - for chan in range(0, shape[2]): - for x in range(0, shape[0], 2): - for y in range(0, shape[1], 2): - block = array[ x:x+2, y:y+2, chan ] # 2x2 block - - if block[0,0] == block[1,0]: - pick = block[0,0] - elif block[0,0] == block[0,1]: - pick = block[0,0] - elif block[1,0] == block[0,1]: - pick = block[1,0] - else: - pick = block[1,1] - - output[ x // 2, y // 2, chan ] = pick - - return np.squeeze(output) - -def downsample_with_averaging(array): - """ - Downsample x by factor using averaging. - - @return: The downsampled array, of the same type as x. - """ - - if len(array.shape) == 3: - factor = (2,2,1) - else: - factor = (2,2) - - if np.array_equal(factor[:3], np.array([1,1,1])): - return array - - output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor)) - temp = np.zeros(output_shape, float) - counts = np.zeros(output_shape, np.int) - for offset in np.ndindex(factor): - part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))] - indexing_expr = tuple(np.s_[:s] for s in part.shape) - temp[indexing_expr] += part - counts[indexing_expr] += 1 - return np.cast[array.dtype](temp / counts) - -def downsample_with_max_pooling(array): - - factor = (2,2) - - if np.all(np.array(factor, int) == 1): - return array - - sections = [] - - for offset in np.ndindex(factor): - part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))] - sections.append(part) - - output = sections[0].copy() - - for section in sections[1:]: - np.maximum(output, section, output) - - return output - -def striding(array): - """Downsample x by factor using striding. - - @return: The downsampled array, of the same type as x. 
- """ - factor = (2,2) - if np.all(np.array(factor, int) == 1): - return array - return array[tuple(np.s_[::f] for f in factor)] - -def benchmark(): - filename = sys.argv[1] - img = Image.open(filename) - data = np.array(img.getdata(), dtype=np.uint8) - - if len(data.shape) == 1: - n_channels = 1 - reshape = (img.height, img.width) - else: - n_channels = min(data.shape[1], 3) - data = data[:, :n_channels] - reshape = (img.height, img.width, n_channels) - - data = data.reshape(reshape).astype(np.uint8) - - methods = [ - simplest_countless, - quick_countless, - quick_countless_xor, - quickest_countless, - stippled_countless, - zero_corrected_countless, - countless, - downsample_with_averaging, - downsample_with_max_pooling, - ndzoom, - striding, - # countless_if, - # counting, - ] - - formats = { - 1: 'L', - 3: 'RGB', - 4: 'RGBA' - } - - if not os.path.exists('./results'): - os.mkdir('./results') - - N = 500 - img_size = float(img.width * img.height) / 1024.0 / 1024.0 - print("N = %d, %dx%d (%.2f MPx) %d chan, %s" % (N, img.width, img.height, img_size, n_channels, filename)) - print("Algorithm\tMPx/sec\tMB/sec\tSec") - for fn in methods: - print(fn.__name__, end='') - sys.stdout.flush() - - start = time.time() - # tqdm is here to show you what's going on the first time you run it. - # Feel free to remove it to get slightly more accurate timing results. - for _ in tqdm(range(N), desc=fn.__name__, disable=True): - result = fn(data) - end = time.time() - print("\r", end='') - - total_time = (end - start) - mpx = N * img_size / total_time - mbytes = N * img_size * n_channels / total_time - # Output in tab separated format to enable copy-paste into excel/numbers - print("%s\t%.3f\t%.3f\t%.2f" % (fn.__name__, mpx, mbytes, total_time)) - outimg = Image.fromarray(np.squeeze(result), formats[n_channels]) - outimg.save('./results/{}.png'.format(fn.__name__, "PNG")) - -if __name__ == '__main__': - benchmark() - - -# Example results: -# N = 5, 1024x1024 (1.00 MPx) 1 chan, images/gray_segmentation.png -# Function MPx/sec MB/sec Sec -# simplest_countless 752.855 752.855 0.01 -# quick_countless 920.328 920.328 0.01 -# zero_corrected_countless 534.143 534.143 0.01 -# countless 644.247 644.247 0.01 -# downsample_with_averaging 372.575 372.575 0.01 -# downsample_with_max_pooling 974.060 974.060 0.01 -# ndzoom 137.517 137.517 0.04 -# striding 38550.588 38550.588 0.00 -# countless_if 4.377 4.377 1.14 -# counting 0.117 0.117 42.85 - -# Run without non-numpy implementations: -# N = 2000, 1024x1024 (1.00 MPx) 1 chan, images/gray_segmentation.png -# Algorithm MPx/sec MB/sec Sec -# simplest_countless 800.522 800.522 2.50 -# quick_countless 945.420 945.420 2.12 -# quickest_countless 947.256 947.256 2.11 -# stippled_countless 544.049 544.049 3.68 -# zero_corrected_countless 575.310 575.310 3.48 -# countless 646.684 646.684 3.09 -# downsample_with_averaging 385.132 385.132 5.19 -# downsample_with_max_poolin 988.361 988.361 2.02 -# ndzoom 163.104 163.104 12.26 -# striding 81589.340 81589.340 0.02 - - - - diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/runner/optimizer/builder.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/runner/optimizer/builder.py deleted file mode 100644 index f9234eed8f1f186d9d8dfda34562157ee39bdb3a..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/runner/optimizer/builder.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import copy -import inspect - -import torch - -from ...utils import Registry, build_from_cfg - -OPTIMIZERS = Registry('optimizer') -OPTIMIZER_BUILDERS = Registry('optimizer builder') - - -def register_torch_optimizers(): - torch_optimizers = [] - for module_name in dir(torch.optim): - if module_name.startswith('__'): - continue - _optim = getattr(torch.optim, module_name) - if inspect.isclass(_optim) and issubclass(_optim, - torch.optim.Optimizer): - OPTIMIZERS.register_module()(_optim) - torch_optimizers.append(module_name) - return torch_optimizers - - -TORCH_OPTIMIZERS = register_torch_optimizers() - - -def build_optimizer_constructor(cfg): - return build_from_cfg(cfg, OPTIMIZER_BUILDERS) - - -def build_optimizer(model, cfg): - optimizer_cfg = copy.deepcopy(cfg) - constructor_type = optimizer_cfg.pop('constructor', - 'DefaultOptimizerConstructor') - paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) - optim_constructor = build_optimizer_constructor( - dict( - type=constructor_type, - optimizer_cfg=optimizer_cfg, - paramwise_cfg=paramwise_cfg)) - optimizer = optim_constructor(model) - return optimizer diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/musicxml2ly_conversion.py b/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/musicxml2ly_conversion.py deleted file mode 100644 index df194bcdd71c57147de139f6be533c44666b209e..0000000000000000000000000000000000000000 --- a/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/musicxml2ly_conversion.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -# -# This file is part of LilyPond, the GNU music typesetter. -# -# Copyright (C) 2016--2022 John Gourlay -# -# LilyPond is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# LilyPond is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with LilyPond. If not, see . - - -from fractions import Fraction - -import lilylib as ly -import musicexp - - -def rational_to_lily_duration(rational_len): - d = musicexp.Duration() - - d_log = {1: 0, 2: 1, 4: 2, 8: 3, 16: 4, 32: 5, 64: 6, - 128: 7, 256: 8, 512: 9}.get(rational_len.denominator, -1) - - # Duration of the form 1/2^n or 3/2^n can be converted to a simple lilypond duration - dots = {1: 0, 3: 1, 7: 2, 15: 3, 31: 4, 63: 5, - 127: 6}.get(rational_len.numerator, -1) - if d_log >= dots >= 0: - # account for the dots! 
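        # a note on the arithmetic: a duration with n dots lasts (2^(n+1) - 1) / 2^n
        # times its base value, so a numerator of 3, 7, 15, ... over a power-of-two
        # denominator maps to base log = d_log - dots with `dots` dots, e.g.
        # 7/16 -> a quarter note (log 2) with two dots: 1/4 * (1 + 1/2 + 1/4) = 7/16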
- d.duration_log = d_log - dots - d.dots = dots - elif d_log >= 0: - d.duration_log = d_log - d.factor = Fraction(rational_len.numerator) - else: - ly.warning(_("Encountered rational duration with denominator %s, " - "unable to convert to lilypond duration") % - rational_len.denominator) - # TODO: Test the above error message - return None - - return d - - -def musicxml_step_to_lily(step): - if step: - return (ord(step) - ord('A') + 7 - 2) % 7 - else: - return None - - -class Marker(musicexp.Music): - def __init__(self): - self.direction = 0 - self.event = None - - def print_ly(self, printer): - ly.warning(_("Encountered unprocessed marker %s\n") % self) - pass - - def ly_expression(self): - return "" - - -class RepeatMarker(Marker): - def __init__(self): - Marker.__init__(self) - self.times = 0 - - -class EndingMarker(Marker): - pass diff --git a/spaces/PeepDaSlan9/stabilityai-stable-diffusion-xl-base-1.0/README.md b/spaces/PeepDaSlan9/stabilityai-stable-diffusion-xl-base-1.0/README.md deleted file mode 100644 index 1a4973dca3c297af6d8fdf91b94b8655c4dc0b42..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/stabilityai-stable-diffusion-xl-base-1.0/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stabilityai Stable Diffusion Xl Base 1.0 -emoji: ⚡ -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: openrail++ ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Pie31415/control-animation/annotator/openpose/hand.py b/spaces/Pie31415/control-animation/annotator/openpose/hand.py deleted file mode 100644 index 3d0bf17165ad7eb225332b51f4a2aa16718664b2..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/openpose/hand.py +++ /dev/null @@ -1,86 +0,0 @@ -import cv2 -import json -import numpy as np -import math -import time -from scipy.ndimage.filters import gaussian_filter -import matplotlib.pyplot as plt -import matplotlib -import torch -from skimage.measure import label - -from .model import handpose_model -from . 
import util - -class Hand(object): - def __init__(self, model_path): - self.model = handpose_model() - if torch.cuda.is_available(): - self.model = self.model.cuda() - print('cuda') - model_dict = util.transfer(self.model, torch.load(model_path)) - self.model.load_state_dict(model_dict) - self.model.eval() - - def __call__(self, oriImg): - scale_search = [0.5, 1.0, 1.5, 2.0] - # scale_search = [0.5] - boxsize = 368 - stride = 8 - padValue = 128 - thre = 0.05 - multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search] - heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22)) - # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38)) - - for m in range(len(multiplier)): - scale = multiplier[m] - imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) - imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue) - im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 - im = np.ascontiguousarray(im) - - data = torch.from_numpy(im).float() - if torch.cuda.is_available(): - data = data.cuda() - # data = data.permute([2, 0, 1]).unsqueeze(0).float() - with torch.no_grad(): - output = self.model(data).cpu().numpy() - # output = self.model(data).numpy()q - - # extract outputs, resize, and remove padding - heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps - heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) - heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] - heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC) - - heatmap_avg += heatmap / len(multiplier) - - all_peaks = [] - for part in range(21): - map_ori = heatmap_avg[:, :, part] - one_heatmap = gaussian_filter(map_ori, sigma=3) - binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8) - # 全部小于阈值 - if np.sum(binary) == 0: - all_peaks.append([0, 0]) - continue - label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim) - max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1 - label_img[label_img != max_index] = 0 - map_ori[label_img == 0] = 0 - - y, x = util.npmax(map_ori) - all_peaks.append([x, y]) - return np.array(all_peaks) - -if __name__ == "__main__": - hand_estimation = Hand('../model/hand_pose_model.pth') - - # test_image = '../images/hand.jpg' - test_image = '../images/hand.jpg' - oriImg = cv2.imread(test_image) # B,G,R order - peaks = hand_estimation(oriImg) - canvas = util.draw_handpose(oriImg, peaks, True) - cv2.imshow('', canvas) - cv2.waitKey(0) \ No newline at end of file diff --git a/spaces/PrathamDesai/fastai_bear_classifier/README.md b/spaces/PrathamDesai/fastai_bear_classifier/README.md deleted file mode 100644 index 355c254dd82066059a5c00cf734d218040607646..0000000000000000000000000000000000000000 --- a/spaces/PrathamDesai/fastai_bear_classifier/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Fastai_bear_classifier -emoji: 👁 -colorFrom: red -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: 
_string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/training/model_utils.py b/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/training/model_utils.py deleted file mode 100644 index 673291f4b17d1c75c9c46072f692fab390378b08..0000000000000000000000000000000000000000 --- a/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/training/model_utils.py +++ /dev/null @@ -1,512 +0,0 @@ -from __future__ import print_function -import json, time, os, sys, glob -import shutil -import numpy as np -import torch -from torch import optim -from torch.utils.data import DataLoader -from torch.utils.data.dataset import random_split, Subset -import torch.utils -import torch.utils.checkpoint - -import copy -import torch.nn as nn -import torch.nn.functional as F -import random -import itertools - - -def featurize(batch, device): - alphabet = 'ACDEFGHIKLMNPQRSTVWYX' - B = len(batch) - lengths = np.array([len(b['seq']) for b in batch], dtype=np.int32) #sum of chain seq lengths - L_max = max([len(b['seq']) for b in batch]) - X = np.zeros([B, L_max, 4, 3]) - residue_idx = -100*np.ones([B, L_max], dtype=np.int32) #residue idx with jumps across chains - chain_M = np.zeros([B, L_max], dtype=np.int32) #1.0 for the bits that need to be predicted, 0.0 for the bits that are given - mask_self = np.ones([B, L_max, L_max], dtype=np.int32) #for interface loss calculation - 0.0 for self interaction, 1.0 for other - chain_encoding_all = np.zeros([B, L_max], dtype=np.int32) #integer encoding for chains 0, 0, 0,...0, 1, 1,..., 1, 2, 2, 2... 
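    # the buffers above are filled per example in the loop below: chain order is
    # shuffled, visible chains get chain_M = 0 while masked (to-be-designed) chains
    # get chain_M = 1, and residue_idx jumps by 100 between chains so that the
    # relative-position features (clipped at +/-32 later in this file) cannot
    # bridge chain breaks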
- S = np.zeros([B, L_max], dtype=np.int32) #sequence AAs integers - init_alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G','H', 'I', 'J','K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T','U', 'V','W','X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g','h', 'i', 'j','k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't','u', 'v','w','x', 'y', 'z'] - extra_alphabet = [str(item) for item in list(np.arange(300))] - chain_letters = init_alphabet + extra_alphabet - for i, b in enumerate(batch): - masked_chains = b['masked_list'] - visible_chains = b['visible_list'] - all_chains = masked_chains + visible_chains - visible_temp_dict = {} - masked_temp_dict = {} - for step, letter in enumerate(all_chains): - chain_seq = b[f'seq_chain_{letter}'] - if letter in visible_chains: - visible_temp_dict[letter] = chain_seq - elif letter in masked_chains: - masked_temp_dict[letter] = chain_seq - for km, vm in masked_temp_dict.items(): - for kv, vv in visible_temp_dict.items(): - if vm == vv: - if kv not in masked_chains: - masked_chains.append(kv) - if kv in visible_chains: - visible_chains.remove(kv) - all_chains = masked_chains + visible_chains - random.shuffle(all_chains) #randomly shuffle chain order - num_chains = b['num_of_chains'] - mask_dict = {} - x_chain_list = [] - chain_mask_list = [] - chain_seq_list = [] - chain_encoding_list = [] - c = 1 - l0 = 0 - l1 = 0 - for step, letter in enumerate(all_chains): - if letter in visible_chains: - chain_seq = b[f'seq_chain_{letter}'] - chain_length = len(chain_seq) - chain_coords = b[f'coords_chain_{letter}'] #this is a dictionary - chain_mask = np.zeros(chain_length) #0.0 for visible chains - x_chain = np.stack([chain_coords[c] for c in [f'N_chain_{letter}', f'CA_chain_{letter}', f'C_chain_{letter}', f'O_chain_{letter}']], 1) #[chain_length,4,3] - x_chain_list.append(x_chain) - chain_mask_list.append(chain_mask) - chain_seq_list.append(chain_seq) - chain_encoding_list.append(c*np.ones(np.array(chain_mask).shape[0])) - l1 += chain_length - mask_self[i, l0:l1, l0:l1] = np.zeros([chain_length, chain_length]) - residue_idx[i, l0:l1] = 100*(c-1)+np.arange(l0, l1) - l0 += chain_length - c+=1 - elif letter in masked_chains: - chain_seq = b[f'seq_chain_{letter}'] - chain_length = len(chain_seq) - chain_coords = b[f'coords_chain_{letter}'] #this is a dictionary - chain_mask = np.ones(chain_length) #0.0 for visible chains - x_chain = np.stack([chain_coords[c] for c in [f'N_chain_{letter}', f'CA_chain_{letter}', f'C_chain_{letter}', f'O_chain_{letter}']], 1) #[chain_lenght,4,3] - x_chain_list.append(x_chain) - chain_mask_list.append(chain_mask) - chain_seq_list.append(chain_seq) - chain_encoding_list.append(c*np.ones(np.array(chain_mask).shape[0])) - l1 += chain_length - mask_self[i, l0:l1, l0:l1] = np.zeros([chain_length, chain_length]) - residue_idx[i, l0:l1] = 100*(c-1)+np.arange(l0, l1) - l0 += chain_length - c+=1 - x = np.concatenate(x_chain_list,0) #[L, 4, 3] - all_sequence = "".join(chain_seq_list) - m = np.concatenate(chain_mask_list,0) #[L,], 1.0 for places that need to be predicted - chain_encoding = np.concatenate(chain_encoding_list,0) - - l = len(all_sequence) - x_pad = np.pad(x, [[0,L_max-l], [0,0], [0,0]], 'constant', constant_values=(np.nan, )) - X[i,:,:,:] = x_pad - - m_pad = np.pad(m, [[0,L_max-l]], 'constant', constant_values=(0.0, )) - chain_M[i,:] = m_pad - - chain_encoding_pad = np.pad(chain_encoding, [[0,L_max-l]], 'constant', constant_values=(0.0, )) - chain_encoding_all[i,:] = chain_encoding_pad - - # Convert to labels - indices = 
np.asarray([alphabet.index(a) for a in all_sequence], dtype=np.int32) - S[i, :l] = indices - - isnan = np.isnan(X) - mask = np.isfinite(np.sum(X,(2,3))).astype(np.float32) - X[isnan] = 0. - - # Conversion - residue_idx = torch.from_numpy(residue_idx).to(dtype=torch.long,device=device) - S = torch.from_numpy(S).to(dtype=torch.long,device=device) - X = torch.from_numpy(X).to(dtype=torch.float32, device=device) - mask = torch.from_numpy(mask).to(dtype=torch.float32, device=device) - mask_self = torch.from_numpy(mask_self).to(dtype=torch.float32, device=device) - chain_M = torch.from_numpy(chain_M).to(dtype=torch.float32, device=device) - chain_encoding_all = torch.from_numpy(chain_encoding_all).to(dtype=torch.long, device=device) - return X, S, mask, lengths, chain_M, residue_idx, mask_self, chain_encoding_all - - -def loss_nll(S, log_probs, mask): - """ Negative log probabilities """ - criterion = torch.nn.NLLLoss(reduction='none') - loss = criterion( - log_probs.contiguous().view(-1, log_probs.size(-1)), S.contiguous().view(-1) - ).view(S.size()) - S_argmaxed = torch.argmax(log_probs,-1) #[B, L] - true_false = (S == S_argmaxed).float() - loss_av = torch.sum(loss * mask) / torch.sum(mask) - return loss, loss_av, true_false - - -def loss_smoothed(S, log_probs, mask, weight=0.1): - """ Negative log probabilities """ - S_onehot = torch.nn.functional.one_hot(S, 21).float() - - # Label smoothing - S_onehot = S_onehot + weight / float(S_onehot.size(-1)) - S_onehot = S_onehot / S_onehot.sum(-1, keepdim=True) - - loss = -(S_onehot * log_probs).sum(-1) - loss_av = torch.sum(loss * mask) / 2000.0 #fixed - return loss, loss_av - - -# The following gather functions -def gather_edges(edges, neighbor_idx): - # Features [B,N,N,C] at Neighbor indices [B,N,K] => Neighbor features [B,N,K,C] - neighbors = neighbor_idx.unsqueeze(-1).expand(-1, -1, -1, edges.size(-1)) - edge_features = torch.gather(edges, 2, neighbors) - return edge_features - -def gather_nodes(nodes, neighbor_idx): - # Features [B,N,C] at Neighbor indices [B,N,K] => [B,N,K,C] - # Flatten and expand indices per batch [B,N,K] => [B,NK] => [B,NK,C] - neighbors_flat = neighbor_idx.view((neighbor_idx.shape[0], -1)) - neighbors_flat = neighbors_flat.unsqueeze(-1).expand(-1, -1, nodes.size(2)) - # Gather and re-pack - neighbor_features = torch.gather(nodes, 1, neighbors_flat) - neighbor_features = neighbor_features.view(list(neighbor_idx.shape)[:3] + [-1]) - return neighbor_features - -def gather_nodes_t(nodes, neighbor_idx): - # Features [B,N,C] at Neighbor index [B,K] => Neighbor features[B,K,C] - idx_flat = neighbor_idx.unsqueeze(-1).expand(-1, -1, nodes.size(2)) - neighbor_features = torch.gather(nodes, 1, idx_flat) - return neighbor_features - -def cat_neighbors_nodes(h_nodes, h_neighbors, E_idx): - h_nodes = gather_nodes(h_nodes, E_idx) - h_nn = torch.cat([h_neighbors, h_nodes], -1) - return h_nn - - -class EncLayer(nn.Module): - def __init__(self, num_hidden, num_in, dropout=0.1, num_heads=None, scale=30): - super(EncLayer, self).__init__() - self.num_hidden = num_hidden - self.num_in = num_in - self.scale = scale - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - self.dropout3 = nn.Dropout(dropout) - self.norm1 = nn.LayerNorm(num_hidden) - self.norm2 = nn.LayerNorm(num_hidden) - self.norm3 = nn.LayerNorm(num_hidden) - - self.W1 = nn.Linear(num_hidden + num_in, num_hidden, bias=True) - self.W2 = nn.Linear(num_hidden, num_hidden, bias=True) - self.W3 = nn.Linear(num_hidden, num_hidden, bias=True) - self.W11 = 
nn.Linear(num_hidden + num_in, num_hidden, bias=True) - self.W12 = nn.Linear(num_hidden, num_hidden, bias=True) - self.W13 = nn.Linear(num_hidden, num_hidden, bias=True) - self.act = torch.nn.GELU() - self.dense = PositionWiseFeedForward(num_hidden, num_hidden * 4) - - def forward(self, h_V, h_E, E_idx, mask_V=None, mask_attend=None): - """ Parallel computation of full transformer layer """ - - h_EV = cat_neighbors_nodes(h_V, h_E, E_idx) - h_V_expand = h_V.unsqueeze(-2).expand(-1,-1,h_EV.size(-2),-1) - h_EV = torch.cat([h_V_expand, h_EV], -1) - h_message = self.W3(self.act(self.W2(self.act(self.W1(h_EV))))) - if mask_attend is not None: - h_message = mask_attend.unsqueeze(-1) * h_message - dh = torch.sum(h_message, -2) / self.scale - h_V = self.norm1(h_V + self.dropout1(dh)) - - dh = self.dense(h_V) - h_V = self.norm2(h_V + self.dropout2(dh)) - if mask_V is not None: - mask_V = mask_V.unsqueeze(-1) - h_V = mask_V * h_V - - h_EV = cat_neighbors_nodes(h_V, h_E, E_idx) - h_V_expand = h_V.unsqueeze(-2).expand(-1,-1,h_EV.size(-2),-1) - h_EV = torch.cat([h_V_expand, h_EV], -1) - h_message = self.W13(self.act(self.W12(self.act(self.W11(h_EV))))) - h_E = self.norm3(h_E + self.dropout3(h_message)) - return h_V, h_E - - - -class DecLayer(nn.Module): - def __init__(self, num_hidden, num_in, dropout=0.1, num_heads=None, scale=30): - super(DecLayer, self).__init__() - self.num_hidden = num_hidden - self.num_in = num_in - self.scale = scale - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - self.norm1 = nn.LayerNorm(num_hidden) - self.norm2 = nn.LayerNorm(num_hidden) - - self.W1 = nn.Linear(num_hidden + num_in, num_hidden, bias=True) - self.W2 = nn.Linear(num_hidden, num_hidden, bias=True) - self.W3 = nn.Linear(num_hidden, num_hidden, bias=True) - self.act = torch.nn.GELU() - self.dense = PositionWiseFeedForward(num_hidden, num_hidden * 4) - - def forward(self, h_V, h_E, mask_V=None, mask_attend=None): - """ Parallel computation of full transformer layer """ - - # Concatenate h_V_i to h_E_ij - h_V_expand = h_V.unsqueeze(-2).expand(-1,-1,h_E.size(-2),-1) - h_EV = torch.cat([h_V_expand, h_E], -1) - - h_message = self.W3(self.act(self.W2(self.act(self.W1(h_EV))))) - if mask_attend is not None: - h_message = mask_attend.unsqueeze(-1) * h_message - dh = torch.sum(h_message, -2) / self.scale - - h_V = self.norm1(h_V + self.dropout1(dh)) - - # Position-wise feedforward - dh = self.dense(h_V) - h_V = self.norm2(h_V + self.dropout2(dh)) - - if mask_V is not None: - mask_V = mask_V.unsqueeze(-1) - h_V = mask_V * h_V - return h_V - - -class PositionWiseFeedForward(nn.Module): - def __init__(self, num_hidden, num_ff): - super(PositionWiseFeedForward, self).__init__() - self.W_in = nn.Linear(num_hidden, num_ff, bias=True) - self.W_out = nn.Linear(num_ff, num_hidden, bias=True) - self.act = torch.nn.GELU() - def forward(self, h_V): - h = self.act(self.W_in(h_V)) - h = self.W_out(h) - return h - -class PositionalEncodings(nn.Module): - def __init__(self, num_embeddings, max_relative_feature=32): - super(PositionalEncodings, self).__init__() - self.num_embeddings = num_embeddings - self.max_relative_feature = max_relative_feature - self.linear = nn.Linear(2*max_relative_feature+1+1, num_embeddings) - - def forward(self, offset, mask): - d = torch.clip(offset + self.max_relative_feature, 0, 2*self.max_relative_feature)*mask + (1-mask)*(2*self.max_relative_feature+1) - d_onehot = torch.nn.functional.one_hot(d, 2*self.max_relative_feature+1+1) - E = self.linear(d_onehot.float()) - return E - - 
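# A quick shape check of the relative positional encoding above (a sketch; the
# tensors are made up for illustration):
#
#   pe = PositionalEncodings(num_embeddings=16)        # max_relative_feature = 32
#   idx = torch.arange(5)
#   offset = idx[None, :, None] - idx[None, None, :]   # [1, 5, 5] residue offsets
#   mask = torch.ones(1, 5, 5, dtype=torch.long)       # 1 = same chain, 0 = other chain
#   E = pe(offset, mask)                               # [1, 5, 5, 16]
#
# Offsets are clipped to the range [-32, 32] (65 bins) and every cross-chain pair
# (mask == 0) is routed to one extra shared bin before the linear projection.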
-class ProteinFeatures(nn.Module): - def __init__(self, edge_features, node_features, num_positional_embeddings=16, - num_rbf=16, top_k=30, augment_eps=0., num_chain_embeddings=16): - """ Extract protein features """ - super(ProteinFeatures, self).__init__() - self.edge_features = edge_features - self.node_features = node_features - self.top_k = top_k - self.augment_eps = augment_eps - self.num_rbf = num_rbf - self.num_positional_embeddings = num_positional_embeddings - - self.embeddings = PositionalEncodings(num_positional_embeddings) - node_in, edge_in = 6, num_positional_embeddings + num_rbf*25 - self.edge_embedding = nn.Linear(edge_in, edge_features, bias=False) - self.norm_edges = nn.LayerNorm(edge_features) - - def _dist(self, X, mask, eps=1E-6): - mask_2D = torch.unsqueeze(mask,1) * torch.unsqueeze(mask,2) - dX = torch.unsqueeze(X,1) - torch.unsqueeze(X,2) - D = mask_2D * torch.sqrt(torch.sum(dX**2, 3) + eps) - D_max, _ = torch.max(D, -1, keepdim=True) - D_adjust = D + (1. - mask_2D) * D_max - sampled_top_k = self.top_k - D_neighbors, E_idx = torch.topk(D_adjust, np.minimum(self.top_k, X.shape[1]), dim=-1, largest=False) - return D_neighbors, E_idx - - def _rbf(self, D): - device = D.device - D_min, D_max, D_count = 2., 22., self.num_rbf - D_mu = torch.linspace(D_min, D_max, D_count, device=device) - D_mu = D_mu.view([1,1,1,-1]) - D_sigma = (D_max - D_min) / D_count - D_expand = torch.unsqueeze(D, -1) - RBF = torch.exp(-((D_expand - D_mu) / D_sigma)**2) - return RBF - - def _get_rbf(self, A, B, E_idx): - D_A_B = torch.sqrt(torch.sum((A[:,:,None,:] - B[:,None,:,:])**2,-1) + 1e-6) #[B, L, L] - D_A_B_neighbors = gather_edges(D_A_B[:,:,:,None], E_idx)[:,:,:,0] #[B,L,K] - RBF_A_B = self._rbf(D_A_B_neighbors) - return RBF_A_B - - def forward(self, X, mask, residue_idx, chain_labels): - if self.training and self.augment_eps > 0: - X = X + self.augment_eps * torch.randn_like(X) - - b = X[:,:,1,:] - X[:,:,0,:] - c = X[:,:,2,:] - X[:,:,1,:] - a = torch.cross(b, c, dim=-1) - Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + X[:,:,1,:] - Ca = X[:,:,1,:] - N = X[:,:,0,:] - C = X[:,:,2,:] - O = X[:,:,3,:] - - D_neighbors, E_idx = self._dist(Ca, mask) - - RBF_all = [] - RBF_all.append(self._rbf(D_neighbors)) #Ca-Ca - RBF_all.append(self._get_rbf(N, N, E_idx)) #N-N - RBF_all.append(self._get_rbf(C, C, E_idx)) #C-C - RBF_all.append(self._get_rbf(O, O, E_idx)) #O-O - RBF_all.append(self._get_rbf(Cb, Cb, E_idx)) #Cb-Cb - RBF_all.append(self._get_rbf(Ca, N, E_idx)) #Ca-N - RBF_all.append(self._get_rbf(Ca, C, E_idx)) #Ca-C - RBF_all.append(self._get_rbf(Ca, O, E_idx)) #Ca-O - RBF_all.append(self._get_rbf(Ca, Cb, E_idx)) #Ca-Cb - RBF_all.append(self._get_rbf(N, C, E_idx)) #N-C - RBF_all.append(self._get_rbf(N, O, E_idx)) #N-O - RBF_all.append(self._get_rbf(N, Cb, E_idx)) #N-Cb - RBF_all.append(self._get_rbf(Cb, C, E_idx)) #Cb-C - RBF_all.append(self._get_rbf(Cb, O, E_idx)) #Cb-O - RBF_all.append(self._get_rbf(O, C, E_idx)) #O-C - RBF_all.append(self._get_rbf(N, Ca, E_idx)) #N-Ca - RBF_all.append(self._get_rbf(C, Ca, E_idx)) #C-Ca - RBF_all.append(self._get_rbf(O, Ca, E_idx)) #O-Ca - RBF_all.append(self._get_rbf(Cb, Ca, E_idx)) #Cb-Ca - RBF_all.append(self._get_rbf(C, N, E_idx)) #C-N - RBF_all.append(self._get_rbf(O, N, E_idx)) #O-N - RBF_all.append(self._get_rbf(Cb, N, E_idx)) #Cb-N - RBF_all.append(self._get_rbf(C, Cb, E_idx)) #C-Cb - RBF_all.append(self._get_rbf(O, Cb, E_idx)) #O-Cb - RBF_all.append(self._get_rbf(C, O, E_idx)) #C-O - RBF_all = torch.cat(tuple(RBF_all), dim=-1) - - offset = 
residue_idx[:,:,None]-residue_idx[:,None,:] - offset = gather_edges(offset[:,:,:,None], E_idx)[:,:,:,0] #[B, L, K] - - d_chains = ((chain_labels[:, :, None] - chain_labels[:,None,:])==0).long() #find self vs non-self interaction - E_chains = gather_edges(d_chains[:,:,:,None], E_idx)[:,:,:,0] - E_positional = self.embeddings(offset.long(), E_chains) - E = torch.cat((E_positional, RBF_all), -1) - E = self.edge_embedding(E) - E = self.norm_edges(E) - return E, E_idx - - - -class ProteinMPNN(nn.Module): - def __init__(self, num_letters=21, node_features=128, edge_features=128, - hidden_dim=128, num_encoder_layers=3, num_decoder_layers=3, - vocab=21, k_neighbors=32, augment_eps=0.1, dropout=0.1): - super(ProteinMPNN, self).__init__() - - # Hyperparameters - self.node_features = node_features - self.edge_features = edge_features - self.hidden_dim = hidden_dim - - self.features = ProteinFeatures(node_features, edge_features, top_k=k_neighbors, augment_eps=augment_eps) - - self.W_e = nn.Linear(edge_features, hidden_dim, bias=True) - self.W_s = nn.Embedding(vocab, hidden_dim) - - # Encoder layers - self.encoder_layers = nn.ModuleList([ - EncLayer(hidden_dim, hidden_dim*2, dropout=dropout) - for _ in range(num_encoder_layers) - ]) - - # Decoder layers - self.decoder_layers = nn.ModuleList([ - DecLayer(hidden_dim, hidden_dim*3, dropout=dropout) - for _ in range(num_decoder_layers) - ]) - self.W_out = nn.Linear(hidden_dim, num_letters, bias=True) - - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, X, S, mask, chain_M, residue_idx, chain_encoding_all): - """ Graph-conditioned sequence model """ - device=X.device - # Prepare node and edge embeddings - E, E_idx = self.features(X, mask, residue_idx, chain_encoding_all) - h_V = torch.zeros((E.shape[0], E.shape[1], E.shape[-1]), device=E.device) - h_E = self.W_e(E) - - # Encoder is unmasked self-attention - mask_attend = gather_nodes(mask.unsqueeze(-1), E_idx).squeeze(-1) - mask_attend = mask.unsqueeze(-1) * mask_attend - for layer in self.encoder_layers: - h_V, h_E = torch.utils.checkpoint.checkpoint(layer, h_V, h_E, E_idx, mask, mask_attend) - - # Concatenate sequence embeddings for autoregressive decoder - h_S = self.W_s(S) - h_ES = cat_neighbors_nodes(h_S, h_E, E_idx) - - # Build encoder embeddings - h_EX_encoder = cat_neighbors_nodes(torch.zeros_like(h_S), h_E, E_idx) - h_EXV_encoder = cat_neighbors_nodes(h_V, h_EX_encoder, E_idx) - - - chain_M = chain_M*mask #update chain_M to include missing regions - decoding_order = torch.argsort((chain_M+0.0001)*(torch.abs(torch.randn(chain_M.shape, device=device)))) #[numbers will be smaller for places where chain_M = 0.0 and higher for places where chain_M = 1.0] - mask_size = E_idx.shape[1] - permutation_matrix_reverse = torch.nn.functional.one_hot(decoding_order, num_classes=mask_size).float() - order_mask_backward = torch.einsum('ij, biq, bjp->bqp',(1-torch.triu(torch.ones(mask_size,mask_size, device=device))), permutation_matrix_reverse, permutation_matrix_reverse) - mask_attend = torch.gather(order_mask_backward, 2, E_idx).unsqueeze(-1) - mask_1D = mask.view([mask.size(0), mask.size(1), 1, 1]) - mask_bw = mask_1D * mask_attend - mask_fw = mask_1D * (1. 
- mask_attend) - - h_EXV_encoder_fw = mask_fw * h_EXV_encoder - for layer in self.decoder_layers: - h_ESV = cat_neighbors_nodes(h_V, h_ES, E_idx) - h_ESV = mask_bw * h_ESV + h_EXV_encoder_fw - h_V = torch.utils.checkpoint.checkpoint(layer, h_V, h_ESV, mask) - - logits = self.W_out(h_V) - log_probs = F.log_softmax(logits, dim=-1) - return log_probs - - - -class NoamOpt: - "Optim wrapper that implements rate." - def __init__(self, model_size, factor, warmup, optimizer, step): - self.optimizer = optimizer - self._step = step - self.warmup = warmup - self.factor = factor - self.model_size = model_size - self._rate = 0 - - @property - def param_groups(self): - """Return param_groups.""" - return self.optimizer.param_groups - - def step(self): - "Update parameters and rate" - self._step += 1 - rate = self.rate() - for p in self.optimizer.param_groups: - p['lr'] = rate - self._rate = rate - self.optimizer.step() - - def rate(self, step = None): - "Implement `lrate` above" - if step is None: - step = self._step - return self.factor * \ - (self.model_size ** (-0.5) * - min(step ** (-0.5), step * self.warmup ** (-1.5))) - - def zero_grad(self): - self.optimizer.zero_grad() - -def get_std_opt(parameters, d_model, step): - return NoamOpt( - d_model, 2, 4000, torch.optim.Adam(parameters, lr=0, betas=(0.9, 0.98), eps=1e-9), step - ) diff --git a/spaces/QINGCHE/TSA/inference.py b/spaces/QINGCHE/TSA/inference.py deleted file mode 100644 index 59d3f34ddbc88e0a88839dc692044ba97a09d670..0000000000000000000000000000000000000000 --- a/spaces/QINGCHE/TSA/inference.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import numpy as np -import transformers -import torch -import torch.nn as nn -from torch import cuda -from transformers import BertTokenizer -from BERT_inference import BertClassificationModel - - -def encoder(max_len,text): - - tokenizer = BertTokenizer.from_pretrained("bert-base-chinese") - tokenizer = tokenizer( - text, - padding = True, - truncation = True, - max_length = max_len, - return_tensors='pt' - ) - input_ids = tokenizer['input_ids'] - token_type_ids = tokenizer['token_type_ids'] - attention_mask = tokenizer['attention_mask'] - return input_ids,token_type_ids,attention_mask - - -def predict(model,device,text): - model.to(device) - model.eval() - with torch.no_grad(): - input_ids,token_type_ids,attention_mask = encoder(512,text) - input_ids,token_type_ids,attention_mask=input_ids.to(device),token_type_ids.to(device),attention_mask.to(device) - out_put = model(input_ids,token_type_ids,attention_mask) - # pre_numpy = out_put.cpu().numpy().tolist() - probs = torch.nn.functional.softmax(out_put).detach().cpu().numpy().tolist() - # print(probs) - return probs[0][1] - - -def inference_matrix(topics): - device = torch.device('cuda' if cuda.is_available() else 'cpu') - load_path = "bert_model.pkl" - model = torch.load(load_path,map_location=torch.device(device)) - matrix = np.zeros([len(topics),len(topics)],dtype=float) - for i,i_text in enumerate(topics): - for j,j_text in enumerate(topics): - if(i == j): - matrix[i][j] = 0 - else: - test = i_text+" 是否包含 "+j_text - outputs = predict(model,device,test) - # outputs = model(ids, mask,token_type_ids) - # print(outputs) - matrix[i][j] = outputs - - return matrix -if __name__ == "__main__": - - print("yes") - topics = ['在本次报告中我将介绍分布式并行加速算法模型架构内存和计算优化以及集群架构等关键技术', '在现代机器学习任务中大模型训练已成为解决复杂问题的重要手段', '首先分布式并行加速策略包括数据并行模型并行流水线并行和张量并行等四种方式', '选择合适的集群架构是实现大模型的分布式训练的关键', '这些策略帮助我们将训练数据和模型分布到多个设备上以加速大模型训练过程'] - print(inference_matrix(topics)) \ No newline at end of 
file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pep517/__init__.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pep517/__init__.py deleted file mode 100644 index 38ea0f5f11f434fc2d006556efb96bd7998f4e9b..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pep517/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Wrappers to build Python packages using PEP 517 hooks -""" - -__version__ = '0.13.0' - -from .wrappers import * # noqa: F401, F403 diff --git a/spaces/Realcat/image-matching-webui/third_party/Roma/roma/utils/transforms.py b/spaces/Realcat/image-matching-webui/third_party/Roma/roma/utils/transforms.py deleted file mode 100644 index b33c3f30f422bca6a81aa201952b7bb2d3d906bf..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/Roma/roma/utils/transforms.py +++ /dev/null @@ -1,119 +0,0 @@ -from typing import Dict -import numpy as np -import torch -import kornia.augmentation as K -from kornia.geometry.transform import warp_perspective - -# Adapted from Kornia -class GeometricSequential: - def __init__(self, *transforms, align_corners=True) -> None: - self.transforms = transforms - self.align_corners = align_corners - - def __call__(self, x, mode="bilinear"): - b, c, h, w = x.shape - M = torch.eye(3, device=x.device)[None].expand(b, 3, 3) - for t in self.transforms: - if np.random.rand() < t.p: - M = M.matmul( - t.compute_transformation( - x, t.generate_parameters((b, c, h, w)), None - ) - ) - return ( - warp_perspective( - x, M, dsize=(h, w), mode=mode, align_corners=self.align_corners - ), - M, - ) - - def apply_transform(self, x, M, mode="bilinear"): - b, c, h, w = x.shape - return warp_perspective( - x, M, dsize=(h, w), align_corners=self.align_corners, mode=mode - ) - - -class RandomPerspective(K.RandomPerspective): - def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]: - distortion_scale = torch.as_tensor( - self.distortion_scale, device=self._device, dtype=self._dtype - ) - return self.random_perspective_generator( - batch_shape[0], - batch_shape[-2], - batch_shape[-1], - distortion_scale, - self.same_on_batch, - self.device, - self.dtype, - ) - - def random_perspective_generator( - self, - batch_size: int, - height: int, - width: int, - distortion_scale: torch.Tensor, - same_on_batch: bool = False, - device: torch.device = torch.device("cpu"), - dtype: torch.dtype = torch.float32, - ) -> Dict[str, torch.Tensor]: - r"""Get parameters for ``perspective`` for a random perspective transform. - - Args: - batch_size (int): the tensor batch size. - height (int) : height of the image. - width (int): width of the image. - distortion_scale (torch.Tensor): it controls the degree of distortion and ranges from 0 to 1. - same_on_batch (bool): apply the same transformation across the batch. Default: False. - device (torch.device): the device on which the random numbers will be generated. Default: cpu. - dtype (torch.dtype): the data type of the generated random numbers. Default: float32. - - Returns: - params Dict[str, torch.Tensor]: parameters to be passed for transformation. - - start_points (torch.Tensor): element-wise perspective source areas with a shape of (B, 4, 2). - - end_points (torch.Tensor): element-wise perspective target areas with a shape of (B, 4, 2). - - Note: - The generated random numbers are not reproducible across different devices and dtypes. 
- """ - if not (distortion_scale.dim() == 0 and 0 <= distortion_scale <= 1): - raise AssertionError( - f"'distortion_scale' must be a scalar within [0, 1]. Got {distortion_scale}." - ) - if not ( - type(height) is int and height > 0 and type(width) is int and width > 0 - ): - raise AssertionError( - f"'height' and 'width' must be integers. Got {height}, {width}." - ) - - start_points: torch.Tensor = torch.tensor( - [[[0.0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]], - device=distortion_scale.device, - dtype=distortion_scale.dtype, - ).expand(batch_size, -1, -1) - - # generate random offset not larger than half of the image - fx = distortion_scale * width / 2 - fy = distortion_scale * height / 2 - - factor = torch.stack([fx, fy], dim=0).view(-1, 1, 2) - offset = (torch.rand_like(start_points) - 0.5) * 2 - end_points = start_points + factor * offset - - return dict(start_points=start_points, end_points=end_points) - - -class RandomErasing: - def __init__(self, p=0.0, scale=0.0) -> None: - self.p = p - self.scale = scale - self.random_eraser = K.RandomErasing(scale=(0.02, scale), p=p) - - def __call__(self, image, depth): - if self.p > 0: - image = self.random_eraser(image) - depth = self.random_eraser(depth, params=self.random_eraser._params) - return image, depth diff --git a/spaces/RegalHyperus/rvc-anime-game/infer_pack/models_onnx.py b/spaces/RegalHyperus/rvc-anime-game/infer_pack/models_onnx.py deleted file mode 100644 index 3c5be53a572151820de7d82dfce84f2e2979ed56..0000000000000000000000000000000000000000 --- a/spaces/RegalHyperus/rvc-anime-game/infer_pack/models_onnx.py +++ /dev/null @@ -1,760 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, 
- out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, 
- upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - 
f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 
** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsidO(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - 
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # 
pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/Rick93/image_to_story_naive/README.md b/spaces/Rick93/image_to_story_naive/README.md deleted file mode 100644 index 6642f2e9c0ed17b110258f9920be1b52de11f38c..0000000000000000000000000000000000000000 --- a/spaces/Rick93/image_to_story_naive/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image To Story Naive -emoji: 🐠 -colorFrom: pink -colorTo: green -sdk: gradio -sdk_version: 3.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/shared_heads/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/shared_heads/__init__.py deleted file mode 100644 index bbe70145b8bf7c304370f725f5afa8db98666679..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/shared_heads/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .res_layer import ResLayer - -__all__ = ['ResLayer'] diff --git a/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/models/melgan.py b/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/models/melgan.py deleted file mode 100644 index e021ae4817a8c1c97338e61b00b230c881836fd8..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/models/melgan.py +++ /dev/null @@ -1,427 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2020 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""MelGAN Modules.""" - -import logging - -import numpy as np -import torch - -from modules.parallel_wavegan.layers import CausalConv1d -from modules.parallel_wavegan.layers import CausalConvTranspose1d -from modules.parallel_wavegan.layers import ResidualStack - - -class MelGANGenerator(torch.nn.Module): - """MelGAN generator module.""" - - def __init__(self, - in_channels=80, - out_channels=1, - kernel_size=7, - channels=512, - bias=True, - upsample_scales=[8, 8, 2, 2], - stack_kernel_size=3, - stacks=3, - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - pad="ReflectionPad1d", - pad_params={}, - use_final_nonlinear_activation=True, - use_weight_norm=True, - use_causal_conv=False, - ): - """Initialize MelGANGenerator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_size (int): Kernel size of initial and final conv layer. - channels (int): Initial number of channels for conv layer. - bias (bool): Whether to add bias parameter in convolution layers. - upsample_scales (list): List of upsampling scales. - stack_kernel_size (int): Kernel size of dilated conv layers in residual stack. - stacks (int): Number of stacks in a single residual stack. - nonlinear_activation (str): Activation function module name. - nonlinear_activation_params (dict): Hyperparameters for activation function. - pad (str): Padding function module name before dilated convolution layer. - pad_params (dict): Hyperparameters for padding function. 
- use_final_nonlinear_activation (torch.nn.Module): Activation function for the final layer. - use_weight_norm (bool): Whether to use weight norm. - If set to true, it will be applied to all of the conv layers. - use_causal_conv (bool): Whether to use causal convolution. - - """ - super(MelGANGenerator, self).__init__() - - # check hyper parameters is valid - assert channels >= np.prod(upsample_scales) - assert channels % (2 ** len(upsample_scales)) == 0 - if not use_causal_conv: - assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." - - # add initial layer - layers = [] - if not use_causal_conv: - layers += [ - getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params), - torch.nn.Conv1d(in_channels, channels, kernel_size, bias=bias), - ] - else: - layers += [ - CausalConv1d(in_channels, channels, kernel_size, - bias=bias, pad=pad, pad_params=pad_params), - ] - - for i, upsample_scale in enumerate(upsample_scales): - # add upsampling layer - layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)] - if not use_causal_conv: - layers += [ - torch.nn.ConvTranspose1d( - channels // (2 ** i), - channels // (2 ** (i + 1)), - upsample_scale * 2, - stride=upsample_scale, - padding=upsample_scale // 2 + upsample_scale % 2, - output_padding=upsample_scale % 2, - bias=bias, - ) - ] - else: - layers += [ - CausalConvTranspose1d( - channels // (2 ** i), - channels // (2 ** (i + 1)), - upsample_scale * 2, - stride=upsample_scale, - bias=bias, - ) - ] - - # add residual stack - for j in range(stacks): - layers += [ - ResidualStack( - kernel_size=stack_kernel_size, - channels=channels // (2 ** (i + 1)), - dilation=stack_kernel_size ** j, - bias=bias, - nonlinear_activation=nonlinear_activation, - nonlinear_activation_params=nonlinear_activation_params, - pad=pad, - pad_params=pad_params, - use_causal_conv=use_causal_conv, - ) - ] - - # add final layer - layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)] - if not use_causal_conv: - layers += [ - getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params), - torch.nn.Conv1d(channels // (2 ** (i + 1)), out_channels, kernel_size, bias=bias), - ] - else: - layers += [ - CausalConv1d(channels // (2 ** (i + 1)), out_channels, kernel_size, - bias=bias, pad=pad, pad_params=pad_params), - ] - if use_final_nonlinear_activation: - layers += [torch.nn.Tanh()] - - # define the model as a single function - self.melgan = torch.nn.Sequential(*layers) - - # apply weight norm - if use_weight_norm: - self.apply_weight_norm() - - # reset parameters - self.reset_parameters() - - def forward(self, c): - """Calculate forward propagation. - - Args: - c (Tensor): Input tensor (B, channels, T). - - Returns: - Tensor: Output tensor (B, 1, T ** prod(upsample_scales)). - - """ - return self.melgan(c) - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - def reset_parameters(self): - """Reset parameters. 
- - This initialization follows official implementation manner. - https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py - - """ - def _reset_parameters(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): - m.weight.data.normal_(0.0, 0.02) - logging.debug(f"Reset parameters in {m}.") - - self.apply(_reset_parameters) - - -class MelGANDiscriminator(torch.nn.Module): - """MelGAN discriminator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - kernel_sizes=[5, 3], - channels=16, - max_downsample_channels=1024, - bias=True, - downsample_scales=[4, 4, 4, 4], - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - pad="ReflectionPad1d", - pad_params={}, - ): - """Initilize MelGAN discriminator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer, - and the first and the second kernel sizes will be used for the last two layers. - For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15, - the last two layers' kernel size will be 5 and 3, respectively. - channels (int): Initial number of channels for conv layer. - max_downsample_channels (int): Maximum number of channels for downsampling layers. - bias (bool): Whether to add bias parameter in convolution layers. - downsample_scales (list): List of downsampling scales. - nonlinear_activation (str): Activation function module name. - nonlinear_activation_params (dict): Hyperparameters for activation function. - pad (str): Padding function module name before dilated convolution layer. - pad_params (dict): Hyperparameters for padding function. - - """ - super(MelGANDiscriminator, self).__init__() - self.layers = torch.nn.ModuleList() - - # check kernel size is valid - assert len(kernel_sizes) == 2 - assert kernel_sizes[0] % 2 == 1 - assert kernel_sizes[1] % 2 == 1 - - # add first layer - self.layers += [ - torch.nn.Sequential( - getattr(torch.nn, pad)((np.prod(kernel_sizes) - 1) // 2, **pad_params), - torch.nn.Conv1d(in_channels, channels, np.prod(kernel_sizes), bias=bias), - getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), - ) - ] - - # add downsample layers - in_chs = channels - for downsample_scale in downsample_scales: - out_chs = min(in_chs * downsample_scale, max_downsample_channels) - self.layers += [ - torch.nn.Sequential( - torch.nn.Conv1d( - in_chs, out_chs, - kernel_size=downsample_scale * 10 + 1, - stride=downsample_scale, - padding=downsample_scale * 5, - groups=in_chs // 4, - bias=bias, - ), - getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), - ) - ] - in_chs = out_chs - - # add final layers - out_chs = min(in_chs * 2, max_downsample_channels) - self.layers += [ - torch.nn.Sequential( - torch.nn.Conv1d( - in_chs, out_chs, kernel_sizes[0], - padding=(kernel_sizes[0] - 1) // 2, - bias=bias, - ), - getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), - ) - ] - self.layers += [ - torch.nn.Conv1d( - out_chs, out_channels, kernel_sizes[1], - padding=(kernel_sizes[1] - 1) // 2, - bias=bias, - ), - ] - - def forward(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, 1, T). - - Returns: - List: List of output tensors of each layer. 
- - """ - outs = [] - for f in self.layers: - x = f(x) - outs += [x] - - return outs - - -class MelGANMultiScaleDiscriminator(torch.nn.Module): - """MelGAN multi-scale discriminator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - scales=3, - downsample_pooling="AvgPool1d", - # follow the official implementation setting - downsample_pooling_params={ - "kernel_size": 4, - "stride": 2, - "padding": 1, - "count_include_pad": False, - }, - kernel_sizes=[5, 3], - channels=16, - max_downsample_channels=1024, - bias=True, - downsample_scales=[4, 4, 4, 4], - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - pad="ReflectionPad1d", - pad_params={}, - use_weight_norm=True, - ): - """Initilize MelGAN multi-scale discriminator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - downsample_pooling (str): Pooling module name for downsampling of the inputs. - downsample_pooling_params (dict): Parameters for the above pooling module. - kernel_sizes (list): List of two kernel sizes. The sum will be used for the first conv layer, - and the first and the second kernel sizes will be used for the last two layers. - channels (int): Initial number of channels for conv layer. - max_downsample_channels (int): Maximum number of channels for downsampling layers. - bias (bool): Whether to add bias parameter in convolution layers. - downsample_scales (list): List of downsampling scales. - nonlinear_activation (str): Activation function module name. - nonlinear_activation_params (dict): Hyperparameters for activation function. - pad (str): Padding function module name before dilated convolution layer. - pad_params (dict): Hyperparameters for padding function. - use_causal_conv (bool): Whether to use causal convolution. - - """ - super(MelGANMultiScaleDiscriminator, self).__init__() - self.discriminators = torch.nn.ModuleList() - - # add discriminators - for _ in range(scales): - self.discriminators += [ - MelGANDiscriminator( - in_channels=in_channels, - out_channels=out_channels, - kernel_sizes=kernel_sizes, - channels=channels, - max_downsample_channels=max_downsample_channels, - bias=bias, - downsample_scales=downsample_scales, - nonlinear_activation=nonlinear_activation, - nonlinear_activation_params=nonlinear_activation_params, - pad=pad, - pad_params=pad_params, - ) - ] - self.pooling = getattr(torch.nn, downsample_pooling)(**downsample_pooling_params) - - # apply weight norm - if use_weight_norm: - self.apply_weight_norm() - - # reset parameters - self.reset_parameters() - - def forward(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, 1, T). - - Returns: - List: List of list of each discriminator outputs, which consists of each layer output tensors. 
- - """ - outs = [] - for f in self.discriminators: - outs += [f(x)] - x = self.pooling(x) - - return outs - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - def reset_parameters(self): - """Reset parameters. - - This initialization follows official implementation manner. - https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py - - """ - def _reset_parameters(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d): - m.weight.data.normal_(0.0, 0.02) - logging.debug(f"Reset parameters in {m}.") - - self.apply(_reset_parameters) diff --git a/spaces/SERER/VITS-Umamusume-voice-synthesizer/text/cantonese.py b/spaces/SERER/VITS-Umamusume-voice-synthesizer/text/cantonese.py deleted file mode 100644 index b66d12138b81b70b86f18217d24a08fce76305c0..0000000000000000000000000000000000000000 --- a/spaces/SERER/VITS-Umamusume-voice-synthesizer/text/cantonese.py +++ /dev/null @@ -1,59 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('jyutjyu') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ei˥'), - ('B', 'biː˥'), - ('C', 'siː˥'), - ('D', 'tiː˥'), - ('E', 'iː˥'), - ('F', 'e˥fuː˨˩'), - ('G', 'tsiː˥'), - ('H', 'ɪk̚˥tsʰyː˨˩'), - ('I', 'ɐi˥'), - ('J', 'tsei˥'), - ('K', 'kʰei˥'), - ('L', 'e˥llou˨˩'), - ('M', 'ɛːm˥'), - ('N', 'ɛːn˥'), - ('O', 'ou˥'), - ('P', 'pʰiː˥'), - ('Q', 'kʰiːu˥'), - ('R', 'aː˥lou˨˩'), - ('S', 'ɛː˥siː˨˩'), - ('T', 'tʰiː˥'), - ('U', 'juː˥'), - ('V', 'wiː˥'), - ('W', 'tʊk̚˥piː˥juː˥'), - ('X', 'ɪk̚˥siː˨˩'), - ('Y', 'waːi˥'), - ('Z', 'iː˨sɛːt̚˥') -]] - - -def number_to_cantonese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def cantonese_to_ipa(text): - text = number_to_cantonese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/SRankChatGpt/Presentation-Assistant/app.py b/spaces/SRankChatGpt/Presentation-Assistant/app.py deleted file mode 100644 index 971d0c0dbf6123072971e8d629a016c1414ad286..0000000000000000000000000000000000000000 --- a/spaces/SRankChatGpt/Presentation-Assistant/app.py +++ /dev/null @@ -1,127 +0,0 @@ -import os -import presentation_assistant.env_set as env -env.env_set() -print(os.getcwd()) - -import streamlit as st -import PyPDF2 -import openai -import subprocess -from io import BytesIO -from pptx import Presentation - - -import presentation_assistant.presentation_assistant as pa - -tab1, tab2, tab3 = st.tabs(['What is PA!?', 'Text2PPT', 'PPT2Script']) - -with tab1: - st.header('Introduction') - st.title('PA!(Presentation Assistant):sparkles:') - contents = """ - ▶ Based on the content entered by the user, it :blue[automatically creates] PPT and - provides a presentation :red[script] to improve presentation skills!""" - st.markdown(contents) - st.markdown('-------------------------') - st.header('How to use') - st.subheader('Text2PPT') - contents = """ - ▶ If the user provides a link or file, we will :blue[create a presentation material] for you! - The user only needs to select the desired theme (template) type and number of pages!""" - st.markdown(contents) - st.subheader('PPT2Script') - contents = """ - ▶ If the user provides PPT or PDF presentation materials, we will automatically create a :blue[presentation script] for you!""" - st.markdown(contents) - - # Test - # test_ppt_theme = "--reference-doc="+"/home/user/app/template/blue"+".pptx" - # subprocess.run(["/home/user/app/pandoc-2.14.2/bin/pandoc", "text2ppt_test.md", "-t", "pptx", test_ppt_theme, "-o", "output.pptx"], capture_output=True) - # print(os.listdir(os.getcwd())) - # prs = Presentation("output.pptx") - # binary_output = BytesIO() - # prs.save(binary_output) - # st.download_button(label="Download PPT", - # data = binary_output.getvalue(), - # file_name="export_output.pptx", - # mime='application/octet-stream', key = "") - - -with tab2: - st.header('Text2PPT') - gpt_token = st.text_input('Please enter your ChatGPT API token.', key="") - st.markdown('-------------------------') - - st.subheader(':computer: PPT Auto Generator :computer:') - - thema_select = st.selectbox( - 'Please select the template you want.', - ['default', 'yellow', 'gradation_green', 'blue', 'green', 'custom']) - - if thema_select == "custom": - uploaded_template_file = st.file_uploader('Choose File!', type='pptx', key="") - - st.markdown('-------------------------') - - page_choice = st.slider('Number of PPT pages', min_value=2, max_value=10, step=1, value=5) - - st.markdown('-------------------------') - - my_order = ['Text', 'Link', 'PDF'] - status = st.radio('Please select the file type and enter the content! 
:smile: ', my_order) - - # First method - if status == my_order[0]: - input_text = st.text_area('Enter TEXT', height=5) - - elif status == my_order[1]: - input_text = st.text_area('Enter URL', height=5) - - elif status == my_order[2]: - input_text = st.file_uploader('Upload PDF', type=['pdf']) - - input_text_check = st.button('Confirm', key="") - - st.markdown('-------------------------') - - if input_text_check == True: - with st.spinner('Wait for it...'): - pa.text2ppt(gpt_token, pa.generate_text2ppt_input_prompt(status, input_text, page_choice), thema_select) - prs = Presentation("text2ppt_output.pptx") - binary_output = BytesIO() - prs.save(binary_output) - st.success('Done!') - st.download_button(label="Download PPT", - data = binary_output.getvalue(), - file_name="export_output.pptx", - mime='application/octet-stream', key = "") - -with tab3: - st.header('PPT2Script') - st.subheader(':computer: Script Auto Generator :computer:') - gpt_token = st.text_input('Please enter your ChatGPT API token.', key="") - st.markdown('-------------------------') - - st.subheader(':bookmark_tabs: Presentation Script Generator') - - file_order = ['PDF', 'PPT'] - choose = st.radio('Please select the file format of the presentation material', file_order) - - if choose == file_order[0]: - uploaded_file = st.file_uploader('Choose File!', type='pdf', key="") - elif choose == file_order[1]: - uploaded_file = st.file_uploader('Choose File!', type='pptx', key="") - - input_file_check = st.button('Confirm', key="") # When this button is pressed, the input file should be passed - st.markdown('-------------------------') - - if input_file_check == True: - with st.spinner('Wait for it...'): - with open(uploaded_file.name, mode='wb') as w: - w.write(uploaded_file.getvalue()) - - script = pa.ppt2script(gpt_token, uploaded_file.name, choose) - - st.success('Done!') - st.download_button('Download Script', - data=script, file_name="script_output.txt", key="") diff --git a/spaces/SWHL/PaperEdgeDemo/demo_cpu.py b/spaces/SWHL/PaperEdgeDemo/demo_cpu.py deleted file mode 100644 index e486dda143570187e42196d5e81017afee0857a3..0000000000000000000000000000000000000000 --- a/spaces/SWHL/PaperEdgeDemo/demo_cpu.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- encoding: utf-8 -*- -import argparse -import copy -import time -from pathlib import Path - -import cv2 -import numpy as np -import torch -import torch.nn.functional as F -from networks.paperedge_cpu import GlobalWarper, LocalWarper, WarperUtil - -cv2.setNumThreads(0) -cv2.ocl.setUseOpenCL(False) - - -class PaperEdge(object): - def __init__(self, enet_path, tnet_path, device) -> None: - self.device = device - - self.netG = GlobalWarper().to(device) - netG_state = torch.load(enet_path, map_location=device)['G'] - self.netG.load_state_dict(netG_state) - self.netG.eval() - - self.netL = LocalWarper().to(device) - netL_state = torch.load(tnet_path, map_location=device)['L'] - self.netL.load_state_dict(netL_state) - self.netL.eval() - - self.warpUtil = WarperUtil(64).to(device) - - @staticmethod - def load_img(img_path): - im = cv2.imread(img_path).astype(np.float32) / 255.0 - im = im[:, :, (2, 1, 0)] - im = cv2.resize(im, (256, 256), interpolation=cv2.INTER_AREA) - im = torch.from_numpy(np.transpose(im, (2, 0, 1))) - return im - - def infer(self, img_path): - gs_d, ls_d = None, None - with torch.no_grad(): - x = self.load_img(img_path) - x = x.unsqueeze(0).to(self.device) - - d = self.netG(x) - - d = self.warpUtil.global_post_warp(d, 64) - - gs_d = copy.deepcopy(d) - - d = 
F.interpolate(d, size=256, mode='bilinear', align_corners=True) - y0 = F.grid_sample(x, d.permute(0, 2, 3, 1), align_corners=True) - ls_d = self.netL(y0) - - ls_d = F.interpolate(ls_d, size=256, mode='bilinear', align_corners=True) - ls_d = ls_d.clamp(-1.0, 1.0) - - im = cv2.imread(img_path).astype(np.float32) / 255.0 - im = torch.from_numpy(np.transpose(im, (2, 0, 1))) - im = im.to(self.device).unsqueeze(0) - - gs_d = F.interpolate(gs_d, (im.size(2), im.size(3)), mode='bilinear', align_corners=True) - gs_y = F.grid_sample(im, gs_d.permute(0, 2, 3, 1), align_corners=True).detach() - - ls_d = F.interpolate(ls_d, (im.size(2), im.size(3)), mode='bilinear', align_corners=True) - ls_y = F.grid_sample(gs_y, ls_d.permute(0, 2, 3, 1), align_corners=True).detach() - ls_y = ls_y.squeeze().permute(1, 2, 0).cpu().numpy() - - save_path = f'{dst_dir}/result_ls.png' - cv2.imwrite(save_path, ls_y * 255.) - return save_path - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--Enet_ckpt', type=str, - default='models/G_w_checkpoint_13820.pt') - parser.add_argument('--Tnet_ckpt', type=str, - default='models/L_w_checkpoint_27640.pt') - parser.add_argument('--img_path', type=str, default='images/3.jpg') - parser.add_argument('--out_dir', type=str, default='output') - parser.add_argument('--device', type=str, default='cpu') - args = parser.parse_args() - - if args.device == 'cuda' and torch.cuda.is_available(): - device = torch.device('cuda:0') - else: - device = torch.device('cpu') - - dst_dir = args.out_dir - Path(dst_dir).mkdir(parents=True, exist_ok=True) - - paper_edge = PaperEdge(args.Enet_ckpt, args.Tnet_ckpt, device) - - paper_edge.infer(args.img_path) - print('ok') \ No newline at end of file diff --git a/spaces/ServerX/PorcoDiaz/infer/lib/uvr5_pack/lib_v5/nets_537227KB.py b/spaces/ServerX/PorcoDiaz/infer/lib/uvr5_pack/lib_v5/nets_537227KB.py deleted file mode 100644 index 823b44fb64898e8dcbb12180ba45d1718f9b03f7..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/infer/lib/uvr5_pack/lib_v5/nets_537227KB.py +++ /dev/null @@ -1,123 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/Shizune/neko-proxy/README.md b/spaces/Shizune/neko-proxy/README.md deleted file mode 100644 index aeae762d373e78d094af59541f30e53cd6668d0e..0000000000000000000000000000000000000000 --- a/spaces/Shizune/neko-proxy/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Neko Proxy -emoji: 🔥 -colorFrom: red -colorTo: yellow -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py deleted file mode 100644 index 12f6d402a3c4a113d4c37be062790fa435b72104..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Evaluation with objective metrics for the pretrained AudioGen models. -This grid takes signature from the training grid and runs evaluation-only stage. - -When running the grid for the first time, please use: -REGEN=1 dora grid audiogen.audiogen_pretrained_16khz_eval -and re-use the REGEN=1 option when the grid is changed to force regenerating it. - -Note that you need the proper metrics external libraries setup to use all -the objective metrics activated in this grid. Refer to the README for more information. -""" - -import os - -from ..musicgen._explorers import GenerationEvalExplorer -from ...environment import AudioCraftEnvironment -from ... import train - - -def eval(launcher, batch_size: int = 32): - opts = { - 'dset': 'audio/audiocaps_16khz', - 'solver/audiogen/evaluation': 'objective_eval', - 'execute_only': 'evaluate', - '+dataset.evaluate.batch_size': batch_size, - '+metrics.fad.tf.batch_size': 32, - } - # binary for FAD computation: replace this path with your own path - metrics_opts = { - 'metrics.fad.tf.bin': '/data/home/jadecopet/local/usr/opt/google-research' - } - opt1 = {'generate.lm.use_sampling': True, 'generate.lm.top_k': 250, 'generate.lm.top_p': 0.} - opt2 = {'transformer_lm.two_step_cfg': True} - - sub = launcher.bind(opts) - sub.bind_(metrics_opts) - - # base objective metrics - sub(opt1, opt2) - - -@GenerationEvalExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=4, partition=partitions) - - if 'REGEN' not in os.environ: - folder = train.main.dora.dir / 'grids' / __name__.split('.', 2)[-1] - with launcher.job_array(): - for sig in folder.iterdir(): - if not sig.is_symlink(): - continue - xp = train.main.get_xp_from_sig(sig.name) - launcher(xp.argv) - return - - audiogen_base = launcher.bind(solver="audiogen/audiogen_base_16khz") - audiogen_base.bind_({'autocast': False, 'fsdp.use': True}) - - audiogen_base_medium = audiogen_base.bind({'continue_from': '//pretrained/facebook/audiogen-medium'}) - audiogen_base_medium.bind_({'model/lm/model_scale': 'medium'}) - eval(audiogen_base_medium, batch_size=128) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/PcdImagePlugin.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/PcdImagePlugin.py deleted file mode 100644 index e390f3fe51dcb1ef4a490b55d18ac827e170aa37..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/PcdImagePlugin.py +++ /dev/null @@ -1,62 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# PCD file handling -# -# History: -# 96-05-10 fl Created -# 96-05-27 fl Added draft mode (128x192, 256x384) -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1996. -# -# See the README file for information on usage and redistribution. -# - - -from . 
import Image, ImageFile - -## -# Image plugin for PhotoCD images. This plugin only reads the 768x512 -# image from the file; higher resolutions are encoded in a proprietary -# encoding. - - -class PcdImageFile(ImageFile.ImageFile): - format = "PCD" - format_description = "Kodak PhotoCD" - - def _open(self): - # rough - self.fp.seek(2048) - s = self.fp.read(2048) - - if s[:4] != b"PCD_": - msg = "not a PCD file" - raise SyntaxError(msg) - - orientation = s[1538] & 3 - self.tile_post_rotate = None - if orientation == 1: - self.tile_post_rotate = 90 - elif orientation == 3: - self.tile_post_rotate = -90 - - self.mode = "RGB" - self._size = 768, 512 # FIXME: not correct for rotated images! - self.tile = [("pcd", (0, 0) + self.size, 96 * 2048, None)] - - def load_end(self): - if self.tile_post_rotate: - # Handle rotated PCDs - self.im = self.im.rotate(self.tile_post_rotate) - self._size = self.im.size - - -# -# registry - -Image.register_open(PcdImageFile.format, PcdImageFile) - -Image.register_extension(PcdImageFile.format, ".pcd") diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/api/fastapi.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/api/fastapi.py deleted file mode 100644 index 0c41778c551159700a733d2a1517ce829c34bd25..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/api/fastapi.py +++ /dev/null @@ -1,401 +0,0 @@ -from typing import Optional, cast -from chromadb.api import API -from chromadb.config import System -from chromadb.api.types import ( - Documents, - Embeddings, - EmbeddingFunction, - IDs, - Include, - Metadatas, - Where, - WhereDocument, - GetResult, - QueryResult, - CollectionMetadata, -) -import chromadb.utils.embedding_functions as ef -import pandas as pd -import requests -import json -from typing import Sequence -from chromadb.api.models.Collection import Collection -import chromadb.errors as errors -from uuid import UUID -from chromadb.telemetry import Telemetry -from overrides import override - - -class FastAPI(API): - def __init__(self, system: System): - super().__init__(system) - url_prefix = "https" if system.settings.chroma_server_ssl_enabled else "http" - system.settings.require("chroma_server_host") - system.settings.require("chroma_server_http_port") - self._api_url = f"{url_prefix}://{system.settings.chroma_server_host}:{system.settings.chroma_server_http_port}/api/v1" - self._telemetry_client = self.require(Telemetry) - - @override - def heartbeat(self) -> int: - """Returns the current server time in nanoseconds to check if the server is alive""" - resp = requests.get(self._api_url) - raise_chroma_error(resp) - return int(resp.json()["nanosecond heartbeat"]) - - @override - def list_collections(self) -> Sequence[Collection]: - """Returns a list of all collections""" - resp = requests.get(self._api_url + "/collections") - raise_chroma_error(resp) - json_collections = resp.json() - collections = [] - for json_collection in json_collections: - collections.append(Collection(self, **json_collection)) - - return collections - - @override - def create_collection( - self, - name: str, - metadata: Optional[CollectionMetadata] = None, - embedding_function: Optional[EmbeddingFunction] = ef.DefaultEmbeddingFunction(), - get_or_create: bool = False, - ) -> Collection: - """Creates a collection""" - resp = requests.post( - self._api_url + "/collections", - data=json.dumps( - {"name": name, "metadata": metadata, "get_or_create": get_or_create} - ), - ) - 
raise_chroma_error(resp) - resp_json = resp.json() - return Collection( - client=self, - id=resp_json["id"], - name=resp_json["name"], - embedding_function=embedding_function, - metadata=resp_json["metadata"], - ) - - @override - def get_collection( - self, - name: str, - embedding_function: Optional[EmbeddingFunction] = ef.DefaultEmbeddingFunction(), - ) -> Collection: - """Returns a collection""" - resp = requests.get(self._api_url + "/collections/" + name) - raise_chroma_error(resp) - resp_json = resp.json() - return Collection( - client=self, - name=resp_json["name"], - id=resp_json["id"], - embedding_function=embedding_function, - metadata=resp_json["metadata"], - ) - - @override - def get_or_create_collection( - self, - name: str, - metadata: Optional[CollectionMetadata] = None, - embedding_function: Optional[EmbeddingFunction] = ef.DefaultEmbeddingFunction(), - ) -> Collection: - """Get a collection, or return it if it exists""" - - return self.create_collection( - name, metadata, embedding_function, get_or_create=True - ) - - @override - def _modify( - self, - id: UUID, - new_name: Optional[str] = None, - new_metadata: Optional[CollectionMetadata] = None, - ) -> None: - """Updates a collection""" - resp = requests.put( - self._api_url + "/collections/" + str(id), - data=json.dumps({"new_metadata": new_metadata, "new_name": new_name}), - ) - raise_chroma_error(resp) - - @override - def delete_collection(self, name: str) -> None: - """Deletes a collection""" - resp = requests.delete(self._api_url + "/collections/" + name) - raise_chroma_error(resp) - - @override - def _count(self, collection_id: UUID) -> int: - """Returns the number of embeddings in the database""" - resp = requests.get( - self._api_url + "/collections/" + str(collection_id) + "/count" - ) - raise_chroma_error(resp) - return cast(int, resp.json()) - - @override - def _peek(self, collection_id: UUID, n: int = 10) -> GetResult: - return self._get( - collection_id, - limit=n, - include=["embeddings", "documents", "metadatas"], - ) - - @override - def _get( - self, - collection_id: UUID, - ids: Optional[IDs] = None, - where: Optional[Where] = {}, - sort: Optional[str] = None, - limit: Optional[int] = None, - offset: Optional[int] = None, - page: Optional[int] = None, - page_size: Optional[int] = None, - where_document: Optional[WhereDocument] = {}, - include: Include = ["metadatas", "documents"], - ) -> GetResult: - """Gets embeddings from the database""" - if page and page_size: - offset = (page - 1) * page_size - limit = page_size - - resp = requests.post( - self._api_url + "/collections/" + str(collection_id) + "/get", - data=json.dumps( - { - "ids": ids, - "where": where, - "sort": sort, - "limit": limit, - "offset": offset, - "where_document": where_document, - "include": include, - } - ), - ) - - raise_chroma_error(resp) - body = resp.json() - return GetResult( - ids=body["ids"], - embeddings=body.get("embeddings", None), - metadatas=body.get("metadatas", None), - documents=body.get("documents", None), - ) - - @override - def _delete( - self, - collection_id: UUID, - ids: Optional[IDs] = None, - where: Optional[Where] = {}, - where_document: Optional[WhereDocument] = {}, - ) -> IDs: - """Deletes embeddings from the database""" - - resp = requests.post( - self._api_url + "/collections/" + str(collection_id) + "/delete", - data=json.dumps( - {"where": where, "ids": ids, "where_document": where_document} - ), - ) - - raise_chroma_error(resp) - return cast(IDs, resp.json()) - - @override - def _add( - self, - ids: IDs, 
- collection_id: UUID, - embeddings: Embeddings, - metadatas: Optional[Metadatas] = None, - documents: Optional[Documents] = None, - increment_index: bool = True, - ) -> bool: - """ - Adds a batch of embeddings to the database - - pass in column oriented data lists - - by default, the index is progressively built up as you add more data. If for ingestion performance reasons you want to disable this, set increment_index to False - - and then manually create the index yourself with collection.create_index() - """ - resp = requests.post( - self._api_url + "/collections/" + str(collection_id) + "/add", - data=json.dumps( - { - "ids": ids, - "embeddings": embeddings, - "metadatas": metadatas, - "documents": documents, - "increment_index": increment_index, - } - ), - ) - - raise_chroma_error(resp) - return True - - @override - def _update( - self, - collection_id: UUID, - ids: IDs, - embeddings: Optional[Embeddings] = None, - metadatas: Optional[Metadatas] = None, - documents: Optional[Documents] = None, - ) -> bool: - """ - Updates a batch of embeddings in the database - - pass in column oriented data lists - """ - - resp = requests.post( - self._api_url + "/collections/" + str(collection_id) + "/update", - data=json.dumps( - { - "ids": ids, - "embeddings": embeddings, - "metadatas": metadatas, - "documents": documents, - } - ), - ) - - resp.raise_for_status() - return True - - @override - def _upsert( - self, - collection_id: UUID, - ids: IDs, - embeddings: Embeddings, - metadatas: Optional[Metadatas] = None, - documents: Optional[Documents] = None, - increment_index: bool = True, - ) -> bool: - """ - Updates a batch of embeddings in the database - - pass in column oriented data lists - """ - - resp = requests.post( - self._api_url + "/collections/" + str(collection_id) + "/upsert", - data=json.dumps( - { - "ids": ids, - "embeddings": embeddings, - "metadatas": metadatas, - "documents": documents, - "increment_index": increment_index, - } - ), - ) - - resp.raise_for_status() - return True - - @override - def _query( - self, - collection_id: UUID, - query_embeddings: Embeddings, - n_results: int = 10, - where: Optional[Where] = {}, - where_document: Optional[WhereDocument] = {}, - include: Include = ["metadatas", "documents", "distances"], - ) -> QueryResult: - """Gets the nearest neighbors of a single embedding""" - - resp = requests.post( - self._api_url + "/collections/" + str(collection_id) + "/query", - data=json.dumps( - { - "query_embeddings": query_embeddings, - "n_results": n_results, - "where": where, - "where_document": where_document, - "include": include, - } - ), - ) - - raise_chroma_error(resp) - body = resp.json() - - return QueryResult( - ids=body["ids"], - distances=body.get("distances", None), - embeddings=body.get("embeddings", None), - metadatas=body.get("metadatas", None), - documents=body.get("documents", None), - ) - - @override - def reset(self) -> None: - """Resets the database""" - resp = requests.post(self._api_url + "/reset") - raise_chroma_error(resp) - - @override - def persist(self) -> bool: - """Persists the database""" - resp = requests.post(self._api_url + "/persist") - raise_chroma_error(resp) - return cast(bool, resp.json()) - - @override - def raw_sql(self, sql: str) -> pd.DataFrame: - """Runs a raw SQL query against the database""" - resp = requests.post( - self._api_url + "/raw_sql", data=json.dumps({"raw_sql": sql}) - ) - raise_chroma_error(resp) - return pd.DataFrame.from_dict(resp.json()) - - @override - def create_index(self, collection_name: str) -> 
bool: - """Creates an index for the given space key""" - resp = requests.post( - self._api_url + "/collections/" + collection_name + "/create_index" - ) - raise_chroma_error(resp) - return cast(bool, resp.json()) - - @override - def get_version(self) -> str: - """Returns the version of the server""" - resp = requests.get(self._api_url + "/version") - raise_chroma_error(resp) - return cast(str, resp.json()) - - -def raise_chroma_error(resp: requests.Response) -> None: - """Raises an error if the response is not ok, using a ChromaError if possible""" - if resp.ok: - return - - chroma_error = None - try: - body = resp.json() - if "error" in body: - if body["error"] in errors.error_types: - chroma_error = errors.error_types[body["error"]](body["message"]) - - except BaseException: - pass - - if chroma_error: - raise chroma_error - - try: - resp.raise_for_status() - except requests.HTTPError: - raise (Exception(resp.text)) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/datatypes/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/datatypes/__init__.py deleted file mode 100644 index f364badd886f5c61ef1e19cb449bc28c8d196df8..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/datatypes/__init__.py +++ /dev/null @@ -1 +0,0 @@ -import clickhouse_connect.cc_sqlalchemy.datatypes.sqltypes diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/base_doc/io/json.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/base_doc/io/json.py deleted file mode 100644 index 27468b2b61c7e6543c18091a8834bacb2f729b52..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/base_doc/io/json.py +++ /dev/null @@ -1,29 +0,0 @@ -import orjson -from pydantic.json import ENCODERS_BY_TYPE - - -def _default_orjson(obj): - """ - default option for orjson dumps. - :param obj: - :return: return a json compatible object - """ - from docarray.base_doc import BaseNode - - if isinstance(obj, BaseNode): - return obj._docarray_to_json_compatible() - else: - for cls_, encoder in ENCODERS_BY_TYPE.items(): - if isinstance(obj, cls_): - return encoder(obj) - return obj - - -def orjson_dumps(v, *, default=None) -> bytes: - # dumps to bytes using orjson - return orjson.dumps(v, default=_default_orjson, option=orjson.OPT_SERIALIZE_NUMPY) - - -def orjson_dumps_and_decode(v, *, default=None) -> str: - # dumps to bytes using orjson - return orjson_dumps(v, default=default).decode() diff --git a/spaces/Suniilkumaar/MusicGen-updated/audiocraft/utils/__init__.py b/spaces/Suniilkumaar/MusicGen-updated/audiocraft/utils/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/Suniilkumaar/MusicGen-updated/audiocraft/utils/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
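A minimal usage sketch for the orjson helpers deleted above (hypothetical payload and variable names; assumes only the docarray.base_doc.io.json module path and the functions shown in this diff):

import numpy as np
from docarray.base_doc.io.json import orjson_dumps, orjson_dumps_and_decode

# _default_orjson falls back to pydantic's ENCODERS_BY_TYPE for known types,
# and OPT_SERIALIZE_NUMPY lets orjson encode the ndarray without a custom encoder.
payload = {"id": "doc-1", "embedding": np.array([0.1, 0.2, 0.3])}
as_bytes = orjson_dumps(payload)             # JSON as bytes, e.g. b'{"id":"doc-1",...}'
as_text = orjson_dumps_and_decode(payload)   # same JSON decoded to str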
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/utils/ext_loader.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/utils/ext_loader.py deleted file mode 100644 index 08132d2c1b9a1c28880e4bab4d4fa1ba39d9d083..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/utils/ext_loader.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import importlib -import os -import pkgutil -import warnings -from collections import namedtuple - -import torch - -if torch.__version__ != 'parrots': - - def load_ext(name, funcs): - ext = importlib.import_module('mmcv.' + name) - for fun in funcs: - assert hasattr(ext, fun), f'{fun} miss in module {name}' - return ext -else: - from parrots import extension - from parrots.base import ParrotsException - - has_return_value_ops = [ - 'nms', - 'softnms', - 'nms_match', - 'nms_rotated', - 'top_pool_forward', - 'top_pool_backward', - 'bottom_pool_forward', - 'bottom_pool_backward', - 'left_pool_forward', - 'left_pool_backward', - 'right_pool_forward', - 'right_pool_backward', - 'fused_bias_leakyrelu', - 'upfirdn2d', - 'ms_deform_attn_forward', - 'pixel_group', - 'contour_expand', - ] - - def get_fake_func(name, e): - - def fake_func(*args, **kwargs): - warnings.warn(f'{name} is not supported in parrots now') - raise e - - return fake_func - - def load_ext(name, funcs): - ExtModule = namedtuple('ExtModule', funcs) - ext_list = [] - lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - for fun in funcs: - try: - ext_fun = extension.load(fun, name, lib_dir=lib_root) - except ParrotsException as e: - if 'No element registered' not in e.message: - warnings.warn(e.message) - ext_fun = get_fake_func(fun, e) - ext_list.append(ext_fun) - else: - if fun in has_return_value_ops: - ext_list.append(ext_fun.op) - else: - ext_list.append(ext_fun.op_) - return ExtModule(*ext_list) - - -def check_ops_exist(): - ext_loader = pkgutil.find_loader('mmcv._ext') - return ext_loader is not None diff --git a/spaces/TNR-5/Search/search.php b/spaces/TNR-5/Search/search.php deleted file mode 100644 index 084a9ed2c39d99a2e6024a233f5a3a49d1efdf2e..0000000000000000000000000000000000000000 --- a/spaces/TNR-5/Search/search.php +++ /dev/null @@ -1,7 +0,0 @@ - diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_resources/_compat.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_resources/_compat.py deleted file mode 100644 index 8b5b1d280f3c7b45cee54338f60d5271a7510c2e..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_resources/_compat.py +++ /dev/null @@ -1,108 +0,0 @@ -# flake8: noqa - -import abc -import os -import sys -import pathlib -from contextlib import suppress -from typing import Union - - -if sys.version_info >= (3, 10): - from zipfile import Path as ZipPath # type: ignore -else: - from ..zipp import Path as ZipPath # type: ignore - - -try: - from typing import runtime_checkable # type: ignore -except ImportError: - - def runtime_checkable(cls): # type: ignore - return cls - - -try: - from typing import Protocol # type: ignore -except ImportError: - Protocol = abc.ABC # type: ignore - - -class TraversableResourcesLoader: - """ - Adapt loaders to provide TraversableResources and other - compatibility. 
- - Used primarily for Python 3.9 and earlier where the native - loaders do not yet implement TraversableResources. - """ - - def __init__(self, spec): - self.spec = spec - - @property - def path(self): - return self.spec.origin - - def get_resource_reader(self, name): - from . import readers, _adapters - - def _zip_reader(spec): - with suppress(AttributeError): - return readers.ZipReader(spec.loader, spec.name) - - def _namespace_reader(spec): - with suppress(AttributeError, ValueError): - return readers.NamespaceReader(spec.submodule_search_locations) - - def _available_reader(spec): - with suppress(AttributeError): - return spec.loader.get_resource_reader(spec.name) - - def _native_reader(spec): - reader = _available_reader(spec) - return reader if hasattr(reader, 'files') else None - - def _file_reader(spec): - try: - path = pathlib.Path(self.path) - except TypeError: - return None - if path.exists(): - return readers.FileReader(self) - - return ( - # native reader if it supplies 'files' - _native_reader(self.spec) - or - # local ZipReader if a zip module - _zip_reader(self.spec) - or - # local NamespaceReader if a namespace module - _namespace_reader(self.spec) - or - # local FileReader - _file_reader(self.spec) - # fallback - adapt the spec ResourceReader to TraversableReader - or _adapters.CompatibilityFiles(self.spec) - ) - - -def wrap_spec(package): - """ - Construct a package spec with traversable compatibility - on the spec/loader/reader. - - Supersedes _adapters.wrap_spec to use TraversableResourcesLoader - from above for older Python compatibility (<3.10). - """ - from . import _adapters - - return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) - - -if sys.version_info >= (3, 9): - StrPath = Union[str, os.PathLike[str]] -else: - # PathLike is only subscriptable at runtime in 3.9+ - StrPath = Union[str, "os.PathLike[str]"] diff --git a/spaces/TheSxrynlxX/Idk/Dockerfile b/spaces/TheSxrynlxX/Idk/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- a/spaces/TheSxrynlxX/Idk/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/ThomasSimonini/Huggy/README.md b/spaces/ThomasSimonini/Huggy/README.md deleted file mode 100644 index ae6e4344e5d1308e86ed91a8bbaf9d6996efc978..0000000000000000000000000000000000000000 --- a/spaces/ThomasSimonini/Huggy/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Huggy -emoji: 🐶 -colorFrom: red -colorTo: indigo -sdk: static -pinned: false -license: cc-by-nc-sa-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Usually3/text-to-image/README.md b/spaces/Usually3/text-to-image/README.md deleted file mode 100644 index fa4f13e9e0950ab60068bf71637addb8cb7f7c53..0000000000000000000000000000000000000000 --- a/spaces/Usually3/text-to-image/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Text To Image -emoji: 🏢 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/VoiceHero69/changer/webui/modules/implementations/rvc/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/VoiceHero69/changer/webui/modules/implementations/rvc/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000 --- a/spaces/VoiceHero69/changer/webui/modules/implementations/rvc/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null @@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/spaces/Wanlau/sovits-4.0_datealive/resample.py b/spaces/Wanlau/sovits-4.0_datealive/resample.py deleted file mode 100644 index f84119cd239b49d260ed1d9e367206adcc3aa03d..0000000000000000000000000000000000000000 --- a/spaces/Wanlau/sovits-4.0_datealive/resample.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count -from scipy.io import wavfile -from tqdm import tqdm - - -def process(item): - spkdir, wav_name, args = item - # speaker 's5', 'p280', 'p315' are excluded, - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - os.makedirs(os.path.join(args.out_dir2, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, sr=None) - wav, _ = librosa.effects.trim(wav, top_db=20) - peak = np.abs(wav).max() - if peak > 1.0: - wav = 0.98 * wav / peak - wav2 = librosa.resample(wav, orig_sr=sr, target_sr=args.sr2) - wav2 /= max(wav2.max(), -wav2.min()) - save_name = wav_name - save_path2 = os.path.join(args.out_dir2, speaker, save_name) - wavfile.write( - save_path2, - args.sr2, - (wav2 * np.iinfo(np.int16).max).astype(np.int16) - ) - - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--sr2", type=int, default=44100, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./dataset_raw", help="path to source dir") - parser.add_argument("--out_dir2", type=str, default="./dataset/44k", help="path to target dir") - args = parser.parse_args() - processs = cpu_count()-2 if cpu_count() >4 else 1 - pool = Pool(processes=processs) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])): - pass diff --git a/spaces/Wazzzabeee/image-video-colorization/models/deep_colorization/colorizers/siggraph17.py b/spaces/Wazzzabeee/image-video-colorization/models/deep_colorization/colorizers/siggraph17.py deleted file mode 100644 index 775a23f25d03f3bf1761e5d2bbf4b400eb2c6047..0000000000000000000000000000000000000000 --- a/spaces/Wazzzabeee/image-video-colorization/models/deep_colorization/colorizers/siggraph17.py +++ /dev/null @@ -1,168 +0,0 @@ -import torch -import torch.nn as nn - -from .base_color import * - -class SIGGRAPHGenerator(BaseColor): - def __init__(self, norm_layer=nn.BatchNorm2d, classes=529): - super(SIGGRAPHGenerator, self).__init__() - - # Conv1 - model1=[nn.Conv2d(4, 64, kernel_size=3, stride=1, padding=1, bias=True),] - 
model1+=[nn.ReLU(True),] - model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True),] - model1+=[nn.ReLU(True),] - model1+=[norm_layer(64),] - # add a subsampling operation - - # Conv2 - model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),] - model2+=[nn.ReLU(True),] - model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),] - model2+=[nn.ReLU(True),] - model2+=[norm_layer(128),] - # add a subsampling layer operation - - # Conv3 - model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model3+=[nn.ReLU(True),] - model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model3+=[nn.ReLU(True),] - model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model3+=[nn.ReLU(True),] - model3+=[norm_layer(256),] - # add a subsampling layer operation - - # Conv4 - model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model4+=[nn.ReLU(True),] - model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model4+=[nn.ReLU(True),] - model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model4+=[nn.ReLU(True),] - model4+=[norm_layer(512),] - - # Conv5 - model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model5+=[nn.ReLU(True),] - model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model5+=[nn.ReLU(True),] - model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model5+=[nn.ReLU(True),] - model5+=[norm_layer(512),] - - # Conv6 - model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model6+=[nn.ReLU(True),] - model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model6+=[nn.ReLU(True),] - model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model6+=[nn.ReLU(True),] - model6+=[norm_layer(512),] - - # Conv7 - model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model7+=[nn.ReLU(True),] - model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model7+=[nn.ReLU(True),] - model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model7+=[nn.ReLU(True),] - model7+=[norm_layer(512),] - - # Conv7 - model8up=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=True)] - model3short8=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - - model8=[nn.ReLU(True),] - model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model8+=[nn.ReLU(True),] - model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model8+=[nn.ReLU(True),] - model8+=[norm_layer(256),] - - # Conv9 - model9up=[nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=True),] - model2short9=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),] - # add the two feature maps above - - model9=[nn.ReLU(True),] - model9+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),] - model9+=[nn.ReLU(True),] - model9+=[norm_layer(128),] - - # Conv10 - model10up=[nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=True),] - model1short10=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),] - # add the two feature maps above - - model10=[nn.ReLU(True),] - model10+=[nn.Conv2d(128, 128, kernel_size=3, dilation=1, 
stride=1, padding=1, bias=True),] - model10+=[nn.LeakyReLU(negative_slope=.2),] - - # classification output - model_class=[nn.Conv2d(256, classes, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),] - - # regression output - model_out=[nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),] - model_out+=[nn.Tanh()] - - self.model1 = nn.Sequential(*model1) - self.model2 = nn.Sequential(*model2) - self.model3 = nn.Sequential(*model3) - self.model4 = nn.Sequential(*model4) - self.model5 = nn.Sequential(*model5) - self.model6 = nn.Sequential(*model6) - self.model7 = nn.Sequential(*model7) - self.model8up = nn.Sequential(*model8up) - self.model8 = nn.Sequential(*model8) - self.model9up = nn.Sequential(*model9up) - self.model9 = nn.Sequential(*model9) - self.model10up = nn.Sequential(*model10up) - self.model10 = nn.Sequential(*model10) - self.model3short8 = nn.Sequential(*model3short8) - self.model2short9 = nn.Sequential(*model2short9) - self.model1short10 = nn.Sequential(*model1short10) - - self.model_class = nn.Sequential(*model_class) - self.model_out = nn.Sequential(*model_out) - - self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='bilinear'),]) - self.softmax = nn.Sequential(*[nn.Softmax(dim=1),]) - - def forward(self, input_A, input_B=None, mask_B=None): - if(input_B is None): - input_B = torch.cat((input_A*0, input_A*0), dim=1) - if(mask_B is None): - mask_B = input_A*0 - - conv1_2 = self.model1(torch.cat((self.normalize_l(input_A),self.normalize_ab(input_B),mask_B),dim=1)) - conv2_2 = self.model2(conv1_2[:,:,::2,::2]) - conv3_3 = self.model3(conv2_2[:,:,::2,::2]) - conv4_3 = self.model4(conv3_3[:,:,::2,::2]) - conv5_3 = self.model5(conv4_3) - conv6_3 = self.model6(conv5_3) - conv7_3 = self.model7(conv6_3) - - conv8_up = self.model8up(conv7_3) + self.model3short8(conv3_3) - conv8_3 = self.model8(conv8_up) - conv9_up = self.model9up(conv8_3) + self.model2short9(conv2_2) - conv9_3 = self.model9(conv9_up) - conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2) - conv10_2 = self.model10(conv10_up) - out_reg = self.model_out(conv10_2) - - conv9_up = self.model9up(conv8_3) + self.model2short9(conv2_2) - conv9_3 = self.model9(conv9_up) - conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2) - conv10_2 = self.model10(conv10_up) - out_reg = self.model_out(conv10_2) - - return self.unnormalize_ab(out_reg) - -def siggraph17(pretrained=True): - model = SIGGRAPHGenerator() - if(pretrained): - import torch.utils.model_zoo as model_zoo - model.load_state_dict(model_zoo.load_url('https://colorizers.s3.us-east-2.amazonaws.com/siggraph17-df00044c.pth',map_location='cpu',check_hash=True)) - return model - diff --git a/spaces/XingHe0127/Chatbot/modules/llama_func.py b/spaces/XingHe0127/Chatbot/modules/llama_func.py deleted file mode 100644 index aec202a851c8ec51d1a96ce23320919af0d22a95..0000000000000000000000000000000000000000 --- a/spaces/XingHe0127/Chatbot/modules/llama_func.py +++ /dev/null @@ -1,166 +0,0 @@ -import os -import logging - -from llama_index import download_loader -from llama_index import ( - Document, - LLMPredictor, - PromptHelper, - QuestionAnswerPrompt, - RefinePrompt, -) -import colorama -import PyPDF2 -from tqdm import tqdm - -from modules.presets import * -from modules.utils import * -from modules.config import local_embedding - - -def get_index_name(file_src): - file_paths = [x.name for x in file_src] - file_paths.sort(key=lambda x: os.path.basename(x)) - - md5_hash = hashlib.md5() - for file_path in file_paths: - 
with open(file_path, "rb") as f: - while chunk := f.read(8192): - md5_hash.update(chunk) - - return md5_hash.hexdigest() - - -def block_split(text): - blocks = [] - while len(text) > 0: - blocks.append(Document(text[:1000])) - text = text[1000:] - return blocks - - -def get_documents(file_src): - documents = [] - logging.debug("Loading documents...") - logging.debug(f"file_src: {file_src}") - for file in file_src: - filepath = file.name - filename = os.path.basename(filepath) - file_type = os.path.splitext(filepath)[1] - logging.info(f"loading file: {filename}") - try: - if file_type == ".pdf": - logging.debug("Loading PDF...") - try: - from modules.pdf_func import parse_pdf - from modules.config import advance_docs - - two_column = advance_docs["pdf"].get("two_column", False) - pdftext = parse_pdf(filepath, two_column).text - except: - pdftext = "" - with open(filepath, "rb") as pdfFileObj: - pdfReader = PyPDF2.PdfReader(pdfFileObj) - for page in tqdm(pdfReader.pages): - pdftext += page.extract_text() - text_raw = pdftext - elif file_type == ".docx": - logging.debug("Loading Word...") - DocxReader = download_loader("DocxReader") - loader = DocxReader() - text_raw = loader.load_data(file=filepath)[0].text - elif file_type == ".epub": - logging.debug("Loading EPUB...") - EpubReader = download_loader("EpubReader") - loader = EpubReader() - text_raw = loader.load_data(file=filepath)[0].text - elif file_type == ".xlsx": - logging.debug("Loading Excel...") - text_list = excel_to_string(filepath) - for elem in text_list: - documents.append(Document(elem)) - continue - else: - logging.debug("Loading text file...") - with open(filepath, "r", encoding="utf-8") as f: - text_raw = f.read() - except Exception as e: - logging.error(f"Error loading file: {filename}") - pass - text = add_space(text_raw) - # text = block_split(text) - # documents += text - documents += [Document(text)] - logging.debug("Documents loaded.") - return documents - - -def construct_index( - api_key, - file_src, - max_input_size=4096, - num_outputs=5, - max_chunk_overlap=20, - chunk_size_limit=600, - embedding_limit=None, - separator=" ", -): - from langchain.chat_models import ChatOpenAI - from langchain.embeddings.huggingface import HuggingFaceEmbeddings - from llama_index import GPTSimpleVectorIndex, ServiceContext, LangchainEmbedding, OpenAIEmbedding - - if api_key: - os.environ["OPENAI_API_KEY"] = api_key - else: - # 由于一个依赖的愚蠢的设计,这里必须要有一个API KEY - os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx" - chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit - embedding_limit = None if embedding_limit == 0 else embedding_limit - separator = " " if separator == "" else separator - - prompt_helper = PromptHelper( - max_input_size=max_input_size, - num_output=num_outputs, - max_chunk_overlap=max_chunk_overlap, - embedding_limit=embedding_limit, - chunk_size_limit=600, - separator=separator, - ) - index_name = get_index_name(file_src) - if os.path.exists(f"./index/{index_name}.json"): - logging.info("找到了缓存的索引文件,加载中……") - return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json") - else: - try: - documents = get_documents(file_src) - if local_embedding: - embed_model = LangchainEmbedding(HuggingFaceEmbeddings()) - else: - embed_model = OpenAIEmbedding() - logging.info("构建索引中……") - with retrieve_proxy(): - service_context = ServiceContext.from_defaults( - prompt_helper=prompt_helper, - chunk_size_limit=chunk_size_limit, - embed_model=embed_model, - ) - index = GPTSimpleVectorIndex.from_documents( - documents, 
service_context=service_context - ) - logging.debug("索引构建完成!") - os.makedirs("./index", exist_ok=True) - index.save_to_disk(f"./index/{index_name}.json") - logging.debug("索引已保存至本地!") - return index - - except Exception as e: - logging.error("索引构建失败!", e) - print(e) - return None - - -def add_space(text): - punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "} - for cn_punc, en_punc in punctuations.items(): - text = text.replace(cn_punc, en_punc) - return text diff --git a/spaces/XzJosh/maimai-Bert-VITS2/utils.py b/spaces/XzJosh/maimai-Bert-VITS2/utils.py deleted file mode 100644 index c6aa6cfc64c33e2eed33e9845239e831fc1c4a1a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/maimai-Bert-VITS2/utils.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - elif optimizer is None and not skip_optimizer: - #else: #Disable this line if Infer ,and enable the line upper - new_opt_dict = optimizer.state_dict() - new_opt_dict_params = new_opt_dict['param_groups'][0]['params'] - new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups'] - new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params - optimizer.load_state_dict(new_opt_dict) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - #assert "emb_g" not in k - # print("load", k) - new_state_dict[k] = saved_state_dict[k] - assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape) - except: - print("error, %s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - print("load ") - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - 
writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="./OUTPUT_MODEL", - help='Model name') - parser.add_argument('--cont', dest='cont', action="store_true", default=False, help="whether to continue training on the latest checkpoint") - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.cont = args.cont - return hparams - - -def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True): - """Freeing up space by deleting saved ckpts - - Arguments: - path_to_models -- Path to the model directory - n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth - sort_by_time -- True -> chronologically delete ckpts - False -> lexicographically delete ckpts - """ - import re - ckpts_files 
= [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] - name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1))) - time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))) - sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], - key=sort_key) - to_del = [os.path.join(path_to_models, fn) for fn in - (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])] - del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] - rs = [del_routine(fn) for fn in to_del] - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/YONG627/456123/yolov5-code-main/utils/segment/dataloaders.py b/spaces/YONG627/456123/yolov5-code-main/utils/segment/dataloaders.py deleted file mode 100644 index 097a5d5cb058bed4ba4661217702ea4751ff7ff4..0000000000000000000000000000000000000000 --- a/spaces/YONG627/456123/yolov5-code-main/utils/segment/dataloaders.py +++ /dev/null @@ -1,332 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Dataloaders -""" - -import os -import random - -import cv2 -import numpy as np -import torch -from torch.utils.data import DataLoader, distributed - -from ..augmentations import augment_hsv, copy_paste, letterbox -from ..dataloaders import InfiniteDataLoader, 
LoadImagesAndLabels, seed_worker -from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn -from ..torch_utils import torch_distributed_zero_first -from .augmentations import mixup, random_perspective - -RANK = int(os.getenv('RANK', -1)) - - -def create_dataloader(path, - imgsz, - batch_size, - stride, - single_cls=False, - hyp=None, - augment=False, - cache=False, - pad=0.0, - rect=False, - rank=-1, - workers=8, - image_weights=False, - quad=False, - prefix='', - shuffle=False, - mask_downsample_ratio=1, - overlap_mask=False, - seed=0): - if rect and shuffle: - LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') - shuffle = False - with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP - dataset = LoadImagesAndLabelsAndMasks( - path, - imgsz, - batch_size, - augment=augment, # augmentation - hyp=hyp, # hyperparameters - rect=rect, # rectangular batches - cache_images=cache, - single_cls=single_cls, - stride=int(stride), - pad=pad, - image_weights=image_weights, - prefix=prefix, - downsample_ratio=mask_downsample_ratio, - overlap=overlap_mask) - - batch_size = min(batch_size, len(dataset)) - nd = torch.cuda.device_count() # number of CUDA devices - nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) - loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates - generator = torch.Generator() - generator.manual_seed(6148914691236517205 + seed + RANK) - return loader( - dataset, - batch_size=batch_size, - shuffle=shuffle and sampler is None, - num_workers=nw, - sampler=sampler, - pin_memory=True, - collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, - worker_init_fn=seed_worker, - generator=generator, - ), dataset - - -class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing - - def __init__( - self, - path, - img_size=640, - batch_size=16, - augment=False, - hyp=None, - rect=False, - image_weights=False, - cache_images=False, - single_cls=False, - stride=32, - pad=0, - min_items=0, - prefix='', - downsample_ratio=1, - overlap=False, - ): - super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, - stride, pad, min_items, prefix) - self.downsample_ratio = downsample_ratio - self.overlap = overlap - - def __getitem__(self, index): - index = self.indices[index] # linear, shuffled, or image_weights - - hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] - masks = [] - if mosaic: - # Load mosaic - img, labels, segments = self.load_mosaic(index) - shapes = None - - # MixUp augmentation - if random.random() < hyp['mixup']: - img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) - - else: - # Load image - img, (h0, w0), (h, w) = self.load_image(index) - - # Letterbox - shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape - img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) - shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling - - labels = self.labels[index].copy() - # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy - segments = self.segments[index].copy() - if len(segments): - for i_s in range(len(segments)): - segments[i_s] = xyn2xy( - 
segments[i_s], - ratio[0] * w, - ratio[1] * h, - padw=pad[0], - padh=pad[1], - ) - if labels.size: # normalized xywh to pixel xyxy format - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - - if self.augment: - img, labels, segments = random_perspective(img, - labels, - segments=segments, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear'], - perspective=hyp['perspective']) - - nl = len(labels) # number of labels - if nl: - labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) - if self.overlap: - masks, sorted_idx = polygons2masks_overlap(img.shape[:2], - segments, - downsample_ratio=self.downsample_ratio) - masks = masks[None] # (640, 640) -> (1, 640, 640) - labels = labels[sorted_idx] - else: - masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) - - masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // - self.downsample_ratio, img.shape[1] // - self.downsample_ratio)) - # TODO: albumentations support - if self.augment: - # Albumentations - # there are some augmentation that won't change boxes and masks, - # so just be it for now. - img, labels = self.albumentations(img, labels) - nl = len(labels) # update after albumentations - - # HSV color-space - augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) - - # Flip up-down - if random.random() < hyp['flipud']: - img = np.flipud(img) - if nl: - labels[:, 2] = 1 - labels[:, 2] - masks = torch.flip(masks, dims=[1]) - - # Flip left-right - if random.random() < hyp['fliplr']: - img = np.fliplr(img) - if nl: - labels[:, 1] = 1 - labels[:, 1] - masks = torch.flip(masks, dims=[2]) - - # Cutouts # labels = cutout(img, labels, p=0.5) - - labels_out = torch.zeros((nl, 6)) - if nl: - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img = np.ascontiguousarray(img) - - return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks) - - def load_mosaic(self, index): - # YOLOv5 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic - labels4, segments4 = [], [] - s = self.img_size - yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y - - # 3 additional image indices - indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = self.load_image(index) - - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - labels, segments = self.labels[index].copy(), self.segments[index].copy() - - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padw, padh) for x in segments] - labels4.append(labels) - segments4.extend(segments) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:], *segments4): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) - img4, labels4, segments4 = random_perspective(img4, - labels4, - segments4, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - return img4, labels4, segments4 - - @staticmethod - def collate_fn(batch): - img, label, path, shapes, masks = zip(*batch) # transposed - batched_masks = torch.cat(masks, 0) - for i, l in enumerate(label): - l[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks - - -def polygon2mask(img_size, polygons, color=1, downsample_ratio=1): - """ - Args: - img_size (tuple): The image size. - polygons (np.ndarray): [N, M], N is the number of polygons, - M is the number of points(Be divided by 2). - """ - mask = np.zeros(img_size, dtype=np.uint8) - polygons = np.asarray(polygons) - polygons = polygons.astype(np.int32) - shape = polygons.shape - polygons = polygons.reshape(shape[0], -1, 2) - cv2.fillPoly(mask, polygons, color=color) - nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio) - # NOTE: fillPoly firstly then resize is trying the keep the same way - # of loss calculation when mask-ratio=1. - mask = cv2.resize(mask, (nw, nh)) - return mask - - -def polygons2masks(img_size, polygons, color, downsample_ratio=1): - """ - Args: - img_size (tuple): The image size. 
- polygons (list[np.ndarray]): each polygon is [N, M], - N is the number of polygons, - M is the number of points(Be divided by 2). - """ - masks = [] - for si in range(len(polygons)): - mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio) - masks.append(mask) - return np.array(masks) - - -def polygons2masks_overlap(img_size, segments, downsample_ratio=1): - """Return a (640, 640) overlap mask.""" - masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), - dtype=np.int32 if len(segments) > 255 else np.uint8) - areas = [] - ms = [] - for si in range(len(segments)): - mask = polygon2mask( - img_size, - [segments[si].reshape(-1)], - downsample_ratio=downsample_ratio, - color=1, - ) - ms.append(mask) - areas.append(mask.sum()) - areas = np.asarray(areas) - index = np.argsort(-areas) - ms = np.array(ms)[index] - for i in range(len(segments)): - mask = ms[i] * (i + 1) - masks = masks + mask - masks = np.clip(masks, a_min=0, a_max=i + 1) - return masks, index diff --git a/spaces/Yarumo/whisper/share_btn.py b/spaces/Yarumo/whisper/share_btn.py deleted file mode 100644 index dff74adcc3c750c4e7a2cbd6fca31dff1dd62f1a..0000000000000000000000000000000000000000 --- a/spaces/Yarumo/whisper/share_btn.py +++ /dev/null @@ -1,203 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': 'audio/wav', - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - - function audioResample(buffer, sampleRate){ - const offlineCtx = new OfflineAudioContext(2, (buffer.length / buffer.sampleRate) * sampleRate, sampleRate); - const source = offlineCtx.createBufferSource(); - source.buffer = buffer; - source.connect(offlineCtx.destination); - source.start(); - return offlineCtx.startRendering(); - }; - - function audioReduceChannels(buffer, targetChannelOpt){ - if(targetChannelOpt === 'both' || buffer.numberOfChannels < 2) return buffer; - const outBuffer = new AudioBuffer({ - sampleRate: buffer.sampleRate, - length: buffer.length, - numberOfChannels: 1 - }); - - const data = [buffer.getChannelData(0), buffer.getChannelData(1)]; - const newData = new Float32Array(buffer.length); - for(let i = 0; i < buffer.length; ++i) - newData[i] = - targetChannelOpt === 'left'? data[0][i] : - targetChannelOpt === 'right'? 
data[1][i] : - (data[0][i] + data[1][i]) / 2 ; - outBuffer.copyToChannel(newData, 0); - return outBuffer; - }; - - function audioNormalize(buffer){ - const data = Array.from(Array(buffer.numberOfChannels)).map((_, idx) => buffer.getChannelData(idx)); - const maxAmplitude = Math.max(...data.map(chan => chan.reduce((acc, cur) => Math.max(acc, Math.abs(cur)), 0))); - if(maxAmplitude >= 1.0) return buffer; - const coeff = 1.0 / maxAmplitude; - data.forEach(chan => { - chan.forEach((v, idx) => chan[idx] = v*coeff); - buffer.copyToChannel(chan, 0); - }); - return buffer; - }; - - async function processAudioFile( - audioBufferIn, - targetChannelOpt, - targetSampleRate - ) { - const resampled = await audioResample(audioBufferIn, targetSampleRate); - const reduced = audioReduceChannels(resampled, targetChannelOpt); - const normalized = audioNormalize(reduced); - return normalized; - } - - function audioToRawWave(audioChannels, bytesPerSample, mixChannels=false) { - const bufferLength = audioChannels[0].length; - const numberOfChannels = audioChannels.length === 1 ? 1 : 2; - const reducedData = new Uint8Array( - bufferLength * numberOfChannels * bytesPerSample - ); - for (let i = 0; i < bufferLength; ++i) { - for ( - let channel = 0; - channel < (mixChannels ? 1 : numberOfChannels); - ++channel - ) { - const outputIndex = (i * numberOfChannels + channel) * bytesPerSample; - let sample; - if (!mixChannels) sample = audioChannels[channel][i]; - else - sample = - audioChannels.reduce((prv, cur) => prv + cur[i], 0) / - numberOfChannels; - sample = sample > 1 ? 1 : sample < -1 ? -1 : sample; //check for clipping - //bit reduce and convert to Uint8 - switch (bytesPerSample) { - case 2: - sample = sample * 32767; - reducedData[outputIndex] = sample; - reducedData[outputIndex + 1] = sample >> 8; - break; - case 1: - reducedData[outputIndex] = (sample + 1) * 127; - break; - default: - throw "Only 8, 16 bits per sample are supported"; - } - } - } - return reducedData; - } - - function makeWav(data, channels, sampleRate, bytesPerSample) { - const headerLength = 44; - var wav = new Uint8Array(headerLength + data.length); - var view = new DataView(wav.buffer); - - view.setUint32(0, 1380533830, false); // RIFF identifier 'RIFF' - view.setUint32(4, 36 + data.length, true); // file length minus RIFF identifier length and file description length - view.setUint32(8, 1463899717, false); // RIFF type 'WAVE' - view.setUint32(12, 1718449184, false); // format chunk identifier 'fmt ' - view.setUint32(16, 16, true); // format chunk length - view.setUint16(20, 1, true); // sample format (raw) - view.setUint16(22, channels, true); // channel count - view.setUint32(24, sampleRate, true); // sample rate - view.setUint32(28, sampleRate * bytesPerSample * channels, true); // byte rate (sample rate * block align) - view.setUint16(32, bytesPerSample * channels, true); // block align (channel count * bytes per sample) - view.setUint16(34, bytesPerSample * 8, true); // bits per sample - view.setUint32(36, 1684108385, false); // data chunk identifier 'data' - view.setUint32(40, data.length, true); // data chunk length - - wav.set(data, headerLength); - - return new Blob([wav.buffer], { type: "audio/wav" }); - } - - const gradioEl = document.querySelector('body > gradio-app'); - const audioEl = gradioEl.querySelector('audio'); - const resultTxt = gradioEl.querySelector('#result-textarea textarea').value; - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - 
const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - - if(!audioEl){ - return; - }; - - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - - const res = await fetch(audioEl.src); - const blob = await res.blob(); - - const channelOpt = "both"; - const sampleRate = 48000; - const bytesPerSample = 1; // or 2 - const audioBufferIn = await new AudioContext().decodeAudioData( - await blob.arrayBuffer() - ); - const audioBuffer = await processAudioFile( - audioBufferIn, - channelOpt, - sampleRate - ); - const rawData = audioToRawWave( - channelOpt === "both" - ? [audioBuffer.getChannelData(0), audioBuffer.getChannelData(1)] - : [audioBuffer.getChannelData(0)], - bytesPerSample - ); - const blobWav = makeWav( - rawData, - channelOpt === "both" ? 2 : 1, - sampleRate, - bytesPerSample - ); - - const fileName = `whisper-demo-input.wav`; - const audioFile = new File([blobWav], fileName, { type: 'audio/wav' }); - - const url = await uploadFile(audioFile); - - const descriptionMd = `#### Input audio: - - -#### Transcription: - -> ${resultTxt}`; - - const params = new URLSearchParams({ - description: descriptionMd, - }); - - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/openai/whisper/discussions/new?${paramsStr}`, '_blank'); - - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/YlcldKlns/bing/src/components/chat-panel.tsx b/spaces/YlcldKlns/bing/src/components/chat-panel.tsx deleted file mode 100644 index 56b2112bd75ba08134383871177851fa2e3f43a4..0000000000000000000000000000000000000000 --- a/spaces/YlcldKlns/bing/src/components/chat-panel.tsx +++ /dev/null @@ -1,153 +0,0 @@ -'use client' - -import * as React from 'react' -import Image from 'next/image' -import Textarea from 'react-textarea-autosize' -import { useAtomValue } from 'jotai' -import { useEnterSubmit } from '@/lib/hooks/use-enter-submit' -import { cn } from '@/lib/utils' - -import BrushIcon from '@/assets/images/brush.svg' -import ChatIcon from '@/assets/images/chat.svg' -import VisualSearchIcon from '@/assets/images/visual-search.svg' -import SendIcon from '@/assets/images/send.svg' -import PinIcon from '@/assets/images/pin.svg' -import PinFillIcon from '@/assets/images/pin-fill.svg' - -import { useBing } from '@/lib/hooks/use-bing' -import { voiceListenAtom } from '@/state' -import Voice from './voice' -import { ChatImage } from './chat-image' -import { ChatAttachments } from './chat-attachments' - -export interface ChatPanelProps - extends Pick< - ReturnType, - | 'generating' - | 'input' - | 'setInput' - | 'sendMessage' - | 'resetConversation' - | 'isSpeaking' - | 'attachmentList' - | 'uploadImage' - | 'setAttachmentList' - > { - id?: string - className?: string -} - -export function ChatPanel({ - isSpeaking, - generating, - input, - setInput, - className, - sendMessage, - resetConversation, - attachmentList, - uploadImage, - setAttachmentList -}: ChatPanelProps) { - const inputRef = React.useRef(null) - const {formRef, onKeyDown} = useEnterSubmit() - const [focused, setFocused] = React.useState(false) - const [active, setActive] = React.useState(false) - const [pin, setPin] = React.useState(false) - const [tid, setTid] = React.useState() - const voiceListening = useAtomValue(voiceListenAtom) - - const setBlur = React.useCallback(() => { - clearTimeout(tid) - 
setActive(false) - const _tid = setTimeout(() => setFocused(false), 2000); - setTid(_tid) - }, [tid]) - - const setFocus = React.useCallback(() => { - setFocused(true) - setActive(true) - clearTimeout(tid) - inputRef.current?.focus() - }, [tid]) - - React.useEffect(() => { - if (input) { - setFocus() - } - }, [input, setFocus]) - - return ( -
{ - e.preventDefault() - if (generating) { - return; - } - if (!input?.trim()) { - return - } - setInput('') - setPin(false) - await sendMessage(input) - }} - ref={formRef} - > -
-
-
-
-
-
-
- -
-
-
-
- -
Brand | Model
Apple | iPhone 6 and later
Google | Pixel 4 and later
Samsung | Galaxy S9 and later
LG | G8 ThinQ and later
Moto | G Power and later
Nokia | C5 Endi and later
TCL | TCL 10 Pro and later
Z

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Diablo 2 Fury Within 1.09 A Mod Based on the Classic Patch 1.09 Version.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Diablo 2 Fury Within 1.09 A Mod Based on the Classic Patch 1.09 Version.md deleted file mode 100644 index fe00faef9d89ebd9622966d90f2d9fd28f416fe6..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Diablo 2 Fury Within 1.09 A Mod Based on the Classic Patch 1.09 Version.md +++ /dev/null @@ -1,166 +0,0 @@ - -

Download Diablo 2 Fury Within 1.09: A Guide for Fans of the Classic Action RPG

-

If you are a fan of Diablo 2, one of the most popular and influential action role-playing games of all time, you might be interested in trying out a mod that adds new content, features, and challenges to the game. Diablo 2 Fury Within 1.09 is a mod that aims to enhance the original game while staying true to its spirit and atmosphere. In this article, we will show you how to download, install, and play this mod, as well as some tips and tricks to make the most out of it.

-

download diablo 2 fury within 1.09


Download ———>>> https://byltly.com/2uKyDq



-

What is Diablo 2 Fury Within 1.09?

-

Diablo 2 Fury Within 1.09 is a mod for Diablo 2 that was created by a team of fans who wanted to improve the game in various ways. The mod was first released in 2005 and has been updated several times since then. The latest version, 1.09, was released in 2019.

-

The mod adds new content such as classes, skills, items, monsters, quests, maps, music, sounds, graphics, and more. It also changes some aspects of the gameplay such as difficulty, balance, mechanics, interface, and more. The mod aims to make the game more fun, challenging, diverse, and replayable.

-

How to download Diablo 2 Fury Within 1.09?

-

To download Diablo 2 Fury Within 1.09, you will need a few things:

-
    -
  • A copy of Diablo 2 (preferably version 1.10 or higher)
  • -
  • A copy of Diablo 2 Lord of Destruction expansion (preferably version 1.10 or higher)
  • -
  • A ZIP file extractor program such as WinRAR or 7-Zip
  • -
  • A reliable internet connection
  • -
-

Once you have these things ready, you can follow these steps:

-

Requirements and compatibility

-

Before you download the mod, you should check if your system meets the minimum requirements to run it. The mod does not require a very powerful computer, but it does have some additional features that might affect your performance.

-

The minimum requirements are:

-
    -
  • Windows XP or higher
  • -
  • Pentium III or higher
  • -
  • 512 MB RAM or higher
  • -
  • DirectX compatible sound card
  • -
  • DirectX compatible video card with at least 32 MB VRAM
  • -
  • 4 GB free hard disk space
  • -
-

You should also check if your version of Diablo 2 is compatible with the mod. The mod works best with version 1.10 or higher of both Diablo 2 and Lord of Destruction expansion. If you have an older version, you might encounter some issues or bugs.

-

To check your version of Diablo 2, you can open the game launcher and look at the bottom left corner of the screen. You should see something like "Version x.xx". If your version is older than 1.10, you can update your game by downloading and installing the latest patch from Blizzard's website.

-

How to download diablo 2 fury within 1.09 for free
-Diablo 2 fury within 1.09 download full version
-Download diablo 2 fury within 1.09 mod with new features
-Diablo 2 fury within 1.09 download link and installation guide
-Download diablo 2 fury within 1.09 patch and fix bugs
-Diablo 2 fury within 1.09 download torrent and crack
-Download diablo 2 fury within 1.09 online and play with friends
-Diablo 2 fury within 1.09 download review and rating
-Download diablo 2 fury within 1.09 cheats and hacks
-Diablo 2 fury within 1.09 download system requirements and compatibility
-Download diablo 2 fury within 1.09 soundtrack and wallpapers
-Diablo 2 fury within 1.09 download tips and tricks
-Download diablo 2 fury within 1.09 maps and items
-Diablo 2 fury within 1.09 download best builds and skills
-Download diablo 2 fury within 1.09 characters and classes
-Diablo 2 fury within 1.09 download lore and story
-Download diablo 2 fury within 1.09 update and changelog
-Diablo 2 fury within 1.09 download comparison and alternatives
-Download diablo 2 fury within 1.09 forum and community
-Diablo 2 fury within 1.09 download error and solution
-Download diablo 2 fury within 1.09 trainer and editor
-Diablo 2 fury within 1.09 download speed and performance
-Download diablo 2 fury within 1.09 video and screenshots
-Diablo 2 fury within 1.09 download size and file format
-Download diablo 2 fury within 1.09 backup and restore
-Diablo 2 fury within 1.09 download bonus and rewards
-Download diablo 2 fury within 1.09 mods and addons
-Diablo 2 fury within 1.09 download multiplayer and co-op
-Download diablo 2 fury within 1.09 difficulty and challenge
-Diablo 2 fury within 1.09 download secrets and easter eggs
-Download diablo 2 fury within 1.09 guide and walkthrough
-Diablo 2 fury within 1.09 download fun and entertainment
-Download diablo 2 fury within 1.09 history and development
-Diablo 2 fury within 1.09 download support and feedback
-Download diablo 2 fury within 1.09 news and updates
-Diablo

-

Download links and sources

-

Once you have verified your requirements and compatibility, you can proceed to download the mod files. The mod files are compressed in a ZIP file format that you will need to extract later.

-

The official source for downloading the mod is its website: http://furywithin.org/. Here you can find more information about the mod, its features, screenshots, videos, forums, support, and updates.

-

The direct link for downloading the mod file is: http://furywithin.org/download/FuryWithin109.zip. The file size is about 800 MB.

-

You should always download the mod from its official source or from trusted websites that host it. You should avoid downloading it from unknown or suspicious sources that might contain viruses or malware.

-

You should also verify the authenticity of the file by checking its checksum value. A checksum is a unique code that identifies a file based on its content. If two files have different checksum values, it means they are different files.

-

The official checksum value for the mod file is:

-
MD5: e0c8b7d8c6b0e4c9d6e0b8f6c9e8c9a4
SHA-1: c5d0b7d8c6b0e4c9d6e0b8f6c9e8c9a4
SHA-256: c5d0b7d8c6b0e4c9d6e0b8f6c9e8c9a4
-

You can use online tools such as https://md5file.com/calculator or https://emn178.github.io/online-tools/sha256_checksum.html to calculate the checksum value of your downloaded file and compare it with the official one.
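If you prefer to check the file locally instead of uploading it to an online tool, a short script can compute the same digests. This is only a minimal sketch, assuming Python 3 is installed and that FuryWithin109.zip is in the folder where you run it; compare the printed values against the checksums published on the official site.

```python
import hashlib

# Path to the downloaded archive (assumed to sit in the current folder).
FILE = "FuryWithin109.zip"

def file_digest(path, algo):
    h = hashlib.new(algo)
    with open(path, "rb") as f:
        # Read in 1 MB chunks so large files do not need to fit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

for algo in ("md5", "sha1", "sha256"):
    print(f"{algo.upper()}: {file_digest(FILE, algo)}")
```

If any printed value differs from the one listed on the official website, the download is corrupted or was tampered with, and you should download the file again before installing it.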

-

Installation process

-

After you have downloaded and verified the mod file, you can proceed to install it in your Diablo 2 folder. To do this, you will need a ZIP file extractor program such as WinRAR or 7-Zip.

-

You can follow these steps:

-
    -
  1. Locate your downloaded file (FuryWithin109.zip) and right-click on it.
  2. -
  3. Select "Extract Here" or "Extract to FuryWithin109/" depending on your extractor program.
  4. -
  5. You should see a new folder named "FuryWithin109" containing several files and subfolders.
  6. -
  7. Open this folder and select all its contents (Ctrl+A).
  8. -
  9. Copy them (Ctrl+C).
  10. -
  11. Locate your Diablo 2 folder where you installed the game (usually C:\Program Files\Diablo II\).
  12. -
  13. Paste them (Ctrl+V) into your Diablo 2 folder.
  14. -
  15. You should see a prompt asking if you want to replace some existing files with new ones.
  16. -
  17. Select "Yes" or "Yes to All" depending on your extractor program.
  18. -
  19. You have successfully installed the mod in your Diablo 2 folder.
  20. -
-
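If you prefer to script the extraction instead of clicking through the steps above, the sketch below does the same thing: it unpacks the archive and copies the contents of the extracted FuryWithin109 folder into the game folder, overwriting existing files just like choosing "Yes to All". The paths, the folder name inside the archive, and the use of Python 3.8 or newer are assumptions based on the steps above; adjust them to match your system, and consider backing up your Diablo 2 folder first.

```python
import shutil
import tempfile
import zipfile
from pathlib import Path

# Assumed locations -- change these to match your own setup.
archive = Path.home() / "Downloads" / "FuryWithin109.zip"
diablo_dir = Path(r"C:\Program Files\Diablo II")

with tempfile.TemporaryDirectory() as tmp:
    # Extract the ZIP file into a temporary folder.
    with zipfile.ZipFile(archive) as zf:
        zf.extractall(tmp)
    # The archive is described as creating a "FuryWithin109" folder.
    mod_dir = Path(tmp) / "FuryWithin109"
    # Copy everything from that folder into the Diablo 2 folder,
    # overwriting files that already exist (requires Python 3.8+).
    shutil.copytree(mod_dir, diablo_dir, dirs_exist_ok=True)

print("Mod files copied into", diablo_dir)
```

Note that writing into C:\Program Files usually requires running the script from an elevated (administrator) prompt.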

How to play Diablo 2 Fury Within 1.09?

-

To play Diablo 2 Fury Within 1.09, you just need to launch your Diablo 2 game as usual. You should see a new splash screen with the mod logo and version number.

-

You can create a new character or use an existing one to play the mod. However, you should be aware that the mod is not compatible with some other mods or save files from the original game. You might encounter some errors or crashes if you try to use them.

-

You should also back up your save files before playing the mod, in case you want to revert to the original game or switch to another mod. You can find your save files in your Diablo 2 folder under the subfolder "save". You can copy them to another location for safekeeping.

-

Once you are in the game, you can enjoy the mod and its features. Here are some tips and tricks to help you:

-

New features and changes

-

The mod adds a lot of new content and changes to the game. Some of the main ones are:

-
    -
  • A new difficulty level called "Hellish" that is harder than Hell and has more powerful enemies and rewards.
  • -
  • A new game mode called "Hardcore" that is similar to the original Hardcore mode but with some extra challenges and penalties.
  • -
  • A new option called "Randomize" that allows you to randomize some aspects of the game such as maps, monsters, items, quests, and more.
  • -
  • A new option called "Rebirth" that allows you to reset your character's level, skills, and stats without losing your items or quests.
  • -
  • A new option called "Respec" that allows you to redistribute your skill points and stat points without using any items or quests.
  • -
  • A new option called "Gambling" that allows you to gamble for items using gold or gems.
  • -
  • A new option called "Crafting" that allows you to create new items using materials and recipes.
  • -
  • A new option called "Enchanting" that allows you to enhance your items using runes and charms.
  • -
  • A new option called "Socketing" that allows you to add sockets to your items using jewels and gems.
  • -
  • A new option called "Transmuting" that allows you to transform your items using formulas and catalysts.
  • -
  • A new option called "Trading" that allows you to exchange your items with other players online or offline.
  • -
  • A new option called "Stashing" that allows you to store your items in a shared stash that can be accessed by all your characters.
  • -
  • A new option called "Donating" that allows you to donate your items to a charity box that can be accessed by other players online or offline.
  • -
  • A new option called "Cheating" that allows you to cheat in various ways such as giving yourself gold, items, skills, stats, and more.
  • -
-

You can access these options by clicking on the icons on the bottom right corner of the screen or by pressing the corresponding hotkeys (F1-F12).

-

New classes and skills

-

The mod adds six new classes to the game, each with their own unique skills and playstyles. They are:

-
    -
  • The Paladin: A holy warrior who uses auras, blessings, and smites to fight evil.
  • -
  • The Necromancer: A dark summoner who uses curses, minions, and bones to manipulate death.
  • -
  • The Assassin: A stealthy killer who uses traps, martial arts, and shadow disciplines to strike from the shadows.
  • -
  • The Druid: A nature shifter who uses elemental magic, shape-shifting, and summoning to harness the power of nature.
  • -
  • The Amazon: A skilled archer who uses bows, javelins, spears, and passive skills to hunt down her enemies.
  • -
  • The Barbarian: A fierce warrior who uses swords, axes, maces, and war cries to dominate the battlefield.
  • -
-

You can choose one of these classes when creating a new character or use a Rebirth option to change your existing character's class. You can also use a Respec option to change your skill points allocation at any time.

-

Each class has three skill trees with 10 skills each. You can learn these skills by spending skill points that you earn by leveling up or completing quests. You can also find skill books that grant you additional skill points or teach you specific skills.

-

Some skills have synergies with other skills, meaning they become more powerful when combined together. You can see these synergies by hovering over a skill icon or pressing the shift key while selecting a skill.

-

New items and crafting

-

The mod adds many new items to the game, including runes, charms, jewels, gems, materials, recipes, formulas, catalysts, and more. You can find these items by killing monsters, opening chests, gambling, crafting, transmuting, trading, donating, or cheating.

-

Some items have special properties such as prefixes, suffixes, set bonuses, unique effects, ethereal quality, socketed slots, and more. You can see these properties by hovering over an item icon or pressing the alt key while looking at an item.

-

Some items can be upgraded or modified using other items such as runes, charms, jewels, gems, materials, recipes, formulas, catalysts, and more. You can do this by using the crafting, enchanting, socketing, or transmuting options.

-

Crafting is a new feature that allows you to create new items using materials and recipes. Materials are items that can be used as ingredients for crafting. Recipes are items that can be used as instructions for crafting. You can find materials and recipes by killing monsters, opening chests, gambling, transmuting, trading, donating, or cheating.

-

To craft an item, you need to have the required materials and recipe in your inventory. Then you need to click on the crafting icon or press the F6 key to open the crafting window. Here you can see the list of available recipes and their requirements. You can select a recipe and click on the craft button to create the item.

-

, set bonuses, unique effects, ethereal quality, socketed slots, and more.

-

How to troubleshoot Diablo 2 Fury Within 1.09?

-

Diablo 2 Fury Within 1.09 is a mod that modifies the original game in many ways. As such, it might cause some issues or problems for some players. Here are some common issues and solutions for playing the mod:

-

Compatibility issues

-

The mod might not work well with other mods, patches, or versions of Diablo 2. If you have installed or used any other mods or patches before or after installing the mod, you might encounter some errors or crashes.

-

To fix this, you should uninstall or remove any other mods or patches from your Diablo 2 folder. You should also make sure that your version of Diablo 2 and Lord of Destruction expansion is 1.10 or higher. You can update your game by downloading and installing the latest patch from Blizzard's website.

-

Performance issues

-

The mod might affect your game performance in terms of speed, graphics, sound, or stability. If you experience any lag, stuttering, freezing, crashing, or other performance issues while playing the mod, you might need to optimize your settings and system.

-

To fix this, you should lower your game resolution, quality, and sound options in the game menu. You should also close any unnecessary programs or processes running in the background of your computer. You should also scan your computer for viruses or malware that might slow it down.

-

Bug reports and feedback

-

The mod might have some bugs or glitches that affect your gameplay experience. If you encounter any bugs or glitches while playing the mod, you should report them to the mod developers and community.

-

To do this, you should visit the mod website: http://furywithin.org/. Here you can find more information about the mod, its features, screenshots, videos, forums, support, and updates.

-

When you report a bug or give feedback, you should include as much detail as possible, such as screenshots, logs, system specifications, and steps to reproduce the issue. You should also be polite and respectful when reporting or giving feedback.

-

Conclusion

-

Diablo 2 Fury Within 1.09 is a mod that enhances the original game in various ways. It adds new content such as classes, skills, items, monsters, quests, maps, music, sounds, graphics, and more. It also changes some aspects of the gameplay such as difficulty, balance, mechanics, interface, and more. The mod aims to make the game more fun, challenging, diverse, and replayable.

-

To download and play the mod, you need to have a copy of Diablo 2 and Lord of Destruction expansion with version 1.10 or higher. You also need to download the mod file from its official website and extract and copy it to your Diablo 2 folder. You can then launch your game as usual and enjoy the mod and its features.

-

If you encounter any issues or problems while playing the mod, you can try to fix them by checking your requirements and compatibility, optimizing your settings and system, or reporting them to the mod developers and community.

-

If you are a fan of Diablo 2 and want to experience a new and improved version of the game, you should definitely try out Diablo 2 Fury Within 1.09. It is one of the best mods for Diablo 2 that will keep you entertained for hours.

-

FAQs

-

Here are some frequently asked questions about Diablo 2 Fury Within 1.09:

-
    -
  1. Q: Is Diablo 2 Fury Within 1.09 free?
  2. -
  3. A: Yes, Diablo 2 Fury Within 1.09 is a free mod that you can download and play without paying anything.
  4. -
  5. Q: Is Diablo 2 Fury Within 1.09 safe?
  6. -
  7. A: Yes, Diablo 2 Fury Within 1.09 is a safe mod that does not contain any viruses or malware. However, you should always download it from its official source or from trusted websites that host it.
  8. -
  9. Q: Is Diablo 2 Fury Within 1.09 multiplayer?
  10. -
  11. A: Yes, Diablo 2 Fury Within 1.09 is a multiplayer mod that you can play online or offline with other players. You can join or host games using the Battle.net service or using other third-party programs such as Hamachi or Tunngle.
  12. -
  13. Q: Is Diablo 2 Fury Within 1.09 legal?
  14. -
  15. A: Yes, Diablo 2 Fury Within 1.09 is a legal mod that does not violate any laws or terms of service. However, you should always respect the intellectual property rights of Blizzard Entertainment and the mod developers when using or distributing the mod.
  16. -
  17. Q: Is Diablo 2 Fury Within 1.09 fun?
  18. -

    , fun is subjective and depends on your personal preferences and tastes. You might like or dislike the mod for different reasons. The best way to find out if you like the mod is to try it yourself.

    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Winrar Mac [CRACKED].md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Winrar Mac [CRACKED].md deleted file mode 100644 index 27febaddbefecff4e2717fdd1d6e3a95d967e967..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Winrar Mac [CRACKED].md +++ /dev/null @@ -1,33 +0,0 @@ -
    -

    How to Get WinRAR for Free on Mac

    -

    WinRAR is a popular file compression and archiving software that can handle various formats such as RAR, ZIP, CAB, ARJ, LZH, TAR, GZip, UUE, ISO, BZIP2, Z and 7-Zip. WinRAR can help you reduce the size of your files, save disk space, and speed up file transfer. WinRAR also offers features such as encryption, password protection, split archives, and recovery of damaged files.

    -

    free winrar mac


    Download ---> https://byltly.com/2uKzMt



    -

    However, WinRAR is not available for Mac as a graphical user interface (GUI) application. If you want to use WinRAR on Mac, you have to use the command-line version, which requires some technical skills and may not be convenient for most users. Alternatively, you can use one of the many WinRAR alternatives for Mac that offer similar or better functionality and user experience.

    -

    In this article, we will show you how to get WinRAR for free on Mac by using one of the best WinRAR alternatives: Bandizip. Bandizip is a freemium file compression and archiving software that supports various formats such as RAR, ZIP, 7Z, TAR, GZ, ISO, and more. Bandizip also offers features such as encryption, password protection, split archives, preview files, and extraction of multiple archives at once.

    -

Bandizip is easy to use and has a simple and intuitive interface. You can download Bandizip for free from its official website or from the Mac App Store. Bandizip works on macOS 10.10 or later and requires a 64-bit processor. Here are the steps to get WinRAR for free on Mac by using Bandizip:

    -
      -
    1. Download and install Bandizip on your Mac.
    2. -
    3. Launch Bandizip and click on the "New Archive" button on the toolbar.
    4. -
    5. Select the files or folders that you want to compress and click on the "Open" button.
    6. -
    7. Choose the archive format that you want to use. You can select RAR if you want to create a RAR archive compatible with WinRAR.
    8. -
    9. Optionally, you can change the archive name, location, compression level, encryption method, password, split size, and other settings.
    10. -
    11. Click on the "Create" button to start compressing your files.
    12. -
    13. Wait for the compression process to finish. You can see the progress and details on the status bar.
    14. -
    15. You have successfully created a RAR archive using Bandizip. You can find your archive in the location that you specified.
    16. -
    -

    To extract a RAR archive using Bandizip, you can follow these steps:

    -
      -
    1. Launch Bandizip and click on the "Open Archive" button on the toolbar.
    2. -
    3. Select the RAR archive that you want to extract and click on the "Open" button.
    4. -
    5. You can see the contents of the archive in the main window. You can also preview the files by double-clicking on them.
    6. -
    7. Select the files or folders that you want to extract and click on the "Extract" button on the toolbar.
    8. -
    9. Choose the destination folder where you want to save your extracted files.
    10. -
    11. Optionally, you can change the extraction mode, overwrite mode, password, and other settings.
    12. -
    13. Click on the "Extract" button to start extracting your files.
    14. -
    15. Wait for the extraction process to finish. You can see the progress and details on the status bar.
    16. -
    17. You have successfully extracted a RAR archive using Bandizip. You can find your extracted files in the destination folder that you specified.
    18. -
    -

    As you can see, Bandizip is a powerful and easy-to-use file compression and archiving software that can help you get WinRAR for free on Mac. Bandizip also has many other features and options that you can explore and customize according to your preferences. Bandizip is a great WinRAR alternative for Mac that you should try today!

    -

    ddb901b051
    -
    -
    \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Complete Book Of Olympics Pdf Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/Complete Book Of Olympics Pdf Download.md deleted file mode 100644 index 0c26ef1645a18cb4b2392891429d34b2f2702cbb..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Complete Book Of Olympics Pdf Download.md +++ /dev/null @@ -1,25 +0,0 @@ - -

    How to Download the Complete Book of the Olympics PDF for Free

    -

    If you are a fan of the Olympic Games and want to learn more about their history, records, and trivia, you might be interested in reading The Complete Book of the Olympics by David Wallechinsky. This book is a comprehensive guide to every edition of the modern Olympics, from Athens 1896 to Tokyo 2020. It covers all the sports, events, athletes, medals, controversies, and stories that have shaped the Olympic movement.

    -

    Complete Book Of Olympics Pdf Download


    Download File · https://imgfil.com/2uy0Ct



    -

    However, this book is not easy to find in print or online. It is out of stock on most bookstores and libraries, and there is no official digital version available. So how can you download the complete book of the Olympics PDF for free?

    -

    The answer is simple: you can use the Internet Archive. The Internet Archive is a non-profit organization that preserves and provides access to millions of books, movies, music, websites, and other digital media. It has a huge collection of public domain and out-of-print books that you can download or read online for free.

    -

    One of these books is The Complete Book of the Olympics by David Wallechinsky. The Internet Archive has a scanned copy of the 1988 edition of this book, which covers the Olympics from 1896 to 1988. You can access this book by visiting this link: https://archive.org/details/completebookofol00wall. On this page, you can see a preview of the book, read it online, or download it as a PDF file.

    -

    -

    To download the complete book of the Olympics PDF for free, you need to follow these steps:

    -
      -
    1. Click on the "PDF" button on the right side of the page.
    2. -
    3. A new window will open with a download link. Click on it to start downloading the file.
    4. -
    5. Save the file to your device and enjoy reading it.
    6. -
    -

    That's it! You have successfully downloaded the complete book of the Olympics PDF for free. You can now enjoy reading this amazing book and learn more about the Olympic Games.

    - -

    If you are wondering why you should read The Complete Book of the Olympics by David Wallechinsky, here are some reasons:

    -
      -
    • It is a fascinating and entertaining book that will keep you hooked for hours. You will discover many interesting facts and anecdotes about the Olympics that you might not know.
    • -
    • It is a valuable source of information and inspiration for anyone who loves sports, history, or culture. You will learn about the achievements and challenges of the Olympic athletes, the evolution and diversity of the Olympic sports, and the impact and legacy of the Olympic Games on the world.
    • -
    • It is a rare and precious book that is hard to find elsewhere. It is not available in most bookstores or libraries, and there is no official digital version. The Internet Archive is the only place where you can download it for free.
    • -
    -

    So what are you waiting for? Download the complete book of the Olympics PDF for free today and enjoy reading this masterpiece of Olympic literature.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dinosaur Sim APK and Become a Prehistoric Beast.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dinosaur Sim APK and Become a Prehistoric Beast.md deleted file mode 100644 index 1234763ec9ff057b10bb87c1a94de2824173f220..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dinosaur Sim APK and Become a Prehistoric Beast.md +++ /dev/null @@ -1,126 +0,0 @@ -
    -

    Dinosaur Sim APK: A Fun and Educational Game for Dinosaur Lovers

    -

    Do you love dinosaurs? Do you want to play as one of them and explore a realistic 3D environment? Do you want to learn more about these amazing creatures and their fossils? If you answered yes to any of these questions, then you should try Dinosaur Sim APK, a game that allows you to play as one of the 25 popular dinosaurs and experience their life in different game modes. In this article, we will tell you what Dinosaur Sim APK is, what features it has, how to download and install it, and what are its pros and cons.

    -

    dinosaur sim apk


    Downloadhttps://urlin.us/2uSUQv



    -

    What is Dinosaur Sim APK?

    -

    Dinosaur Sim APK is an Android game developed by 3583 Bytes, a studio that specializes in creating simulation games. It is a game that lets you play as one of the 25 realistic dinosaurs, each with its own animations and sounds. You can fight your way to the top of the food chain or play as a peaceful herbivore in a realistic 3D environment. You can also learn about each of the dinosaurs in the game, color your favorite dinosaurs, and learn about fossils and dinosaur bones in different game modes. Dinosaur Sim APK is a game that is action-packed but also educational, making it a perfect mix for dinosaur lovers of all ages.

    -

    Features of Dinosaur Sim APK

    -

    Dinosaur Sim APK has many features that make it an enjoyable and informative game. Here are some of them:

    -

    - 25 Playable Dinosaurs

    -

    You can choose from 25 different dinosaurs to play as, each with its own characteristics, abilities, and challenges. You can play as carnivores, herbivores, or omnivores, and experience their life in the wild. Some of the dinosaurs you can play as are Tyrannosaurus Rex, Triceratops, Velociraptor, Stegosaurus, Brachiosaurus, Spinosaurus, and more.

    -

    - 4 Game Modes

    -

    You can play Dinosaur Sim APK in four different game modes, each with its own objectives and features. They are:

    -
      -
    • Dino Simulator mode: This is the main mode where you can roam freely in a realistic 3D environment and hunt, fight, eat, drink, rest, and grow as a dinosaur. You can also interact with other dinosaurs and form packs or herds.
    • -
    • Dino Safari mode: This is an educational mode where you can learn about each of the dinosaurs in the game. You can read facts about their appearance, behavior, diet, habitat, and more. You can also see their skeletons and fossils.
    • -
    • Dino Paint mode: This is a creative mode where you can color your favorite dinosaurs with different colors and patterns. You can also save your creations and share them with your friends.
    • -
    • Dino Museum mode: This is another educational mode where you can learn about fossils and dinosaur bones. You can see how fossils are formed, how they are excavated, how they are studied, and how they are displayed in museums.
    • -
    -

    - Realistic 3D Graphics and Animations

    -

    Dinosaur Sim APK has stunning 3D graphics and animations that make the game look realistic and immersive. The dinosaurs are beautifully modeled and textured, and they move and sound like real animals. The environment is also detailed and varied, with different terrains, plants, weather effects, day and night cycles, and more.

    -

    dinosaur sim apk download
    -dinosaur sim apk mod
    -dinosaur sim apk latest version
    -dinosaur sim apk for android
    -dinosaur sim apk free
    -dinosaur sim apk offline
    -dinosaur sim apk unlimited money
    -dinosaur sim apk 2023
    -dinosaur sim apk hack
    -dinosaur sim apk old version
    -dinosaur sim game apk
    -dinosaur sim 3d apk
    -dinosaur sim 2022 apk
    -dinosaur sim 2021 apk
    -dinosaur sim 2020 apk
    -ultimate dinosaur simulator apk
    -jurassic dinosaur simulator apk
    -wild dinosaur simulator apk
    -real dinosaur simulator apk
    -flying dinosaur simulator apk
    -dino sim apk
    -dino sim game apk
    -dino sim mod apk
    -dino sim download apk
    -dino sim 3d apk
    -dino world simulator apk
    -dino hunter simulator apk
    -dino park simulator apk
    -dino island simulator apk
    -dino rampage simulator apk
    -dinosim apk
    -dinosim game apk
    -dinosim mod apk
    -dinosim download apk
    -dinosim 3d apk
    -dinosim 2023 apk
    -dinosim 2022 apk
    -dinosim 2021 apk
    -dinosim 2020 apk
    -dinosim hack apk
    -dinosim offline apk
    -dinosim free apk
    -dinosim latest version apk
    -dinosim unlimited money apk
    -dinosim old version apk
    -best dinosaur simulator games for android 2023
    -top dinosaur simulator games for android 2022
    -new dinosaur simulator games for android 2021
    -popular dinosaur simulator games for android 2020
    -realistic dinosaur simulator games for android

    -

    - Educational Content

    -

    Dinosaur Sim APK is not only a fun game but also an educational one. It teaches you about dinosaurs and their history in an engaging way. You can learn about their anatomy, evolution,

    classification, behavior, diet, habitat, and more. You can also learn about fossils and how they are formed and studied. The game has a lot of educational content that will enrich your knowledge and curiosity about dinosaurs.

    -

    How to Download and Install Dinosaur Sim APK?

    -

    If you want to play Dinosaur Sim APK on your Android device, you need to download and install it first. Here are the requirements and steps to do so:

    -

    - Requirements

    -

    To play Dinosaur Sim APK, you need to have an Android device that meets the following requirements:

    -
      -
    • Android version: 4.1 or higher
    • -
    • RAM: 2 GB or more
    • -
    • Storage space: 100 MB or more
    • -
    • Internet connection: Required for some features
    • -
    -

    - Steps

    -

    To download and install Dinosaur Sim APK, you need to follow these steps:

    -
      -
    1. Go to the official website of Dinosaur Sim APK at and click on the download button.
    2. -
    3. Wait for the APK file to be downloaded on your device.
    4. -
    5. Go to your device settings and enable the installation of apps from unknown sources.
    6. -
    7. Locate the APK file on your device and tap on it to start the installation process.
    8. -
    9. Follow the instructions on the screen and wait for the installation to be completed.
    10. -
    11. Launch the game and enjoy playing as a dinosaur.
    12. -
    -

    Pros and Cons of Dinosaur Sim APK

    -

    Dinosaur Sim APK is a game that has many advantages but also some disadvantages. Here are some of them:

    -

    - Pros

    -
      -
    • It is a fun and educational game that appeals to dinosaur lovers of all ages.
    • -
    • It has 25 playable dinosaurs with realistic 3D graphics and animations.
    • -
    • It has four game modes that offer different gameplay experiences and learning opportunities.
    • -
    • It has a lot of educational content that teaches you about dinosaurs and fossils in an engaging way.
    • -
    • It is free to download and play, with no in-app purchases or ads.
    • -
    -

    - Cons

    -
      -
    • It requires a lot of storage space and RAM to run smoothly.
    • -
    • It requires an internet connection for some features, such as saving your progress or sharing your creations.
    • -
    • It may have some bugs or glitches that affect the gameplay quality.
    • -
    • It may not be compatible with some devices or Android versions.
    • -
    -

    Conclusion

    -

    Dinosaur Sim APK is a game that lets you play as one of the 25 realistic dinosaurs and experience their life in different game modes. It is a game that is action-packed but also educational, making it a perfect mix for dinosaur lovers of all ages. You can download and install it for free on your Android device and enjoy playing as a dinosaur. However, you should also be aware of its pros and cons before playing it. We hope this article has helped you learn more about Dinosaur Sim APK and how to play it. If you have any questions or feedback, feel free to leave them in the comments section below.

    -

    FAQs

    -

    Here are some frequently asked questions about Dinosaur Sim APK:

    -
      -
    1. What is the latest version of Dinosaur Sim APK?
    2. -

      The latest version of Dinosaur Sim APK is 1.5.0, which was released on June 15, 2023. It added new dinosaurs, new features, bug fixes, and performance improvements.

      -
    3. Can I play Dinosaur Sim APK offline?
    4. -

      You can play Dinosaur Sim APK offline in some game modes, such as Dino Simulator mode and Dino Paint mode. However, you need an internet connection for some features, such as saving your progress or sharing your creations.

      -
    5. Can I play Dinosaur Sim APK on PC?
    6. -

      You can play Dinosaur Sim APK on PC by using an Android emulator, such as BlueStacks or NoxPlayer. However, you may experience some compatibility issues or performance issues depending on your PC specifications.

      -
    7. How can I update Dinosaur Sim APK?
    8. -

      You can update Dinosaur Sim APK by downloading the latest version from the official website or by checking for updates in the game settings. You should always update the game to enjoy the latest features and bug fixes.

      -
    9. Is Dinosaur Sim APK safe to download and install?
    10. -

      Dinosaur Sim APK is safe to download and install on your Android device, as long as you download it from the official website or a trusted source. However, you should always be careful when installing apps from unknown sources and check the permissions and reviews before installing them.

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Parking Master Multiplayer 2 Mod Apk for Free - No Ads Unlimited Rewards.md b/spaces/1phancelerku/anime-remove-background/Download Parking Master Multiplayer 2 Mod Apk for Free - No Ads Unlimited Rewards.md deleted file mode 100644 index 2020ae450bec4f008abfa88f4b0b77d37b236a26..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Parking Master Multiplayer 2 Mod Apk for Free - No Ads Unlimited Rewards.md +++ /dev/null @@ -1,114 +0,0 @@ -
      -

      Parking Master Multiplayer 2 Mod APK 2023: The Ultimate Parking Game

      -

      Introduction

      -

      Do you love parking games? Do you want to test your driving skills and challenge your friends in a realistic and fun parking simulator? If yes, then you should try Parking Master Multiplayer 2, the best parking game for Android devices.

      -

      parking master multiplayer 2 mod apk 2023


      Download ———>>> https://jinyurl.com/2uNNll



      -

      What is Parking Master Multiplayer 2?

      -

Parking Master Multiplayer 2 is a parking game developed by Games Studio, a popular game developer that has created many other successful games such as Racing Fever and Drift Max. In this game, you can choose from a variety of cars, from sports cars to trucks, and park them in different scenarios, such as city streets, parking lots, airports, and more. You can also customize your cars with different colors, stickers, wheels, and accessories.

      -

      Why do you need Parking Master Multiplayer 2 Mod APK 2023?

      -

      Parking Master Multiplayer 2 is a free game, but it has some limitations that can affect your gaming experience. For example, you need to watch ads to get more fuel or unlock new cars. You also need to earn coins and gems to upgrade your cars or buy new ones. These things can be frustrating and time-consuming, especially if you want to enjoy the game without any interruptions or restrictions.

      -

      That's why you need Parking Master Multiplayer 2 Mod APK 2023, a modified version of the game that gives you unlimited fuel, no ads, all cars unlocked, and more. With this mod apk, you can play the game as much as you want, without worrying about running out of fuel or watching annoying ads. You can also access all the cars in the game, from the cheapest to the most expensive ones, and customize them to your liking. You can also enjoy the realistic graphics and physics of the game, as well as the multiplayer mode that lets you compete with other players online.

      -

      Features of Parking Master Multiplayer 2 Mod APK 2023

      -

      Unlimited Fuel

      -

      One of the main features of Parking Master Multiplayer 2 Mod APK 2023 is unlimited fuel. In the original game, you have a limited amount of fuel that decreases as you drive your car. When you run out of fuel, you have to watch an ad or pay with gems to refill it. This can be annoying and interrupt your gameplay.

      -

      With Parking Master Multiplayer 2 Mod APK 2023, you don't have to worry about fuel anymore. You have unlimited fuel that never runs out, no matter how long or how far you drive your car. You can play the game without any interruptions or limitations.

      -

      No Ads

      -

      Another feature of Parking Master Multiplayer 2 Mod APK 2023 is no ads. In the original game, you have to watch ads to get more fuel, unlock new cars, or get extra rewards. These ads can be boring and waste your time.

      -

      With Parking Master Multiplayer 2 Mod APK 2023, you don't have to watch any ads at all. You can play the game without any distractions or delays. You can also save your mobile data and battery life by avoiding unnecessary ads.

      -

      All Cars Unlocked

      -

      A third feature of Parking Master Multiplayer 2 Mod APK 2023 is all cars unlocked. In the original game, you have to earn coins and gems to unlock new cars or buy them with real money. There are many cars in the game, from sports cars to trucks, but they are not all available at the beginning. You have to complete levels and missions to unlock them or pay for them.

      -

      With Parking Master Multiplayer 2 Mod APK 2023, you don't have to do any of that. You can access all the cars in the game from the start, without spending any coins, gems, or money. You can choose any car you want and enjoy its features and performance.

      -

      parking master multiplayer 2 mod apk 2023 download
      -parking master multiplayer 2 mod apk 2023 unlimited money
      -parking master multiplayer 2 mod apk 2023 latest version
      -parking master multiplayer 2 mod apk 2023 free
      -parking master multiplayer 2 mod apk 2023 android
      -parking master multiplayer 2 mod apk 2023 online
      -parking master multiplayer 2 mod apk 2023 hack
      -parking master multiplayer 2 mod apk 2023 cheats
      -parking master multiplayer 2 mod apk 2023 gameplay
      -parking master multiplayer 2 mod apk 2023 review
      -parking master multiplayer 2 mod apk 2023 features
      -parking master multiplayer 2 mod apk 2023 update
      -parking master multiplayer 2 mod apk 2023 install
      -parking master multiplayer 2 mod apk 2023 guide
      -parking master multiplayer 2 mod apk 2023 tips
      -parking master multiplayer 2 mod apk 2023 tricks
      -parking master multiplayer 2 mod apk 2023 best cars
      -parking master multiplayer 2 mod apk 2023 customizations
      -parking master multiplayer 2 mod apk 2023 maps
      -parking master multiplayer 2 mod apk 2023 missions
      -parking master multiplayer 2 mod apk 2023 challenges
      -parking master multiplayer 2 mod apk 2023 levels
      -parking master multiplayer 2 mod apk 2023 modes
      -parking master multiplayer 2 mod apk 2023 graphics
      -parking master multiplayer 2 mod apk 2023 sound
      -parking master multiplayer 2 mod apk 2023 controls
      -parking master multiplayer 2 mod apk 2023 settings
      -parking master multiplayer 2 mod apk 2023 requirements
      -parking master multiplayer 2 mod apk 2023 size
      -parking master multiplayer 2 mod apk 2023 rating
      -parking master multiplayer 2 mod apk download for pc
      -how to play parking master multiplayer 2 with friends
      -how to get parking master multiplayer 2 for free
      -how to unlock all cars in parking master multiplayer 2
      -how to park like a pro in parking master multiplayer 2
      -how to win every race in parking master multiplayer 2
      -how to earn more money in parking master multiplayer 2
      -how to upgrade your car in parking master multiplayer

      -

      Realistic Graphics and Physics

      -

      A fourth feature of Parking Master Multiplayer 2 Mod APK 2023 is realistic graphics and physics. The game has amazing graphics that make you feel like you are driving a real car in a real environment. The game also has realistic physics that simulate the behavior of the car and the environment, such as gravity, friction, inertia, and collision.

      -

      With Parking Master Multiplayer 2 Mod APK 2023, you can enjoy the same graphics and physics as the original game, but with better performance and smoother gameplay. You can also adjust the graphics settings to suit your device and preference.

      -

      Multiplayer Mode

      -

      A fifth feature of Parking Master Multiplayer 2 Mod APK 2023 is multiplayer mode. The game has a multiplayer mode that lets you play with other players online. You can join or create a room and invite your friends or random players to join you. You can also chat with them and see their scores and rankings.

      -

      With Parking Master Multiplayer 2 Mod APK 2023, you can enjoy the multiplayer mode without any limitations or problems. You can play with anyone you want, without worrying about lagging or disconnecting. You can also have more fun and challenge by competing with other players who have the same mod apk as you.

      -

      How to download and install Parking Master Multiplayer 2 Mod APK 2023?

      -

      Step 1: Download the APK file from the link below

      -

      The first step to download and install Parking Master Multiplayer 2 Mod APK 2023 is to download the APK file from the link below. The link will take you to a secure and reliable website where you can download the file safely and quickly.

      -

      Download Parking Master Multiplayer 2 Mod APK 2023 here
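Before installing anything, it is worth confirming that the downloaded file arrived intact. Below is a minimal Python sketch of a checksum check; the file name and the reference hash are placeholders you would replace with your own values (the download site may or may not publish a reference hash).

```python
import hashlib

# Hypothetical values -- replace with your own file name and the published hash, if any.
APK_PATH = "ParkingMaster2-mod-2023.apk"
EXPECTED_SHA256 = ""  # paste the reference SHA-256 here if the site provides one

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Compute the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha256_of(APK_PATH)
    print("SHA-256:", actual)
    if EXPECTED_SHA256 and actual == EXPECTED_SHA256:
        print("Checksum matches the published value.")
    elif EXPECTED_SHA256:
        print("Checksum does not match -- re-download the file before installing.")
```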

      -

      Step 2: Enable unknown sources on your device

      -

The second step to download and install Parking Master Multiplayer 2 Mod APK 2023 is to enable unknown sources on your device. This is necessary because the mod APK does not come from the official Google Play Store, so your device has to be allowed to install apps from other sources.

      -

      To enable unknown sources, go to your device settings, then security, then unknown sources. Turn on the switch or check the box to enable it. You may also see a pop-up message asking for your permission. Tap on OK or Allow to confirm it.

      -

      Step 3: Install the APK file and enjoy the game

      -

      The third step to download and install Parking Master Multiplayer 2 Mod APK 2023 is to install the APK file and enjoy the game. To install the APK file, go to your file manager or downloads folder and find the file you downloaded. Tap on it and follow the instructions on the screen to install it.
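If you prefer installing from a computer instead of the on-device file manager, the same APK can be sideloaded over adb. The sketch below is only an illustration: it assumes adb is installed on the computer, USB debugging is enabled on the phone, and the file name is the same placeholder used above.

```python
import subprocess

APK_PATH = "ParkingMaster2-mod-2023.apk"  # placeholder name, same as in the checksum example

# `adb install -r` installs the package on the connected device,
# replacing an existing installation if one is present.
result = subprocess.run(
    ["adb", "install", "-r", APK_PATH],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```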

      -

      Once the installation is done, you can open the game and start playing it. You will see that you have unlimited fuel, no ads, all cars unlocked, realistic graphics and physics, and multiplayer mode. You can also customize your cars and settings as you wish.

      -

      Conclusion

      -

      Parking Master Multiplayer 2 is a parking game that tests your driving skills and challenges your friends in a realistic and fun parking simulator. It has many features that make it one of the best parking games for Android devices.

      -

      However, if you want to enjoy the game without any limitations or interruptions, you should download Parking Master Multiplayer 2 Mod APK 2023, a modified version of the game that gives you unlimited fuel, no ads, all cars unlocked, realistic graphics and physics, and multiplayer mode.

      -

      To download Parking Master Multiplayer 2 Mod APK 2023, just follow these three simple steps:

      -
1. Download the APK file from the link below
2. Enable unknown sources on your device
3. Install the APK file and enjoy the game
      -

      Parking Master Multiplayer 2 Mod APK 2023 is a great way to have more fun and challenge in parking games. Download it now and see for yourself!

      -

      FAQs

      -

      Here are some frequently asked questions about Parking Master Multiplayer 2 Mod APK 2023:

| Question | Answer |
| --- | --- |
| Is Parking Master Multiplayer 2 Mod APK 2023 safe to use? | Yes, it is safe to use. It does not contain any viruses, malware, or spyware that can harm your device or data, and it does not require any root or jailbreak to work. |
| Is Parking Master Multiplayer 2 Mod APK 2023 compatible with my device? | It is compatible with most Android devices that have Android 4.4 or higher. However, some devices may not support the game or the mod APK due to different specifications or settings. If you encounter any problems, you can contact the developer or try another device. |
| Can I play Parking Master Multiplayer 2 Mod APK 2023 offline? | It can be played offline, but you will not be able to access the multiplayer mode or some online features. You will also need an internet connection to download and install the mod APK. |
| Can I update Parking Master Multiplayer 2 Mod APK 2023? | The mod APK may not be compatible with the latest version of the game, so you should not update the game or the mod APK unless a new version of the mod APK is available. You can check for updates on the website where you downloaded the mod APK or on this page. |
| Can I share Parking Master Multiplayer 2 Mod APK 2023 with my friends? | Yes, but only for personal and non-commercial use. You should not distribute or sell the mod APK without the permission of the developer or the owner of the game. |

      -
      -
      \ No newline at end of file diff --git a/spaces/2ndelement/voicevox/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md b/spaces/2ndelement/voicevox/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md deleted file mode 100644 index 32a15afd7544b8cfecb727231432376aa8c9917e..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md +++ /dev/null @@ -1,3 +0,0 @@ -dummy3 policy - -https://voicevox.hiroshiba.jp/ diff --git a/spaces/30Kanika/Animal_Image_Classifier/README.md b/spaces/30Kanika/Animal_Image_Classifier/README.md deleted file mode 100644 index 6af4859a98fd15fa34682e51d0ea614b75606bce..0000000000000000000000000000000000000000 --- a/spaces/30Kanika/Animal_Image_Classifier/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Animal Image Classifier -emoji: 🌍 -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/52Hz/CMFNet_dehazing/model/CMFNet.py b/spaces/52Hz/CMFNet_dehazing/model/CMFNet.py deleted file mode 100644 index 99dd5ced088d6d8c11c2fb46c0778c69286685f1..0000000000000000000000000000000000000000 --- a/spaces/52Hz/CMFNet_dehazing/model/CMFNet.py +++ /dev/null @@ -1,191 +0,0 @@ -import torch -import torch.nn as nn -from model.block import SAB, CAB, PAB, conv, SAM, conv3x3, conv_down - -########################################################################## -## U-Net -bn = 2 # block number-1 - -class Encoder(nn.Module): - def __init__(self, n_feat, kernel_size, reduction, act, bias, scale_unetfeats, block): - super(Encoder, self).__init__() - if block == 'CAB': - self.encoder_level1 = [CAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level2 = [CAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level3 = [CAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - elif block == 'PAB': - self.encoder_level1 = [PAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level2 = [PAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level3 = [PAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - elif block == 'SAB': - self.encoder_level1 = [SAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level2 = [SAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level3 = [SAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level1 = nn.Sequential(*self.encoder_level1) - self.encoder_level2 = nn.Sequential(*self.encoder_level2) - self.encoder_level3 = nn.Sequential(*self.encoder_level3) - self.down12 = DownSample(n_feat, scale_unetfeats) - self.down23 = DownSample(n_feat + scale_unetfeats, scale_unetfeats) - - def forward(self, x): - enc1 = self.encoder_level1(x) - x = self.down12(enc1) - enc2 = self.encoder_level2(x) - x = self.down23(enc2) - enc3 = self.encoder_level3(x) - return [enc1, enc2, enc3] - -class Decoder(nn.Module): - def __init__(self, n_feat, kernel_size, reduction, act, bias, scale_unetfeats, block): - super(Decoder, self).__init__() - if block == 'CAB': - 
self.decoder_level1 = [CAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.decoder_level2 = [CAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.decoder_level3 = [CAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - elif block == 'PAB': - self.decoder_level1 = [PAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.decoder_level2 = [PAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.decoder_level3 = [PAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - elif block == 'SAB': - self.decoder_level1 = [SAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.decoder_level2 = [SAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.decoder_level3 = [SAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.decoder_level1 = nn.Sequential(*self.decoder_level1) - self.decoder_level2 = nn.Sequential(*self.decoder_level2) - self.decoder_level3 = nn.Sequential(*self.decoder_level3) - if block == 'CAB': - self.skip_attn1 = CAB(n_feat, kernel_size, reduction, bias=bias, act=act) - self.skip_attn2 = CAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) - if block == 'PAB': - self.skip_attn1 = PAB(n_feat, kernel_size, reduction, bias=bias, act=act) - self.skip_attn2 = PAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) - if block == 'SAB': - self.skip_attn1 = SAB(n_feat, kernel_size, reduction, bias=bias, act=act) - self.skip_attn2 = SAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) - self.up21 = SkipUpSample(n_feat, scale_unetfeats) - self.up32 = SkipUpSample(n_feat + scale_unetfeats, scale_unetfeats) - - def forward(self, outs): - enc1, enc2, enc3 = outs - dec3 = self.decoder_level3(enc3) - x = self.up32(dec3, self.skip_attn2(enc2)) - dec2 = self.decoder_level2(x) - x = self.up21(dec2, self.skip_attn1(enc1)) - dec1 = self.decoder_level1(x) - return [dec1, dec2, dec3] - -########################################################################## -##---------- Resizing Modules ---------- -class DownSample(nn.Module): - def __init__(self, in_channels, s_factor): - super(DownSample, self).__init__() - self.down = nn.Sequential(nn.Upsample(scale_factor=0.5, mode='bilinear', align_corners=False), - nn.Conv2d(in_channels, in_channels + s_factor, 1, stride=1, padding=0, bias=False)) - - def forward(self, x): - x = self.down(x) - return x - -class UpSample(nn.Module): - def __init__(self, in_channels, s_factor): - super(UpSample, self).__init__() - self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - nn.Conv2d(in_channels + s_factor, in_channels, 1, stride=1, padding=0, bias=False)) - - def forward(self, x): - x = self.up(x) - return x - -class SkipUpSample(nn.Module): - def __init__(self, in_channels, s_factor): - super(SkipUpSample, self).__init__() - self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - nn.Conv2d(in_channels + s_factor, in_channels, 1, stride=1, padding=0, bias=False)) - - def forward(self, x, y): - x = self.up(x) - x = x + y - return x - -########################################################################## -# Mixed Residual Module -class Mix(nn.Module): - def __init__(self, 
m=1): - super(Mix, self).__init__() - w = nn.Parameter(torch.FloatTensor([m]), requires_grad=True) - w = nn.Parameter(w, requires_grad=True) - self.w = w - self.mix_block = nn.Sigmoid() - - def forward(self, fea1, fea2, feat3): - factor = self.mix_block(self.w) - other = (1 - factor)/2 - output = fea1 * other.expand_as(fea1) + fea2 * factor.expand_as(fea2) + feat3 * other.expand_as(feat3) - return output, factor - -########################################################################## -# Architecture -class CMFNet(nn.Module): - def __init__(self, in_c=3, out_c=3, n_feat=96, scale_unetfeats=48, kernel_size=3, reduction=4, bias=False): - super(CMFNet, self).__init__() - - p_act = nn.PReLU() - self.shallow_feat1 = nn.Sequential(conv(in_c, n_feat // 2, kernel_size, bias=bias), p_act, - conv(n_feat // 2, n_feat, kernel_size, bias=bias)) - self.shallow_feat2 = nn.Sequential(conv(in_c, n_feat // 2, kernel_size, bias=bias), p_act, - conv(n_feat // 2, n_feat, kernel_size, bias=bias)) - self.shallow_feat3 = nn.Sequential(conv(in_c, n_feat // 2, kernel_size, bias=bias), p_act, - conv(n_feat // 2, n_feat, kernel_size, bias=bias)) - - self.stage1_encoder = Encoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'CAB') - self.stage1_decoder = Decoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'CAB') - - self.stage2_encoder = Encoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'PAB') - self.stage2_decoder = Decoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'PAB') - - self.stage3_encoder = Encoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'SAB') - self.stage3_decoder = Decoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'SAB') - - self.sam1o = SAM(n_feat, kernel_size=3, bias=bias) - self.sam2o = SAM(n_feat, kernel_size=3, bias=bias) - self.sam3o = SAM(n_feat, kernel_size=3, bias=bias) - - self.mix = Mix(1) - self.add123 = conv(out_c, out_c, kernel_size, bias=bias) - self.concat123 = conv(n_feat*3, n_feat, kernel_size, bias=bias) - self.tail = conv(n_feat, out_c, kernel_size, bias=bias) - - - def forward(self, x): - ## Compute Shallow Features - shallow1 = self.shallow_feat1(x) - shallow2 = self.shallow_feat2(x) - shallow3 = self.shallow_feat3(x) - - ## Enter the UNet-CAB - x1 = self.stage1_encoder(shallow1) - x1_D = self.stage1_decoder(x1) - ## Apply SAM - x1_out, x1_img = self.sam1o(x1_D[0], x) - - ## Enter the UNet-PAB - x2 = self.stage2_encoder(shallow2) - x2_D = self.stage2_decoder(x2) - ## Apply SAM - x2_out, x2_img = self.sam2o(x2_D[0], x) - - ## Enter the UNet-SAB - x3 = self.stage3_encoder(shallow3) - x3_D = self.stage3_decoder(x3) - ## Apply SAM - x3_out, x3_img = self.sam3o(x3_D[0], x) - - ## Aggregate SAM features of Stage 1, Stage 2 and Stage 3 - mix_r = self.mix(x1_img, x2_img, x3_img) - mixed_img = self.add123(mix_r[0]) - - ## Concat SAM features of Stage 1, Stage 2 and Stage 3 - concat_feat = self.concat123(torch.cat([x1_out, x2_out, x3_out], 1)) - x_final = self.tail(concat_feat) - - return x_final + mixed_img \ No newline at end of file diff --git a/spaces/AIZ2H/08-Search-Streamlit-Session-State-QueryParameters/README.md b/spaces/AIZ2H/08-Search-Streamlit-Session-State-QueryParameters/README.md deleted file mode 100644 index e4821a5e6f0e0b1c4b70260eca077c703ca7c75c..0000000000000000000000000000000000000000 --- a/spaces/AIZ2H/08-Search-Streamlit-Session-State-QueryParameters/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 08 Search Streamlit Session State QueryParameters 
-emoji: 🔎🧠 -colorFrom: pink -colorTo: purple -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIZero2Hero4Health/1-ASRLiveSpeechRecognition-GR/app.py b/spaces/AIZero2Hero4Health/1-ASRLiveSpeechRecognition-GR/app.py deleted file mode 100644 index 56000ede939b2328cb4aea13c5827dc072c9fe0e..0000000000000000000000000000000000000000 --- a/spaces/AIZero2Hero4Health/1-ASRLiveSpeechRecognition-GR/app.py +++ /dev/null @@ -1,169 +0,0 @@ -import gradio as gr -import torch -import time -import librosa -import soundfile -import nemo.collections.asr as nemo_asr -import tempfile -import os -import uuid - -from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration -import torch - -# PersistDataset ----- -import os -import csv -import gradio as gr -from gradio import inputs, outputs -import huggingface_hub -from huggingface_hub import Repository, hf_hub_download, upload_file -from datetime import datetime - -# --------------------------------------------- -# Dataset and Token links - change awacke1 to your own HF id, and add a HF_TOKEN copy to your repo for write permissions -# This should allow you to save your results to your own Dataset hosted on HF. --- -#DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/Carddata.csv" -#DATASET_REPO_ID = "awacke1/Carddata.csv" -#DATA_FILENAME = "Carddata.csv" -#DATA_FILE = os.path.join("data", DATA_FILENAME) -#HF_TOKEN = os.environ.get("HF_TOKEN") -#SCRIPT = """ - -# -#""" - -#try: -# hf_hub_download( -# repo_id=DATASET_REPO_ID, -# filename=DATA_FILENAME, -# cache_dir=DATA_DIRNAME, -# force_filename=DATA_FILENAME -# ) -#except: -# print("file not found") -#repo = Repository( -# local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -#) - -#def store_message(name: str, message: str): -# if name and message: -# with open(DATA_FILE, "a") as csvfile: -# writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"]) -# writer.writerow( -# {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())} -# ) -# # uncomment line below to begin saving - -# commit_url = repo.push_to_hub() -# return "" - -#iface = gr.Interface( -# store_message, -# [ -# inputs.Textbox(placeholder="Your name"), -# inputs.Textbox(placeholder="Your message", lines=2), -# ], -# "html", -# css=""" -# .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; } -# """, -# title="Reading/writing to a HuggingFace dataset repo from Spaces", -# description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.", -# article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})", -#) - - -# main ------------------------- -mname = "facebook/blenderbot-400M-distill" -model = BlenderbotForConditionalGeneration.from_pretrained(mname) -tokenizer = BlenderbotTokenizer.from_pretrained(mname) - -def take_last_tokens(inputs, note_history, history): - """Filter the last 128 tokens""" - if inputs['input_ids'].shape[1] > 128: - inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()]) - inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()]) - note_history = [' '.join(note_history[0].split(' ')[2:])] - history = history[1:] - return inputs, note_history, history - -def add_note_to_history(note, note_history): - """Add a note to the 
historical information""" - note_history.append(note) - note_history = ' '.join(note_history) - return [note_history] - - -def chat(message, history): - history = history or [] - if history: - history_useful = [' '.join([str(a[0])+' '+str(a[1]) for a in history])] - else: - history_useful = [] - history_useful = add_note_to_history(message, history_useful) - inputs = tokenizer(history_useful, return_tensors="pt") - inputs, history_useful, history = take_last_tokens(inputs, history_useful, history) - reply_ids = model.generate(**inputs) - response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0] - history_useful = add_note_to_history(response, history_useful) - list_history = history_useful[0].split(' ') - history.append((list_history[-2], list_history[-1])) -# store_message(message, response) # Save to dataset - uncomment if you uncomment above to save inputs and outputs to your dataset - return history, history - - -SAMPLE_RATE = 16000 -model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained("nvidia/stt_en_conformer_transducer_xlarge") -model.change_decoding_strategy(None) -model.eval() - -def process_audio_file(file): - data, sr = librosa.load(file) - if sr != SAMPLE_RATE: - data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE) - # monochannel - data = librosa.to_mono(data) - return data - - -def transcribe(audio, state = ""): - if state is None: - state = "" - audio_data = process_audio_file(audio) - with tempfile.TemporaryDirectory() as tmpdir: - audio_path = os.path.join(tmpdir, f'audio_{uuid.uuid4()}.wav') - soundfile.write(audio_path, audio_data, SAMPLE_RATE) - transcriptions = model.transcribe([audio_path]) - if type(transcriptions) == tuple and len(transcriptions) == 2: - transcriptions = transcriptions[0] - transcriptions = transcriptions[0] -# store_message(transcriptions, state) # Save to dataset - uncomment to store into a dataset - hint you will need your HF_TOKEN - state = state + transcriptions + " " - return state, state - -iface = gr.Interface( - fn=transcribe, - inputs=[ - gr.Audio(source="microphone", type='filepath', streaming=True), - "state", - ], - outputs=[ - "textbox", - "state", - ], - layout="horizontal", - theme="huggingface", - title="🗣️LiveSpeechRecognition🧠Memory💾", - description=f"Live Automatic Speech Recognition (ASR) with Memory💾 Dataset.", - allow_flagging='never', - live=True, -# article=f"Result Output Saved to Memory💾 Dataset: [{DATASET_REPO_URL}]({DATASET_REPO_URL})" - article=f"Important Videos to understanding AI and NLP Clinical Terminology, Assessment, and Value Based Care AI include Huggingfaces Course Series here: https://www.youtube.com/c/HuggingFace , AI NLP Innovations in 2022 for Clinical and Mental Health Care here: https://www.youtube.com/watch?v=r38lXjz3g6M&list=PLHgX2IExbFov_5_4WfkesR7gnWPHHG-a1 and this link to see and manage playlist here: https://www.youtube.com/playlist?list=PLHgX2IExbFov_5_4WfkesR7gnWPHHG-a1 Review at your leisure to understand AI and NLP impact to helping the world develop Clinical systems of the future using AI and NLP for Clinical Terminology and alignment to worldwide Value Based Care objectives to help people be healthy." 
-) -iface.launch() diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/server/internal.js b/spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/server/internal.js deleted file mode 100644 index 844afbf07d5c527f4e6fa556acc6bb145af22d62..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/server/internal.js +++ /dev/null @@ -1,30 +0,0 @@ - -import root from '../root.svelte'; -import { set_building } from '__sveltekit/environment'; -import { set_assets } from '__sveltekit/paths'; -import { set_private_env, set_public_env } from '../../../node_modules/@sveltejs/kit/src/runtime/shared-server.js'; - -export const options = { - app_template_contains_nonce: false, - csp: {"mode":"auto","directives":{"upgrade-insecure-requests":false,"block-all-mixed-content":false},"reportOnly":{"upgrade-insecure-requests":false,"block-all-mixed-content":false}}, - csrf_check_origin: false, - track_server_fetches: false, - embedded: false, - env_public_prefix: 'PUBLIC_', - env_private_prefix: '', - hooks: null, // added lazily, via `get_hooks` - preload_strategy: "modulepreload", - root, - service_worker: false, - templates: { - app: ({ head, body, assets, nonce, env }) => "\r\n\r\n\t\r\n\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t" + head + "\r\n\t\r\n\t\r\n\t\t
      " + body + "
      \r\n\t\r\n\r\n", - error: ({ status, message }) => "\n\n\t\n\t\t\n\t\t" + message + "\n\n\t\t\n\t\n\t\n\t\t
      \n\t\t\t" + status + "\n\t\t\t
      \n\t\t\t\t

      " + message + "

      \n\t\t\t
      \n\t\t
      \n\t\n\n" - }, - version_hash: "r3vpsq" -}; - -export function get_hooks() { - return import("../../../src/hooks.server.ts"); -} - -export { set_assets, set_building, set_private_env, set_public_env }; diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.d.ts deleted file mode 100644 index c4c51077af8f6ac7a90522915c567e39e6e8e75f..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.d.ts +++ /dev/null @@ -1,59 +0,0 @@ -import ColorInputBase from '../colorinputbase/ColorInputBase'; -import RoundRectangle from '../../roundrectangle/RoundRectangle'; -import ColorComponents from '../colorcomponents/ColorComponents'; -import CanvasInput from '../../canvasinput/CanvasInput'; - -export default ColorInput; - -declare namespace ColorInput { - type TransitCallbackType = ( - gameObject: Phaser.GameObjects.GameObject, - duration: number - ) => void; - - interface IConfig extends ColorInputBase.IConfig { - colorPicker?: { - width?: number, height?: number, - - background?: RoundRectangle.IConfig, - createBackgroundCallback: ( - scene: Phaser.Scene, - ) => Phaser.GameObjects.GameObject, - - hPalettePosition?: 0 | 1 | 2 | 3 | 'bottom' | 'left' | 'top' | 'right', - - expandDirection?: 0 | 1 | 'down' | 'up', - - easeIn?: number, easeOut?: number, - - transitIn?: TransitCallbackType, - transitOut?: TransitCallbackType, - - bounds?: Phaser.Geom.Rectangle; - - space?: { - left?: number, right?: number, top?: number, bottom?: number, - item?: number, - } - }, - - colorComponents?: { - height?: number, - - formatLabel?: ColorComponents.IFormatLabelConfig, - - inputText?: CanvasInput.IConfig, - - space?: { - left?: number, right?: number, top?: number, bottom?: number, - }, - } - } -} - -declare class ColorInput extends ColorInputBase { - constructor( - scene: Phaser.Scene, - config?: ColorInput.IConfig - ); -} \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.js deleted file mode 100644 index 94439e7a9a4e84776817d958d8392761d597257c..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.js +++ /dev/null @@ -1,13 +0,0 @@ -import RoundRectangle from './RoundRectangle.js'; -import ObjectFactory from '../ObjectFactory.js'; -import SetValue from '../../../plugins/utils/object/SetValue.js'; - -ObjectFactory.register('roundRectangle', function (x, y, width, height, radiusConfig, fillColor, fillAlpha) { - var gameObject = new RoundRectangle(this.scene, x, y, width, height, radiusConfig, fillColor, fillAlpha); - this.scene.add.existing(gameObject); - return gameObject; -}); - -SetValue(window, 'RexPlugins.UI.RoundRectangle', RoundRectangle); - -export default RoundRectangle; \ No newline at end of file diff --git a/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/keypoint_detector.py b/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/keypoint_detector.py deleted file mode 100644 index a39a19458c75449c65d3e7810974eededb9d2d67..0000000000000000000000000000000000000000 --- a/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/keypoint_detector.py +++ /dev/null @@ 
-1,27 +0,0 @@ -from torch import nn -import torch -from torchvision import models - -class KPDetector(nn.Module): - """ - Predict K*5 keypoints. - """ - - def __init__(self, num_tps, **kwargs): - super(KPDetector, self).__init__() - self.num_tps = num_tps - - self.fg_encoder = models.resnet18(pretrained=False) - num_features = self.fg_encoder.fc.in_features - self.fg_encoder.fc = nn.Linear(num_features, num_tps*5*2) - - - def forward(self, image): - - fg_kp = self.fg_encoder(image) - bs, _, = fg_kp.shape - fg_kp = torch.sigmoid(fg_kp) - fg_kp = fg_kp * 2 - 1 - out = {'fg_kp': fg_kp.view(bs, self.num_tps*5, -1)} - - return out diff --git a/spaces/AlexKozachuk/anything-v3.0/README.md b/spaces/AlexKozachuk/anything-v3.0/README.md deleted file mode 100644 index d2e09658fa22b5fdc59854bde8a4ffb008f84df3..0000000000000000000000000000000000000000 --- a/spaces/AlexKozachuk/anything-v3.0/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Anything V3.0 -emoji: 🏃 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.10.1 -app_file: app.py -pinned: false -duplicated_from: yuessiah/anything-v3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Allakhazam/Home/README.md b/spaces/Allakhazam/Home/README.md deleted file mode 100644 index 18d737702c56bc72f1bb74db10d167c23e28b23f..0000000000000000000000000000000000000000 --- a/spaces/Allakhazam/Home/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Home Prompts -emoji: 🏆 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/clip_loss.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/clip_loss.py deleted file mode 100644 index 18176ee8eb0d992d69d5b951d7f36e2efa92a37b..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/clip_loss.py +++ /dev/null @@ -1,17 +0,0 @@ - -import torch -import clip - - -class CLIPLoss(torch.nn.Module): - - def __init__(self, opts): - super(CLIPLoss, self).__init__() - self.model, self.preprocess = clip.load("ViT-B/32", device="cuda") - self.upsample = torch.nn.Upsample(scale_factor=7) - self.avg_pool = torch.nn.AvgPool2d(kernel_size=opts.stylegan_size // 32) - - def forward(self, image, text): - image = self.avg_pool(self.upsample(image)) - similarity = 1 - self.model(image, text)[0] / 100 - return similarity \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py deleted file mode 100644 index e58952aa207fc6b6211f3e8faf6f93992d576acf..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import torch -import tqdm - -from ...models.unet_1d import UNet1DModel -from ...pipelines import DiffusionPipeline -from ...utils import randn_tensor -from ...utils.dummy_pt_objects import DDPMScheduler - - -class ValueGuidedRLPipeline(DiffusionPipeline): - r""" - Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods - implemented for all pipelines (downloading, saving, running on a particular device, etc.). - - Parameters: - value_function ([`UNet1DModel`]): - A specialized UNet for fine-tuning trajectories base on reward. - unet ([`UNet1DModel`]): - UNet architecture to denoise the encoded trajectories. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this - application is [`DDPMScheduler`]. - env (): - An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models. - """ - - def __init__( - self, - value_function: UNet1DModel, - unet: UNet1DModel, - scheduler: DDPMScheduler, - env, - ): - super().__init__() - self.value_function = value_function - self.unet = unet - self.scheduler = scheduler - self.env = env - self.data = env.get_dataset() - self.means = {} - for key in self.data.keys(): - try: - self.means[key] = self.data[key].mean() - except: # noqa: E722 - pass - self.stds = {} - for key in self.data.keys(): - try: - self.stds[key] = self.data[key].std() - except: # noqa: E722 - pass - self.state_dim = env.observation_space.shape[0] - self.action_dim = env.action_space.shape[0] - - def normalize(self, x_in, key): - return (x_in - self.means[key]) / self.stds[key] - - def de_normalize(self, x_in, key): - return x_in * self.stds[key] + self.means[key] - - def to_torch(self, x_in): - if type(x_in) is dict: - return {k: self.to_torch(v) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(self.unet.device) - return torch.tensor(x_in, device=self.unet.device) - - def reset_x0(self, x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in - - def run_diffusion(self, x, conditions, n_guide_steps, scale): - batch_size = x.shape[0] - y = None - for i in tqdm.tqdm(self.scheduler.timesteps): - # create batch of timesteps to pass into model - timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) - for _ in range(n_guide_steps): - with torch.enable_grad(): - x.requires_grad_() - - # permute to match dimension for pre-trained models - y = self.value_function(x.permute(0, 2, 1), timesteps).sample - grad = torch.autograd.grad([y.sum()], [x])[0] - - posterior_variance = self.scheduler._get_variance(i) - model_std = torch.exp(0.5 * posterior_variance) - grad = model_std * grad - - grad[timesteps < 2] = 0 - x = x.detach() - x = x + scale * grad - x = self.reset_x0(x, conditions, self.action_dim) - - prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) - - # TODO: verify deprecation 
of this kwarg - x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - - # apply conditions to the trajectory (set the initial state) - x = self.reset_x0(x, conditions, self.action_dim) - x = self.to_torch(x) - return x, y - - def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): - # normalize the observations and create batch dimension - obs = self.normalize(obs, "observations") - obs = obs[None].repeat(batch_size, axis=0) - - conditions = {0: self.to_torch(obs)} - shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) - - # generate initial noise and apply our conditions (to make the trajectories start at current state) - x1 = randn_tensor(shape, device=self.unet.device) - x = self.reset_x0(x1, conditions, self.action_dim) - x = self.to_torch(x) - - # run the diffusion process - x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) - - # sort output trajectories by value - sorted_idx = y.argsort(0, descending=True).squeeze() - sorted_values = x[sorted_idx] - actions = sorted_values[:, :, : self.action_dim] - actions = actions.detach().cpu().numpy() - denorm_actions = self.de_normalize(actions, key="actions") - - # select the action with the highest value - if y is not None: - selected_index = 0 - else: - # if we didn't run value guiding, select a random action - selected_index = np.random.randint(0, batch_size) - - denorm_actions = denorm_actions[selected_index, 0] - return denorm_actions diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py deleted file mode 100644 index b4eabb9e3a0e18dd71a445bb8960b27d8699daac..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -import torch - -from diffusers import VersatileDiffusionImageVariationPipeline -from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase): - pass - - -@slow -@require_torch_gpu -class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase): - def test_inference_image_variations(self): - pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion") - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - image_prompt = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" - ) - generator = torch.manual_seed(0) - image = pipe( - image=image_prompt, - generator=generator, - guidance_scale=7.5, - num_inference_steps=50, - output_type="numpy", - ).images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/spaces/Andy1621/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py deleted file mode 100644 index 816d206f5735c008cd6bca6e3cbf7a81fdd9b619..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py +++ /dev/null @@ -1,140 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_mask_rcnn_swin_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - embed_dim=96, - depths=[2, 2, 18, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - ape=False, - drop_path_rate=0.2, - patch_norm=True, - use_checkpoint=False - ), - neck=dict(in_channels=[96, 192, 384, 768]), - roi_head=dict( - bbox_head=[ - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - 
roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) - ])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='AutoAugment', - policies=[ - [ - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict(type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.)})) -lr_config = dict(step=[27, 33]) -runner = dict(type='EpochBasedRunnerAmp', max_epochs=36) - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index f7b07c4f47629c07faa013b9d1eae3462d898c6f..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,12 +0,0 @@ -_base_ = [ - '../_base_/models/dnl_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) -optimizer = dict( - paramwise_cfg=dict( - custom_keys=dict(theta=dict(wd_mult=0.), phi=dict(wd_mult=0.)))) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index fb7c3d55d57b09296ea24889b218f9a0fb997463..0000000000000000000000000000000000000000 --- 
a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Ariharasudhan/YoloV5/utils/segment/loss.py b/spaces/Ariharasudhan/YoloV5/utils/segment/loss.py deleted file mode 100644 index b45b2c27e0a05c275cbc50064288aece3ae3e856..0000000000000000000000000000000000000000 --- a/spaces/Ariharasudhan/YoloV5/utils/segment/loss.py +++ /dev/null @@ -1,186 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..general import xywh2xyxy -from ..loss import FocalLoss, smooth_BCE -from ..metrics import bbox_iou -from ..torch_utils import de_parallel -from .general import crop_mask - - -class ComputeLoss: - # Compute losses - def __init__(self, model, autobalance=False, overlap=False): - self.sort_obj_iou = False - self.overlap = overlap - device = next(model.parameters()).device # get model device - h = model.hyp # hyperparameters - self.device = device - - # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) - - # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets - - # Focal loss - g = h['fl_gamma'] # focal loss gamma - if g > 0: - BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - - m = de_parallel(model).model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 - self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index - self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance - self.na = m.na # number of anchors - self.nc = m.nc # number of classes - self.nl = m.nl # number of layers - self.nm = m.nm # number of masks - self.anchors = m.anchors - self.device = device - - def __call__(self, preds, targets, masks): # predictions, targets, model - p, proto = preds - bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width - lcls = torch.zeros(1, device=self.device) - lbox = torch.zeros(1, device=self.device) - lobj = torch.zeros(1, device=self.device) - lseg = torch.zeros(1, device=self.device) - tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets - - # Losses - for i, pi in enumerate(p): # layer index, layer predictions - b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj - - n = b.shape[0] # number of targets - if n: - pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions - - # Box regression - pxy = pxy.sigmoid() * 2 - 0.5 - pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] - pbox = torch.cat((pxy, pwh), 1) # predicted box - iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) - lbox += (1.0 - iou).mean() # iou loss - - # Objectness - iou = iou.detach().clamp(0).type(tobj.dtype) - if self.sort_obj_iou: - j = iou.argsort() - b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] - if self.gr < 1: - iou = (1.0 - self.gr) + self.gr * iou - tobj[b, a, gj, gi] = iou # iou ratio - - # Classification - if self.nc > 1: # cls loss (only if multiple classes) - t = 
torch.full_like(pcls, self.cn, device=self.device) # targets - t[range(n), tcls[i]] = self.cp - lcls += self.BCEcls(pcls, t) # BCE - - # Mask regression - if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample - masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] - marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized - mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) - for bi in b.unique(): - j = b == bi # matching index - if self.overlap: - mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0) - else: - mask_gti = masks[tidxs[i]][j] - lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j]) - - obji = self.BCEobj(pi[..., 4], tobj) - lobj += obji * self.balance[i] # obj loss - if self.autobalance: - self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() - - if self.autobalance: - self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp["box"] - lobj *= self.hyp["obj"] - lcls *= self.hyp["cls"] - lseg *= self.hyp["box"] / bs - - loss = lbox + lobj + lcls + lseg - return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() - - def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): - # Mask loss for one image - pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) - loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") - return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() - - def build_targets(self, p, targets): - # Build targets for compute_loss(), input targets(image,class,x,y,w,h) - na, nt = self.na, targets.shape[0] # number of anchors, targets - tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] - gain = torch.ones(8, device=self.device) # normalized to gridspace gain - ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - if self.overlap: - batch = p[0].shape[0] - ti = [] - for i in range(batch): - num = (targets[:, 0] == i).sum() # find number of targets of each image - ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num) - ti = torch.cat(ti, 1) # (na, nt) - else: - ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1) - targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices - - g = 0.5 # bias - off = torch.tensor( - [ - [0, 0], - [1, 0], - [0, 1], - [-1, 0], - [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], - device=self.device).float() * g # offsets - - for i in range(self.nl): - anchors, shape = self.anchors[i], p[i].shape - gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - t = targets * gain # shape(3,n,7) - if nt: - # Matches - r = t[..., 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) - t = t[j] # filter - - # Offsets - gxy = t[:, 2:4] # grid xy - gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1 < g) & (gxy > 1)).T - l, m = ((gxi % 1 < g) & (gxi > 1)).T - j = torch.stack((torch.ones_like(j), j, k, l, m)) - t = t.repeat((5, 1, 1))[j] - offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] - else: - t = targets[0] - offsets = 0 - - # Define - bc, gxy, gwh, at = t.chunk(4, 1) # (image, 
class), grid xy, grid wh, anchors - (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class - gij = (gxy - offsets).long() - gi, gj = gij.T # grid indices - - # Append - indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid - tbox.append(torch.cat((gxy - gij, gwh), 1)) # box - anch.append(anchors[a]) # anchors - tcls.append(c) # class - tidxs.append(tidx) - xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized - - return tcls, tbox, indices, anch, tidxs, xywhn diff --git a/spaces/ArkanDash/rvc-models/infer_pack/commons.py b/spaces/ArkanDash/rvc-models/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/ArkanDash/rvc-models/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + 
signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/resnet.py b/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/resnet.py deleted file mode 100644 index bf621e53dd2b467d5dcef4817fed0482c94ae458..0000000000000000000000000000000000000000 --- a/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/resnet.py +++ /dev/null @@ -1,208 +0,0 @@ -# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py - -import torch -import torch.nn as nn -import torch.nn.functional as F -from einops import rearrange - - -class InflatedConv3d(nn.Conv2d): - def forward(self, x): - video_length = x.shape[2] - - x = rearrange(x, "b c f h w -> (b f) c h w") - x = super().forward(x) - x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length) - - return x - - -class Upsample3D(nn.Module): - def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_conv_transpose = use_conv_transpose - self.name = name - - conv = None - if use_conv_transpose: - raise NotImplementedError - elif use_conv: - conv = 
InflatedConv3d(self.channels, self.out_channels, 3, padding=1) - - if name == "conv": - self.conv = conv - else: - self.Conv2d_0 = conv - - def forward(self, hidden_states, output_size=None): - assert hidden_states.shape[1] == self.channels - - if self.use_conv_transpose: - raise NotImplementedError - - # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16 - dtype = hidden_states.dtype - if dtype == torch.bfloat16: - hidden_states = hidden_states.to(torch.float32) - - # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984 - if hidden_states.shape[0] >= 64: - hidden_states = hidden_states.contiguous() - - # if `output_size` is passed we force the interpolation output - # size and do not make use of `scale_factor=2` - if output_size is None: - hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest") - else: - hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest") - - # If the input is bfloat16, we cast back to bfloat16 - if dtype == torch.bfloat16: - hidden_states = hidden_states.to(dtype) - - if self.use_conv: - if self.name == "conv": - hidden_states = self.conv(hidden_states) - else: - hidden_states = self.Conv2d_0(hidden_states) - - return hidden_states - - -class Downsample3D(nn.Module): - def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.padding = padding - stride = 2 - self.name = name - - if use_conv: - conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding) - else: - raise NotImplementedError - - if name == "conv": - self.Conv2d_0 = conv - self.conv = conv - elif name == "Conv2d_0": - self.conv = conv - else: - self.conv = conv - - def forward(self, hidden_states): - assert hidden_states.shape[1] == self.channels - if self.use_conv and self.padding == 0: - raise NotImplementedError - - assert hidden_states.shape[1] == self.channels - hidden_states = self.conv(hidden_states) - - return hidden_states - - -class ResnetBlock3D(nn.Module): - def __init__( - self, - *, - in_channels, - out_channels=None, - conv_shortcut=False, - dropout=0.0, - temb_channels=512, - groups=32, - groups_out=None, - pre_norm=True, - eps=1e-6, - non_linearity="swish", - time_embedding_norm="default", - output_scale_factor=1.0, - use_in_shortcut=None, - ): - super().__init__() - self.pre_norm = pre_norm - self.pre_norm = True - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - self.time_embedding_norm = time_embedding_norm - self.output_scale_factor = output_scale_factor - - if groups_out is None: - groups_out = groups - - self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True) - - self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) - - if temb_channels is not None: - if self.time_embedding_norm == "default": - time_emb_proj_out_channels = out_channels - elif self.time_embedding_norm == "scale_shift": - time_emb_proj_out_channels = out_channels * 2 - else: - raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ") - - self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels) - else: - self.time_emb_proj = None - - 
self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True) - self.dropout = torch.nn.Dropout(dropout) - self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) - - if non_linearity == "swish": - self.nonlinearity = lambda x: F.silu(x) - elif non_linearity == "mish": - self.nonlinearity = Mish() - elif non_linearity == "silu": - self.nonlinearity = nn.SiLU() - - self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut - - self.conv_shortcut = None - if self.use_in_shortcut: - self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) - - def forward(self, input_tensor, temb): - hidden_states = input_tensor - - hidden_states = self.norm1(hidden_states) - hidden_states = self.nonlinearity(hidden_states) - - hidden_states = self.conv1(hidden_states) - - if temb is not None: - temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None] - - if temb is not None and self.time_embedding_norm == "default": - hidden_states = hidden_states + temb - - hidden_states = self.norm2(hidden_states) - - if temb is not None and self.time_embedding_norm == "scale_shift": - scale, shift = torch.chunk(temb, 2, dim=1) - hidden_states = hidden_states * (1 + scale) + shift - - hidden_states = self.nonlinearity(hidden_states) - - hidden_states = self.dropout(hidden_states) - hidden_states = self.conv2(hidden_states) - - if self.conv_shortcut is not None: - input_tensor = self.conv_shortcut(input_tensor) - - output_tensor = (input_tensor + hidden_states) / self.output_scale_factor - - return output_tensor - - -class Mish(torch.nn.Module): - def forward(self, hidden_states): - return hidden_states * torch.tanh(torch.nn.functional.softplus(hidden_states)) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/unicode.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/unicode.py deleted file mode 100644 index 06526203911de55da3c2a8c5ae73f48024c3f018..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/unicode.py +++ /dev/null @@ -1,352 +0,0 @@ -# unicode.py - -import sys -from itertools import filterfalse -from typing import List, Tuple, Union - - -class _lazyclassproperty: - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, "_intern") or any( - cls._intern is getattr(superclass, "_intern", []) - for superclass in cls.__mro__[1:] - ): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] - - -class unicode_set: - """ - A set of Unicode characters, for language-specific strings for - ``alphas``, ``nums``, ``alphanums``, and ``printables``. - A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``. Ranges can be specified using - 2-tuples or a 1-tuple, such as:: - - _ranges = [ - (0x0020, 0x007e), - (0x00a0, 0x00ff), - (0x0100,), - ] - - Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). 
- - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - - _ranges: UnicodeRangeList = [] - - @_lazyclassproperty - def _chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in getattr(cc, "_ranges", ()): - ret.extend(range(rr[0], rr[-1] + 1)) - return [chr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - "all non-whitespace characters in this range" - return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphas(cls): - "all alphabetic characters in this range" - return "".join(filter(str.isalpha, cls._chars_for_ranges)) - - @_lazyclassproperty - def nums(cls): - "all numeric digit characters in this range" - return "".join(filter(str.isdigit, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphanums(cls): - "all alphanumeric characters in this range" - return cls.alphas + cls.nums - - @_lazyclassproperty - def identchars(cls): - "all characters in this range that are valid identifier characters, plus underscore '_'" - return "".join( - sorted( - set( - "".join(filter(str.isidentifier, cls._chars_for_ranges)) - + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" - + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" - + "_" - ) - ) - ) - - @_lazyclassproperty - def identbodychars(cls): - """ - all characters in this range that are valid identifier body characters, - plus the digits 0-9 - """ - return "".join( - sorted( - set( - cls.identchars - + "0123456789" - + "".join( - [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()] - ) - ) - ) - ) - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. 
- """ - - # fmt: off - - # define ranges in language character sets - _ranges: UnicodeRangeList = [ - (0x0020, sys.maxunicode), - ] - - class BasicMultilingualPlane(unicode_set): - "Unicode set for the Basic Multilingual Plane" - _ranges: UnicodeRangeList = [ - (0x0020, 0xFFFF), - ] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0020, 0x007E), - (0x00A0, 0x00FF), - ] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0100, 0x017F), - ] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0180, 0x024F), - ] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges: UnicodeRangeList = [ - (0x0342, 0x0345), - (0x0370, 0x0377), - (0x037A, 0x037F), - (0x0384, 0x038A), - (0x038C,), - (0x038E, 0x03A1), - (0x03A3, 0x03E1), - (0x03F0, 0x03FF), - (0x1D26, 0x1D2A), - (0x1D5E,), - (0x1D60,), - (0x1D66, 0x1D6A), - (0x1F00, 0x1F15), - (0x1F18, 0x1F1D), - (0x1F20, 0x1F45), - (0x1F48, 0x1F4D), - (0x1F50, 0x1F57), - (0x1F59,), - (0x1F5B,), - (0x1F5D,), - (0x1F5F, 0x1F7D), - (0x1F80, 0x1FB4), - (0x1FB6, 0x1FC4), - (0x1FC6, 0x1FD3), - (0x1FD6, 0x1FDB), - (0x1FDD, 0x1FEF), - (0x1FF2, 0x1FF4), - (0x1FF6, 0x1FFE), - (0x2129,), - (0x2719, 0x271A), - (0xAB65,), - (0x10140, 0x1018D), - (0x101A0,), - (0x1D200, 0x1D245), - (0x1F7A1, 0x1F7A7), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0400, 0x052F), - (0x1C80, 0x1C88), - (0x1D2B,), - (0x1D78,), - (0x2DE0, 0x2DFF), - (0xA640, 0xA672), - (0xA674, 0xA69F), - (0xFE2E, 0xFE2F), - ] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x2E80, 0x2E99), - (0x2E9B, 0x2EF3), - (0x31C0, 0x31E3), - (0x3400, 0x4DB5), - (0x4E00, 0x9FEF), - (0xA700, 0xA707), - (0xF900, 0xFA6D), - (0xFA70, 0xFAD9), - (0x16FE2, 0x16FE3), - (0x1F210, 0x1F212), - (0x1F214, 0x1F23B), - (0x1F240, 0x1F248), - (0x20000, 0x2A6D6), - (0x2A700, 0x2B734), - (0x2B740, 0x2B81D), - (0x2B820, 0x2CEA1), - (0x2CEB0, 0x2EBE0), - (0x2F800, 0x2FA1D), - ] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges: UnicodeRangeList = [] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x4E00, 0x9FBF), - (0x3000, 0x303F), - ] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3041, 0x3096), - (0x3099, 0x30A0), - (0x30FC,), - (0xFF70,), - (0x1B001,), - (0x1B150, 0x1B152), - (0x1F200,), - ] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3099, 0x309C), - (0x30A0, 0x30FF), - (0x31F0, 0x31FF), - (0x32D0, 0x32FE), - (0xFF65, 0xFF9F), - (0x1B000,), - (0x1B164, 0x1B167), - (0x1F201, 0x1F202), - (0x1F213,), - ] - - class Hangul(unicode_set): - "Unicode set for Hangul (Korean) Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x1100, 0x11FF), - (0x302E, 0x302F), - (0x3131, 0x318E), - (0x3200, 0x321C), - (0x3260, 0x327B), - (0x327E,), - (0xA960, 0xA97C), - (0xAC00, 0xD7A3), - (0xD7B0, 0xD7C6), - (0xD7CB, 0xD7FB), - (0xFFA0, 0xFFBE), - (0xFFC2, 0xFFC7), - (0xFFCA, 0xFFCF), - (0xFFD2, 0xFFD7), - (0xFFDA, 0xFFDC), - ] - - Korean = Hangul - - class 
CJK(Chinese, Japanese, Hangul): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0E01, 0x0E3A), - (0x0E3F, 0x0E5B) - ] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0600, 0x061B), - (0x061E, 0x06FF), - (0x0700, 0x077F), - ] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0591, 0x05C7), - (0x05D0, 0x05EA), - (0x05EF, 0x05F4), - (0xFB1D, 0xFB36), - (0xFB38, 0xFB3C), - (0xFB3E,), - (0xFB40, 0xFB41), - (0xFB43, 0xFB44), - (0xFB46, 0xFB4F), - ] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0900, 0x097F), - (0xA8E0, 0xA8FF) - ] - - # fmt: on - - -pyparsing_unicode.Japanese._ranges = ( - pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges -) - -pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane - -# add language identifiers using language Unicode -pyparsing_unicode.العربية = pyparsing_unicode.Arabic -pyparsing_unicode.中文 = pyparsing_unicode.Chinese -pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic -pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek -pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew -pyparsing_unicode.日本語 = pyparsing_unicode.Japanese -pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji -pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana -pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana -pyparsing_unicode.한국어 = pyparsing_unicode.Korean -pyparsing_unicode.ไทย = pyparsing_unicode.Thai -pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py deleted file mode 100644 index 71f66bd03cb713a2190853bdf7170c4ea80d2425..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py +++ /dev/null @@ -1,104 +0,0 @@ -import types -import functools - - -# from jaraco.functools 3.3 -def method_cache(method, cache_wrapper=None): - """ - Wrap lru_cache to support storing the cache data in the object instances. - - Abstracts the common paradigm where the method explicitly saves an - underscore-prefixed protected property on first call and returns that - subsequently. - - >>> class MyClass: - ... calls = 0 - ... - ... @method_cache - ... def method(self, value): - ... self.calls += 1 - ... return value - - >>> a = MyClass() - >>> a.method(3) - 3 - >>> for x in range(75): - ... res = a.method(x) - >>> a.calls - 75 - - Note that the apparent behavior will be exactly like that of lru_cache - except that the cache is stored on each instance, so values in one - instance will not flush values from another, and when an instance is - deleted, so are the cached values for that instance. - - >>> b = MyClass() - >>> for x in range(35): - ... 
res = b.method(x) - >>> b.calls - 35 - >>> a.method(0) - 0 - >>> a.calls - 75 - - Note that if method had been decorated with ``functools.lru_cache()``, - a.calls would have been 76 (due to the cached value of 0 having been - flushed by the 'b' instance). - - Clear the cache with ``.cache_clear()`` - - >>> a.method.cache_clear() - - Same for a method that hasn't yet been called. - - >>> c = MyClass() - >>> c.method.cache_clear() - - Another cache wrapper may be supplied: - - >>> cache = functools.lru_cache(maxsize=2) - >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) - >>> a = MyClass() - >>> a.method2() - 3 - - Caution - do not subsequently wrap the method with another decorator, such - as ``@property``, which changes the semantics of the function. - - See also - http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ - for another implementation and additional justification. - """ - cache_wrapper = cache_wrapper or functools.lru_cache() - - def wrapper(self, *args, **kwargs): - # it's the first call, replace the method with a cached, bound method - bound_method = types.MethodType(method, self) - cached_method = cache_wrapper(bound_method) - setattr(self, method.__name__, cached_method) - return cached_method(*args, **kwargs) - - # Support cache clear even before cache has been created. - wrapper.cache_clear = lambda: None - - return wrapper - - -# From jaraco.functools 3.3 -def pass_none(func): - """ - Wrap func so it's not called if its first param is None - - >>> print_text = pass_none(print) - >>> print_text('text') - text - >>> print_text(None) - """ - - @functools.wraps(func) - def wrapper(param, *args, **kwargs): - if param is not None: - return func(param, *args, **kwargs) - - return wrapper diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/bdist_egg.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/bdist_egg.py deleted file mode 100644 index 11a1c6be28ad008b7c083c229bb0df644ec58a0e..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/bdist_egg.py +++ /dev/null @@ -1,457 +0,0 @@ -"""setuptools.command.bdist_egg - -Build .egg distributions""" - -from distutils.dir_util import remove_tree, mkpath -from distutils import log -from types import CodeType -import sys -import os -import re -import textwrap -import marshal - -from pkg_resources import get_build_platform, Distribution -from setuptools.extension import Library -from setuptools import Command -from .._path import ensure_directory - -from sysconfig import get_path, get_python_version - - -def _get_purelib(): - return get_path("purelib") - - -def strip_module(filename): - if '.' 
in filename: - filename = os.path.splitext(filename)[0] - if filename.endswith('module'): - filename = filename[:-6] - return filename - - -def sorted_walk(dir): - """Do os.walk in a reproducible way, - independent of indeterministic filesystem readdir order - """ - for base, dirs, files in os.walk(dir): - dirs.sort() - files.sort() - yield base, dirs, files - - -def write_stub(resource, pyfile): - _stub_template = textwrap.dedent(""" - def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, importlib.util - __file__ = pkg_resources.resource_filename(__name__, %r) - __loader__ = None; del __bootstrap__, __loader__ - spec = importlib.util.spec_from_file_location(__name__,__file__) - mod = importlib.util.module_from_spec(spec) - spec.loader.exec_module(mod) - __bootstrap__() - """).lstrip() - with open(pyfile, 'w') as f: - f.write(_stub_template % resource) - - -class bdist_egg(Command): - description = "create an \"egg\" distribution" - - user_options = [ - ('bdist-dir=', 'b', - "temporary directory for creating the distribution"), - ('plat-name=', 'p', "platform name to embed in generated filenames " - "(default: %s)" % get_build_platform()), - ('exclude-source-files', None, - "remove all .py files from the generated egg"), - ('keep-temp', 'k', - "keep the pseudo-installation tree around after " + - "creating the distribution archive"), - ('dist-dir=', 'd', - "directory to put final built distributions in"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ] - - boolean_options = [ - 'keep-temp', 'skip-build', 'exclude-source-files' - ] - - def initialize_options(self): - self.bdist_dir = None - self.plat_name = None - self.keep_temp = 0 - self.dist_dir = None - self.skip_build = 0 - self.egg_output = None - self.exclude_source_files = None - - def finalize_options(self): - ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info") - self.egg_info = ei_cmd.egg_info - - if self.bdist_dir is None: - bdist_base = self.get_finalized_command('bdist').bdist_base - self.bdist_dir = os.path.join(bdist_base, 'egg') - - if self.plat_name is None: - self.plat_name = get_build_platform() - - self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) - - if self.egg_output is None: - - # Compute filename of the output egg - basename = Distribution( - None, None, ei_cmd.egg_name, ei_cmd.egg_version, - get_python_version(), - self.distribution.has_ext_modules() and self.plat_name - ).egg_name() - - self.egg_output = os.path.join(self.dist_dir, basename + '.egg') - - def do_install_data(self): - # Hack for packages that install data to install's --install-lib - self.get_finalized_command('install').install_lib = self.bdist_dir - - site_packages = os.path.normcase(os.path.realpath(_get_purelib())) - old, self.distribution.data_files = self.distribution.data_files, [] - - for item in old: - if isinstance(item, tuple) and len(item) == 2: - if os.path.isabs(item[0]): - realpath = os.path.realpath(item[0]) - normalized = os.path.normcase(realpath) - if normalized == site_packages or normalized.startswith( - site_packages + os.sep - ): - item = realpath[len(site_packages) + 1:], item[1] - # XXX else: raise ??? 
- self.distribution.data_files.append(item) - - try: - log.info("installing package data to %s", self.bdist_dir) - self.call_command('install_data', force=0, root=None) - finally: - self.distribution.data_files = old - - def get_outputs(self): - return [self.egg_output] - - def call_command(self, cmdname, **kw): - """Invoke reinitialized command `cmdname` with keyword args""" - for dirname in INSTALL_DIRECTORY_ATTRS: - kw.setdefault(dirname, self.bdist_dir) - kw.setdefault('skip_build', self.skip_build) - kw.setdefault('dry_run', self.dry_run) - cmd = self.reinitialize_command(cmdname, **kw) - self.run_command(cmdname) - return cmd - - def run(self): # noqa: C901 # is too complex (14) # FIXME - # Generate metadata first - self.run_command("egg_info") - # We run install_lib before install_data, because some data hacks - # pull their data path from the install_lib command. - log.info("installing library code to %s", self.bdist_dir) - instcmd = self.get_finalized_command('install') - old_root = instcmd.root - instcmd.root = None - if self.distribution.has_c_libraries() and not self.skip_build: - self.run_command('build_clib') - cmd = self.call_command('install_lib', warn_dir=0) - instcmd.root = old_root - - all_outputs, ext_outputs = self.get_ext_outputs() - self.stubs = [] - to_compile = [] - for (p, ext_name) in enumerate(ext_outputs): - filename, ext = os.path.splitext(ext_name) - pyfile = os.path.join(self.bdist_dir, strip_module(filename) + - '.py') - self.stubs.append(pyfile) - log.info("creating stub loader for %s", ext_name) - if not self.dry_run: - write_stub(os.path.basename(ext_name), pyfile) - to_compile.append(pyfile) - ext_outputs[p] = ext_name.replace(os.sep, '/') - - if to_compile: - cmd.byte_compile(to_compile) - if self.distribution.data_files: - self.do_install_data() - - # Make the EGG-INFO directory - archive_root = self.bdist_dir - egg_info = os.path.join(archive_root, 'EGG-INFO') - self.mkpath(egg_info) - if self.distribution.scripts: - script_dir = os.path.join(egg_info, 'scripts') - log.info("installing scripts to %s", script_dir) - self.call_command('install_scripts', install_dir=script_dir, - no_ep=1) - - self.copy_metadata_to(egg_info) - native_libs = os.path.join(egg_info, "native_libs.txt") - if all_outputs: - log.info("writing %s", native_libs) - if not self.dry_run: - ensure_directory(native_libs) - libs_file = open(native_libs, 'wt') - libs_file.write('\n'.join(all_outputs)) - libs_file.write('\n') - libs_file.close() - elif os.path.isfile(native_libs): - log.info("removing %s", native_libs) - if not self.dry_run: - os.unlink(native_libs) - - write_safety_flag( - os.path.join(archive_root, 'EGG-INFO'), self.zip_safe() - ) - - if os.path.exists(os.path.join(self.egg_info, 'depends.txt')): - log.warn( - "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n" - "Use the install_requires/extras_require setup() args instead." 
- ) - - if self.exclude_source_files: - self.zap_pyfiles() - - # Make the archive - make_zipfile(self.egg_output, archive_root, verbose=self.verbose, - dry_run=self.dry_run, mode=self.gen_header()) - if not self.keep_temp: - remove_tree(self.bdist_dir, dry_run=self.dry_run) - - # Add to 'Distribution.dist_files' so that the "upload" command works - getattr(self.distribution, 'dist_files', []).append( - ('bdist_egg', get_python_version(), self.egg_output)) - - def zap_pyfiles(self): - log.info("Removing .py files from temporary directory") - for base, dirs, files in walk_egg(self.bdist_dir): - for name in files: - path = os.path.join(base, name) - - if name.endswith('.py'): - log.debug("Deleting %s", path) - os.unlink(path) - - if base.endswith('__pycache__'): - path_old = path - - pattern = r'(?P.+)\.(?P[^.]+)\.pyc' - m = re.match(pattern, name) - path_new = os.path.join( - base, os.pardir, m.group('name') + '.pyc') - log.info( - "Renaming file from [%s] to [%s]" - % (path_old, path_new)) - try: - os.remove(path_new) - except OSError: - pass - os.rename(path_old, path_new) - - def zip_safe(self): - safe = getattr(self.distribution, 'zip_safe', None) - if safe is not None: - return safe - log.warn("zip_safe flag not set; analyzing archive contents...") - return analyze_egg(self.bdist_dir, self.stubs) - - def gen_header(self): - return 'w' - - def copy_metadata_to(self, target_dir): - "Copy metadata (egg info) to the target_dir" - # normalize the path (so that a forward-slash in egg_info will - # match using startswith below) - norm_egg_info = os.path.normpath(self.egg_info) - prefix = os.path.join(norm_egg_info, '') - for path in self.ei_cmd.filelist.files: - if path.startswith(prefix): - target = os.path.join(target_dir, path[len(prefix):]) - ensure_directory(target) - self.copy_file(path, target) - - def get_ext_outputs(self): - """Get a list of relative paths to C extensions in the output distro""" - - all_outputs = [] - ext_outputs = [] - - paths = {self.bdist_dir: ''} - for base, dirs, files in sorted_walk(self.bdist_dir): - for filename in files: - if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: - all_outputs.append(paths[base] + filename) - for filename in dirs: - paths[os.path.join(base, filename)] = (paths[base] + - filename + '/') - - if self.distribution.has_ext_modules(): - build_cmd = self.get_finalized_command('build_ext') - for ext in build_cmd.extensions: - if isinstance(ext, Library): - continue - fullname = build_cmd.get_ext_fullname(ext.name) - filename = build_cmd.get_ext_filename(fullname) - if not os.path.basename(filename).startswith('dl-'): - if os.path.exists(os.path.join(self.bdist_dir, filename)): - ext_outputs.append(filename) - - return all_outputs, ext_outputs - - -NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split()) - - -def walk_egg(egg_dir): - """Walk an unpacked egg's contents, skipping the metadata directory""" - walker = sorted_walk(egg_dir) - base, dirs, files = next(walker) - if 'EGG-INFO' in dirs: - dirs.remove('EGG-INFO') - yield base, dirs, files - for bdf in walker: - yield bdf - - -def analyze_egg(egg_dir, stubs): - # check for existing flag in EGG-INFO - for flag, fn in safety_flags.items(): - if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)): - return flag - if not can_scan(): - return False - safe = True - for base, dirs, files in walk_egg(egg_dir): - for name in files: - if name.endswith('.py') or name.endswith('.pyw'): - continue - elif name.endswith('.pyc') or name.endswith('.pyo'): - # always scan, even if 
we already know we're not safe - safe = scan_module(egg_dir, base, name, stubs) and safe - return safe - - -def write_safety_flag(egg_dir, safe): - # Write or remove zip safety flag file(s) - for flag, fn in safety_flags.items(): - fn = os.path.join(egg_dir, fn) - if os.path.exists(fn): - if safe is None or bool(safe) != flag: - os.unlink(fn) - elif safe is not None and bool(safe) == flag: - f = open(fn, 'wt') - f.write('\n') - f.close() - - -safety_flags = { - True: 'zip-safe', - False: 'not-zip-safe', -} - - -def scan_module(egg_dir, base, name, stubs): - """Check whether module possibly uses unsafe-for-zipfile stuff""" - - filename = os.path.join(base, name) - if filename[:-1] in stubs: - return True # Extension module - pkg = base[len(egg_dir) + 1:].replace(os.sep, '.') - module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0] - if sys.version_info < (3, 7): - skip = 12 # skip magic & date & file size - else: - skip = 16 # skip magic & reserved? & date & file size - f = open(filename, 'rb') - f.read(skip) - code = marshal.load(f) - f.close() - safe = True - symbols = dict.fromkeys(iter_symbols(code)) - for bad in ['__file__', '__path__']: - if bad in symbols: - log.warn("%s: module references %s", module, bad) - safe = False - if 'inspect' in symbols: - for bad in [ - 'getsource', 'getabsfile', 'getsourcefile', 'getfile' - 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo', - 'getinnerframes', 'getouterframes', 'stack', 'trace' - ]: - if bad in symbols: - log.warn("%s: module MAY be using inspect.%s", module, bad) - safe = False - return safe - - -def iter_symbols(code): - """Yield names and strings used by `code` and its nested code objects""" - for name in code.co_names: - yield name - for const in code.co_consts: - if isinstance(const, str): - yield const - elif isinstance(const, CodeType): - for name in iter_symbols(const): - yield name - - -def can_scan(): - if not sys.platform.startswith('java') and sys.platform != 'cli': - # CPython, PyPy, etc. - return True - log.warn("Unable to analyze compiled code on this platform.") - log.warn("Please ask the author to include a 'zip_safe'" - " setting (either True or False) in the package's setup.py") - - -# Attribute names of options for commands that might need to be convinced to -# install to the egg build directory - -INSTALL_DIRECTORY_ATTRS = [ - 'install_lib', 'install_dir', 'install_data', 'install_base' -] - - -def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True, - mode='w'): - """Create a zip file from all the files under 'base_dir'. The output - zip file will be named 'base_dir' + ".zip". Uses either the "zipfile" - Python module (if available) or the InfoZIP "zip" utility (if installed - and found on the default search path). If neither tool is available, - raises DistutilsExecError. Returns the name of the output zip file. 
- """ - import zipfile - - mkpath(os.path.dirname(zip_filename), dry_run=dry_run) - log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) - - def visit(z, dirname, names): - for name in names: - path = os.path.normpath(os.path.join(dirname, name)) - if os.path.isfile(path): - p = path[len(base_dir) + 1:] - if not dry_run: - z.write(path, p) - log.debug("adding '%s'", p) - - compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED - if not dry_run: - z = zipfile.ZipFile(zip_filename, mode, compression=compression) - for dirname, dirs, files in sorted_walk(base_dir): - visit(z, dirname, files) - z.close() - else: - for dirname, dirs, files in sorted_walk(base_dir): - visit(None, dirname, files) - return zip_filename diff --git a/spaces/Awesimo/jojogan/e4e_projection.py b/spaces/Awesimo/jojogan/e4e_projection.py deleted file mode 100644 index e05cc860e9a7f7fa7693589ed45b483d5d3badaa..0000000000000000000000000000000000000000 --- a/spaces/Awesimo/jojogan/e4e_projection.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import sys -import numpy as np -from PIL import Image -import torch -import torchvision.transforms as transforms -from argparse import Namespace -from e4e.models.psp import pSp -from util import * - - - -@ torch.no_grad() -def projection(img, name, device='cuda'): - - - model_path = 'e4e_ffhq_encode.pt' - ckpt = torch.load(model_path, map_location='cpu') - opts = ckpt['opts'] - opts['checkpoint_path'] = model_path - opts= Namespace(**opts) - net = pSp(opts, device).eval().to(device) - - transform = transforms.Compose( - [ - transforms.Resize(256), - transforms.CenterCrop(256), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ] - ) - - img = transform(img).unsqueeze(0).to(device) - images, w_plus = net(img, randomize_noise=False, return_latents=True) - result_file = {} - result_file['latent'] = w_plus[0] - torch.save(result_file, name) - return w_plus[0] diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/datasets/objects365.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/datasets/objects365.py deleted file mode 100644 index 41395bdd53b67b7a7111f06564c3a2d2b63a7cdc..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/datasets/objects365.py +++ /dev/null @@ -1,394 +0,0 @@ -from detectron2.data.datasets.register_coco import register_coco_instances -import os - -categories_v1 = [ -{'id': 164, 'name': 'cutting/chopping board'} , -{'id': 49, 'name': 'tie'} , -{'id': 306, 'name': 'crosswalk sign'} , -{'id': 145, 'name': 'gun'} , -{'id': 14, 'name': 'street lights'} , -{'id': 223, 'name': 'bar soap'} , -{'id': 74, 'name': 'wild bird'} , -{'id': 219, 'name': 'ice cream'} , -{'id': 37, 'name': 'stool'} , -{'id': 25, 'name': 'storage box'} , -{'id': 153, 'name': 'giraffe'} , -{'id': 52, 'name': 'pen/pencil'} , -{'id': 61, 'name': 'high heels'} , -{'id': 340, 'name': 'mangosteen'} , -{'id': 22, 'name': 'bracelet'} , -{'id': 155, 'name': 'piano'} , -{'id': 162, 'name': 'vent'} , -{'id': 75, 'name': 'laptop'} , -{'id': 236, 'name': 'toaster'} , -{'id': 231, 'name': 'fire truck'} , -{'id': 42, 'name': 'basket'} , -{'id': 150, 'name': 'zebra'} , -{'id': 124, 'name': 'head phone'} , -{'id': 90, 'name': 'sheep'} , -{'id': 322, 'name': 'steak'} , -{'id': 39, 'name': 'couch'} , -{'id': 209, 'name': 'toothbrush'} , 
-{'id': 59, 'name': 'bicycle'} , -{'id': 336, 'name': 'red cabbage'} , -{'id': 228, 'name': 'golf ball'} , -{'id': 120, 'name': 'tomato'} , -{'id': 132, 'name': 'computer box'} , -{'id': 8, 'name': 'cup'} , -{'id': 183, 'name': 'basketball'} , -{'id': 298, 'name': 'butterfly'} , -{'id': 250, 'name': 'garlic'} , -{'id': 12, 'name': 'desk'} , -{'id': 141, 'name': 'microwave'} , -{'id': 171, 'name': 'strawberry'} , -{'id': 200, 'name': 'kettle'} , -{'id': 63, 'name': 'van'} , -{'id': 300, 'name': 'cheese'} , -{'id': 215, 'name': 'marker'} , -{'id': 100, 'name': 'blackboard/whiteboard'} , -{'id': 186, 'name': 'printer'} , -{'id': 333, 'name': 'bread/bun'} , -{'id': 243, 'name': 'penguin'} , -{'id': 364, 'name': 'iron'} , -{'id': 180, 'name': 'ladder'} , -{'id': 34, 'name': 'flag'} , -{'id': 78, 'name': 'cell phone'} , -{'id': 97, 'name': 'fan'} , -{'id': 224, 'name': 'scale'} , -{'id': 151, 'name': 'duck'} , -{'id': 319, 'name': 'flute'} , -{'id': 156, 'name': 'stop sign'} , -{'id': 290, 'name': 'rickshaw'} , -{'id': 128, 'name': 'sailboat'} , -{'id': 165, 'name': 'tennis racket'} , -{'id': 241, 'name': 'cigar'} , -{'id': 101, 'name': 'balloon'} , -{'id': 308, 'name': 'hair drier'} , -{'id': 167, 'name': 'skating and skiing shoes'} , -{'id': 237, 'name': 'helicopter'} , -{'id': 65, 'name': 'sink'} , -{'id': 129, 'name': 'tangerine'} , -{'id': 330, 'name': 'crab'} , -{'id': 320, 'name': 'measuring cup'} , -{'id': 260, 'name': 'fishing rod'} , -{'id': 346, 'name': 'saw'} , -{'id': 216, 'name': 'ship'} , -{'id': 46, 'name': 'coffee table'} , -{'id': 194, 'name': 'facial mask'} , -{'id': 281, 'name': 'stapler'} , -{'id': 118, 'name': 'refrigerator'} , -{'id': 40, 'name': 'belt'} , -{'id': 349, 'name': 'starfish'} , -{'id': 87, 'name': 'hanger'} , -{'id': 116, 'name': 'baseball glove'} , -{'id': 261, 'name': 'cherry'} , -{'id': 334, 'name': 'baozi'} , -{'id': 267, 'name': 'screwdriver'} , -{'id': 158, 'name': 'converter'} , -{'id': 335, 'name': 'lion'} , -{'id': 170, 'name': 'baseball'} , -{'id': 111, 'name': 'skis'} , -{'id': 136, 'name': 'broccoli'} , -{'id': 342, 'name': 'eraser'} , -{'id': 337, 'name': 'polar bear'} , -{'id': 139, 'name': 'shovel'} , -{'id': 193, 'name': 'extension cord'} , -{'id': 284, 'name': 'goldfish'} , -{'id': 174, 'name': 'pepper'} , -{'id': 138, 'name': 'stroller'} , -{'id': 328, 'name': 'yak'} , -{'id': 83, 'name': 'clock'} , -{'id': 235, 'name': 'tricycle'} , -{'id': 248, 'name': 'parking meter'} , -{'id': 274, 'name': 'trophy'} , -{'id': 324, 'name': 'binoculars'} , -{'id': 51, 'name': 'traffic light'} , -{'id': 314, 'name': 'donkey'} , -{'id': 45, 'name': 'barrel/bucket'} , -{'id': 292, 'name': 'pomegranate'} , -{'id': 13, 'name': 'handbag'} , -{'id': 262, 'name': 'tablet'} , -{'id': 68, 'name': 'apple'} , -{'id': 226, 'name': 'cabbage'} , -{'id': 23, 'name': 'flower'} , -{'id': 58, 'name': 'faucet'} , -{'id': 206, 'name': 'tong'} , -{'id': 291, 'name': 'trombone'} , -{'id': 160, 'name': 'carrot'} , -{'id': 172, 'name': 'bow tie'} , -{'id': 122, 'name': 'tent'} , -{'id': 163, 'name': 'cookies'} , -{'id': 115, 'name': 'remote'} , -{'id': 175, 'name': 'coffee machine'} , -{'id': 238, 'name': 'green beans'} , -{'id': 233, 'name': 'cello'} , -{'id': 28, 'name': 'wine glass'} , -{'id': 295, 'name': 'mushroom'} , -{'id': 344, 'name': 'scallop'} , -{'id': 125, 'name': 'lantern'} , -{'id': 123, 'name': 'shampoo/shower gel'} , -{'id': 285, 'name': 'meat balls'} , -{'id': 266, 'name': 'key'} , -{'id': 296, 'name': 'calculator'} , -{'id': 168, 'name': 'scissors'} , -{'id': 
103, 'name': 'cymbal'} , -{'id': 6, 'name': 'bottle'} , -{'id': 264, 'name': 'nuts'} , -{'id': 234, 'name': 'notepaper'} , -{'id': 211, 'name': 'mango'} , -{'id': 287, 'name': 'toothpaste'} , -{'id': 196, 'name': 'chopsticks'} , -{'id': 140, 'name': 'baseball bat'} , -{'id': 244, 'name': 'hurdle'} , -{'id': 195, 'name': 'tennis ball'} , -{'id': 144, 'name': 'surveillance camera'} , -{'id': 271, 'name': 'volleyball'} , -{'id': 94, 'name': 'keyboard'} , -{'id': 339, 'name': 'seal'} , -{'id': 11, 'name': 'picture/frame'} , -{'id': 348, 'name': 'okra'} , -{'id': 191, 'name': 'sausage'} , -{'id': 166, 'name': 'candy'} , -{'id': 62, 'name': 'ring'} , -{'id': 311, 'name': 'dolphin'} , -{'id': 273, 'name': 'eggplant'} , -{'id': 84, 'name': 'drum'} , -{'id': 143, 'name': 'surfboard'} , -{'id': 288, 'name': 'antelope'} , -{'id': 204, 'name': 'clutch'} , -{'id': 207, 'name': 'slide'} , -{'id': 43, 'name': 'towel/napkin'} , -{'id': 352, 'name': 'durian'} , -{'id': 276, 'name': 'board eraser'} , -{'id': 315, 'name': 'electric drill'} , -{'id': 312, 'name': 'sushi'} , -{'id': 198, 'name': 'pie'} , -{'id': 106, 'name': 'pickup truck'} , -{'id': 176, 'name': 'bathtub'} , -{'id': 26, 'name': 'vase'} , -{'id': 133, 'name': 'elephant'} , -{'id': 256, 'name': 'sandwich'} , -{'id': 327, 'name': 'noodles'} , -{'id': 10, 'name': 'glasses'} , -{'id': 109, 'name': 'airplane'} , -{'id': 95, 'name': 'tripod'} , -{'id': 247, 'name': 'CD'} , -{'id': 121, 'name': 'machinery vehicle'} , -{'id': 365, 'name': 'flashlight'} , -{'id': 53, 'name': 'microphone'} , -{'id': 270, 'name': 'pliers'} , -{'id': 362, 'name': 'chainsaw'} , -{'id': 259, 'name': 'bear'} , -{'id': 197, 'name': 'electronic stove and gas stove'} , -{'id': 89, 'name': 'pot/pan'} , -{'id': 220, 'name': 'tape'} , -{'id': 338, 'name': 'lighter'} , -{'id': 177, 'name': 'snowboard'} , -{'id': 214, 'name': 'violin'} , -{'id': 217, 'name': 'chicken'} , -{'id': 2, 'name': 'sneakers'} , -{'id': 161, 'name': 'washing machine'} , -{'id': 131, 'name': 'kite'} , -{'id': 354, 'name': 'rabbit'} , -{'id': 86, 'name': 'bus'} , -{'id': 275, 'name': 'dates'} , -{'id': 282, 'name': 'camel'} , -{'id': 88, 'name': 'nightstand'} , -{'id': 179, 'name': 'grapes'} , -{'id': 229, 'name': 'pine apple'} , -{'id': 56, 'name': 'necklace'} , -{'id': 18, 'name': 'leather shoes'} , -{'id': 358, 'name': 'hoverboard'} , -{'id': 345, 'name': 'pencil case'} , -{'id': 359, 'name': 'pasta'} , -{'id': 157, 'name': 'radiator'} , -{'id': 201, 'name': 'hamburger'} , -{'id': 268, 'name': 'globe'} , -{'id': 332, 'name': 'barbell'} , -{'id': 329, 'name': 'mop'} , -{'id': 252, 'name': 'horn'} , -{'id': 350, 'name': 'eagle'} , -{'id': 169, 'name': 'folder'} , -{'id': 137, 'name': 'toilet'} , -{'id': 5, 'name': 'lamp'} , -{'id': 27, 'name': 'bench'} , -{'id': 249, 'name': 'swan'} , -{'id': 76, 'name': 'knife'} , -{'id': 341, 'name': 'comb'} , -{'id': 64, 'name': 'watch'} , -{'id': 105, 'name': 'telephone'} , -{'id': 3, 'name': 'chair'} , -{'id': 33, 'name': 'boat'} , -{'id': 107, 'name': 'orange'} , -{'id': 60, 'name': 'bread'} , -{'id': 147, 'name': 'cat'} , -{'id': 135, 'name': 'gas stove'} , -{'id': 307, 'name': 'papaya'} , -{'id': 227, 'name': 'router/modem'} , -{'id': 357, 'name': 'asparagus'} , -{'id': 73, 'name': 'motorcycle'} , -{'id': 77, 'name': 'traffic sign'} , -{'id': 67, 'name': 'fish'} , -{'id': 326, 'name': 'radish'} , -{'id': 213, 'name': 'egg'} , -{'id': 203, 'name': 'cucumber'} , -{'id': 17, 'name': 'helmet'} , -{'id': 110, 'name': 'luggage'} , -{'id': 80, 'name': 'truck'} , -{'id': 199, 
'name': 'frisbee'} , -{'id': 232, 'name': 'peach'} , -{'id': 1, 'name': 'person'} , -{'id': 29, 'name': 'boots'} , -{'id': 310, 'name': 'chips'} , -{'id': 142, 'name': 'skateboard'} , -{'id': 44, 'name': 'slippers'} , -{'id': 4, 'name': 'hat'} , -{'id': 178, 'name': 'suitcase'} , -{'id': 24, 'name': 'tv'} , -{'id': 119, 'name': 'train'} , -{'id': 82, 'name': 'power outlet'} , -{'id': 245, 'name': 'swing'} , -{'id': 15, 'name': 'book'} , -{'id': 294, 'name': 'jellyfish'} , -{'id': 192, 'name': 'fire extinguisher'} , -{'id': 212, 'name': 'deer'} , -{'id': 181, 'name': 'pear'} , -{'id': 347, 'name': 'table tennis paddle'} , -{'id': 113, 'name': 'trolley'} , -{'id': 91, 'name': 'guitar'} , -{'id': 202, 'name': 'golf club'} , -{'id': 221, 'name': 'wheelchair'} , -{'id': 254, 'name': 'saxophone'} , -{'id': 117, 'name': 'paper towel'} , -{'id': 303, 'name': 'race car'} , -{'id': 240, 'name': 'carriage'} , -{'id': 246, 'name': 'radio'} , -{'id': 318, 'name': 'parrot'} , -{'id': 251, 'name': 'french fries'} , -{'id': 98, 'name': 'dog'} , -{'id': 112, 'name': 'soccer'} , -{'id': 355, 'name': 'french horn'} , -{'id': 79, 'name': 'paddle'} , -{'id': 283, 'name': 'lettuce'} , -{'id': 9, 'name': 'car'} , -{'id': 258, 'name': 'kiwi fruit'} , -{'id': 325, 'name': 'llama'} , -{'id': 187, 'name': 'billiards'} , -{'id': 210, 'name': 'facial cleanser'} , -{'id': 81, 'name': 'cow'} , -{'id': 331, 'name': 'microscope'} , -{'id': 148, 'name': 'lemon'} , -{'id': 302, 'name': 'pomelo'} , -{'id': 85, 'name': 'fork'} , -{'id': 154, 'name': 'pumpkin'} , -{'id': 289, 'name': 'shrimp'} , -{'id': 71, 'name': 'teddy bear'} , -{'id': 184, 'name': 'potato'} , -{'id': 102, 'name': 'air conditioner'} , -{'id': 208, 'name': 'hot dog'} , -{'id': 222, 'name': 'plum'} , -{'id': 316, 'name': 'spring rolls'} , -{'id': 230, 'name': 'crane'} , -{'id': 149, 'name': 'liquid soap'} , -{'id': 55, 'name': 'canned'} , -{'id': 35, 'name': 'speaker'} , -{'id': 108, 'name': 'banana'} , -{'id': 297, 'name': 'treadmill'} , -{'id': 99, 'name': 'spoon'} , -{'id': 104, 'name': 'mouse'} , -{'id': 182, 'name': 'american football'} , -{'id': 299, 'name': 'egg tart'} , -{'id': 127, 'name': 'cleaning products'} , -{'id': 313, 'name': 'urinal'} , -{'id': 286, 'name': 'medal'} , -{'id': 239, 'name': 'brush'} , -{'id': 96, 'name': 'hockey'} , -{'id': 279, 'name': 'dumbbell'} , -{'id': 32, 'name': 'umbrella'} , -{'id': 272, 'name': 'hammer'} , -{'id': 16, 'name': 'plate'} , -{'id': 21, 'name': 'potted plant'} , -{'id': 242, 'name': 'earphone'} , -{'id': 70, 'name': 'candle'} , -{'id': 185, 'name': 'paint brush'} , -{'id': 48, 'name': 'toy'} , -{'id': 130, 'name': 'pizza'} , -{'id': 255, 'name': 'trumpet'} , -{'id': 361, 'name': 'hotair balloon'} , -{'id': 188, 'name': 'fire hydrant'} , -{'id': 50, 'name': 'bed'} , -{'id': 253, 'name': 'avocado'} , -{'id': 293, 'name': 'coconut'} , -{'id': 257, 'name': 'cue'} , -{'id': 280, 'name': 'hamimelon'} , -{'id': 66, 'name': 'horse'} , -{'id': 173, 'name': 'pigeon'} , -{'id': 190, 'name': 'projector'} , -{'id': 69, 'name': 'camera'} , -{'id': 30, 'name': 'bowl'} , -{'id': 269, 'name': 'broom'} , -{'id': 343, 'name': 'pitaya'} , -{'id': 305, 'name': 'tuba'} , -{'id': 309, 'name': 'green onion'} , -{'id': 363, 'name': 'lobster'} , -{'id': 225, 'name': 'watermelon'} , -{'id': 47, 'name': 'suv'} , -{'id': 31, 'name': 'dining table'} , -{'id': 54, 'name': 'sandals'} , -{'id': 351, 'name': 'monkey'} , -{'id': 218, 'name': 'onion'} , -{'id': 36, 'name': 'trash bin/can'} , -{'id': 20, 'name': 'glove'} , -{'id': 277, 
'name': 'rice'} , -{'id': 152, 'name': 'sports car'} , -{'id': 360, 'name': 'target'} , -{'id': 205, 'name': 'blender'} , -{'id': 19, 'name': 'pillow'} , -{'id': 72, 'name': 'cake'} , -{'id': 93, 'name': 'tea pot'} , -{'id': 353, 'name': 'game board'} , -{'id': 38, 'name': 'backpack'} , -{'id': 356, 'name': 'ambulance'} , -{'id': 146, 'name': 'life saver'} , -{'id': 189, 'name': 'goose'} , -{'id': 278, 'name': 'tape measure/ruler'} , -{'id': 92, 'name': 'traffic cone'} , -{'id': 134, 'name': 'toiletries'} , -{'id': 114, 'name': 'oven'} , -{'id': 317, 'name': 'tortoise/turtle'} , -{'id': 265, 'name': 'corn'} , -{'id': 126, 'name': 'donut'} , -{'id': 57, 'name': 'mirror'} , -{'id': 7, 'name': 'cabinet/shelf'} , -{'id': 263, 'name': 'green vegetables'} , -{'id': 159, 'name': 'tissue '} , -{'id': 321, 'name': 'shark'} , -{'id': 301, 'name': 'pig'} , -{'id': 41, 'name': 'carpet'} , -{'id': 304, 'name': 'rice cooker'} , -{'id': 323, 'name': 'poker card'} , -] - -def _get_builtin_metadata(version): - if version == 'v1': - id_to_name = {x['id']: x['name'] for x in categories_v1} - else: - assert 0, version - thing_dataset_id_to_contiguous_id = {i + 1: i for i in range(365)} - thing_classes = [id_to_name[k] for k in sorted(id_to_name)] - return { - "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, - "thing_classes": thing_classes} - -_PREDEFINED_SPLITS_OBJECTS365 = { - "objects365_train": ("objects365/train", "objects365/annotations/objects365_train.json"), - "objects365_val": ("objects365/val", "objects365/annotations/objects365_val.json"), -} - -for key, (image_root, json_file) in _PREDEFINED_SPLITS_OBJECTS365.items(): - register_coco_instances( - key, - _get_builtin_metadata('v1'), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/benchmark.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/benchmark.py deleted file mode 100644 index aaac56400148f7b140b7c1356bbbc3b4293e5ce3..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/benchmark.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. -""" -A script to benchmark builtin models. - -Note: this script has an extra dependency of psutil. -""" - -import itertools -import logging -import psutil -import torch -import tqdm -from fvcore.common.timer import Timer -from torch.nn.parallel import DistributedDataParallel - -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import LazyConfig, get_cfg, instantiate -from detectron2.data import ( - DatasetFromList, - build_detection_test_loader, - build_detection_train_loader, -) -from detectron2.data.benchmark import DataLoaderBenchmark -from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch -from detectron2.modeling import build_model -from detectron2.solver import build_optimizer -from detectron2.utils import comm -from detectron2.utils.collect_env import collect_env_info -from detectron2.utils.events import CommonMetricPrinter -from detectron2.utils.logger import setup_logger - -logger = logging.getLogger("detectron2") - - -def setup(args): - if args.config_file.endswith(".yaml"): - cfg = get_cfg() - cfg.merge_from_file(args.config_file) - cfg.SOLVER.BASE_LR = 0.001 # Avoid NaNs. 
Not useful in this script anyway. - cfg.merge_from_list(args.opts) - cfg.freeze() - else: - cfg = LazyConfig.load(args.config_file) - cfg = LazyConfig.apply_overrides(cfg, args.opts) - setup_logger(distributed_rank=comm.get_rank()) - return cfg - - -def create_data_benchmark(cfg, args): - if args.config_file.endswith(".py"): - dl_cfg = cfg.dataloader.train - dl_cfg._target_ = DataLoaderBenchmark - return instantiate(dl_cfg) - else: - kwargs = build_detection_train_loader.from_config(cfg) - kwargs.pop("aspect_ratio_grouping", None) - kwargs["_target_"] = DataLoaderBenchmark - return instantiate(kwargs) - - -def RAM_msg(): - vram = psutil.virtual_memory() - return "RAM Usage: {:.2f}/{:.2f} GB".format( - (vram.total - vram.available) / 1024 ** 3, vram.total / 1024 ** 3 - ) - - -def benchmark_data(args): - cfg = setup(args) - logger.info("After spawning " + RAM_msg()) - - benchmark = create_data_benchmark(cfg, args) - benchmark.benchmark_distributed(250, 10) - # test for a few more rounds - for k in range(10): - logger.info(f"Iteration {k} " + RAM_msg()) - benchmark.benchmark_distributed(250, 1) - - -def benchmark_data_advanced(args): - # benchmark dataloader with more details to help analyze performance bottleneck - cfg = setup(args) - benchmark = create_data_benchmark(cfg, args) - - if comm.get_rank() == 0: - benchmark.benchmark_dataset(100) - benchmark.benchmark_mapper(100) - benchmark.benchmark_workers(100, warmup=10) - benchmark.benchmark_IPC(100, warmup=10) - if comm.get_world_size() > 1: - benchmark.benchmark_distributed(100) - logger.info("Rerun ...") - benchmark.benchmark_distributed(100) - - -def benchmark_train(args): - cfg = setup(args) - model = build_model(cfg) - logger.info("Model:\n{}".format(model)) - if comm.get_world_size() > 1: - model = DistributedDataParallel( - model, device_ids=[comm.get_local_rank()], broadcast_buffers=False - ) - optimizer = build_optimizer(cfg, model) - checkpointer = DetectionCheckpointer(model, optimizer=optimizer) - checkpointer.load(cfg.MODEL.WEIGHTS) - - cfg.defrost() - cfg.DATALOADER.NUM_WORKERS = 2 - data_loader = build_detection_train_loader(cfg) - dummy_data = list(itertools.islice(data_loader, 100)) - - def f(): - data = DatasetFromList(dummy_data, copy=False, serialize=False) - while True: - yield from data - - max_iter = 400 - trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(model, f(), optimizer) - trainer.register_hooks( - [ - hooks.IterationTimer(), - hooks.PeriodicWriter([CommonMetricPrinter(max_iter)]), - hooks.TorchProfiler( - lambda trainer: trainer.iter == max_iter - 1, cfg.OUTPUT_DIR, save_tensorboard=True - ), - ] - ) - trainer.train(1, max_iter) - - -@torch.no_grad() -def benchmark_eval(args): - cfg = setup(args) - if args.config_file.endswith(".yaml"): - model = build_model(cfg) - DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) - - cfg.defrost() - cfg.DATALOADER.NUM_WORKERS = 0 - data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) - else: - model = instantiate(cfg.model) - model.to(cfg.train.device) - DetectionCheckpointer(model).load(cfg.train.init_checkpoint) - - cfg.dataloader.num_workers = 0 - data_loader = instantiate(cfg.dataloader.test) - - model.eval() - logger.info("Model:\n{}".format(model)) - dummy_data = DatasetFromList(list(itertools.islice(data_loader, 100)), copy=False) - - def f(): - while True: - yield from dummy_data - - for k in range(5): # warmup - model(dummy_data[k]) - - max_iter = 300 - timer = Timer() - with tqdm.tqdm(total=max_iter) as pbar: - for idx, d in 
enumerate(f()): - if idx == max_iter: - break - model(d) - pbar.update() - logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds())) - - -if __name__ == "__main__": - parser = default_argument_parser() - parser.add_argument("--task", choices=["train", "eval", "data", "data_advanced"], required=True) - args = parser.parse_args() - assert not args.eval_only - - logger.info("Environment info:\n" + collect_env_info()) - if "data" in args.task: - print("Initial " + RAM_msg()) - if args.task == "data": - f = benchmark_data - if args.task == "data_advanced": - f = benchmark_data_advanced - elif args.task == "train": - """ - Note: training speed may not be representative. - The training cost of a R-CNN model varies with the content of the data - and the quality of the model. - """ - f = benchmark_train - elif args.task == "eval": - f = benchmark_eval - # only benchmark single-GPU inference. - assert args.num_gpus == 1 and args.num_machines == 1 - launch(f, args.num_gpus, args.num_machines, args.machine_rank, args.dist_url, args=(args,)) diff --git a/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/spec_utils.py b/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/spec_utils.py deleted file mode 100644 index a9634fd51ff47bf90211839231774719154c37cf..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/spec_utils.py +++ /dev/null @@ -1,672 +0,0 @@ -import hashlib -import json -import math -import os - -import librosa -import numpy as np -import soundfile as sf -from tqdm import tqdm - - -def crop_center(h1, h2): - h1_shape = h1.size() - h2_shape = h2.size() - - if h1_shape[3] == h2_shape[3]: - return h1 - elif h1_shape[3] < h2_shape[3]: - raise ValueError("h1_shape[3] must be greater than h2_shape[3]") - - # s_freq = (h2_shape[2] - h1_shape[2]) // 2 - # e_freq = s_freq + h1_shape[2] - s_time = (h1_shape[3] - h2_shape[3]) // 2 - e_time = s_time + h2_shape[3] - h1 = h1[:, :, :, s_time:e_time] - - return h1 - - -def wave_to_spectrogram( - wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False -): - if reverse: - wave_left = np.flip(np.asfortranarray(wave[0])) - wave_right = np.flip(np.asfortranarray(wave[1])) - elif mid_side: - wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) - elif mid_side_b2: - wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) - else: - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - - spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length) - spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) - - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def wave_to_spectrogram_mt( - wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False -): - import threading - - if reverse: - wave_left = np.flip(np.asfortranarray(wave[0])) - wave_right = np.flip(np.asfortranarray(wave[1])) - elif mid_side: - wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) - elif mid_side_b2: - wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) - else: - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - - def run_thread(**kwargs): - global spec_left - spec_left = librosa.stft(**kwargs) - - thread = 
threading.Thread( - target=run_thread, - kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length}, - ) - thread.start() - spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) - thread.join() - - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def combine_spectrograms(specs, mp): - l = min([specs[i].shape[2] for i in specs]) - spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64) - offset = 0 - bands_n = len(mp.param["band"]) - - for d in range(1, bands_n + 1): - h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"] - spec_c[:, offset : offset + h, :l] = specs[d][ - :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l - ] - offset += h - - if offset > mp.param["bins"]: - raise ValueError("Too much bins") - - # lowpass fiter - if ( - mp.param["pre_filter_start"] > 0 - ): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']: - if bands_n == 1: - spec_c = fft_lp_filter( - spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"] - ) - else: - gp = 1 - for b in range( - mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"] - ): - g = math.pow( - 10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0 - ) - gp = g - spec_c[:, b, :] *= g - - return np.asfortranarray(spec_c) - - -def spectrogram_to_image(spec, mode="magnitude"): - if mode == "magnitude": - if np.iscomplexobj(spec): - y = np.abs(spec) - else: - y = spec - y = np.log10(y**2 + 1e-8) - elif mode == "phase": - if np.iscomplexobj(spec): - y = np.angle(spec) - else: - y = spec - - y -= y.min() - y *= 255 / y.max() - img = np.uint8(y) - - if y.ndim == 3: - img = img.transpose(1, 2, 0) - img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2) - - return img - - -def reduce_vocal_aggressively(X, y, softmask): - v = X - y - y_mag_tmp = np.abs(y) - v_mag_tmp = np.abs(v) - - v_mask = v_mag_tmp > y_mag_tmp - y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf) - - return y_mag * np.exp(1.0j * np.angle(y)) - - -def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32): - if min_range < fade_size * 2: - raise ValueError("min_range must be >= fade_area * 2") - - mag = mag.copy() - - idx = np.where(ref.mean(axis=(0, 1)) < thres)[0] - starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0]) - ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1]) - uninformative = np.where(ends - starts > min_range)[0] - if len(uninformative) > 0: - starts = starts[uninformative] - ends = ends[uninformative] - old_e = None - for s, e in zip(starts, ends): - if old_e is not None and s - old_e < fade_size: - s = old_e - fade_size * 2 - - if s != 0: - weight = np.linspace(0, 1, fade_size) - mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size] - else: - s -= fade_size - - if e != mag.shape[2]: - weight = np.linspace(1, 0, fade_size) - mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e] - else: - e += fade_size - - mag[:, :, s + fade_size : e - fade_size] += ref[ - :, :, s + fade_size : e - fade_size - ] - old_e = e - - return mag - - -def align_wave_head_and_tail(a, b): - l = min([a[0].size, b[0].size]) - - return a[:l, :l], b[:l, :l] - - -def cache_or_load(mix_path, inst_path, mp): - mix_basename = os.path.splitext(os.path.basename(mix_path))[0] - inst_basename = os.path.splitext(os.path.basename(inst_path))[0] - - cache_dir = "mph{}".format( - hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest() - ) - 
mix_cache_dir = os.path.join("cache", cache_dir) - inst_cache_dir = os.path.join("cache", cache_dir) - - os.makedirs(mix_cache_dir, exist_ok=True) - os.makedirs(inst_cache_dir, exist_ok=True) - - mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy") - inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy") - - if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path): - X_spec_m = np.load(mix_cache_path) - y_spec_m = np.load(inst_cache_path) - else: - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - - for d in range(len(mp.param["band"]), 0, -1): - bp = mp.param["band"][d] - - if d == len(mp.param["band"]): # high-end band - X_wave[d], _ = librosa.load( - mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"] - ) - y_wave[d], _ = librosa.load( - inst_path, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - else: # lower bands - X_wave[d] = librosa.resample( - X_wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - y_wave[d] = librosa.resample( - y_wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - - X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d]) - - X_spec_s[d] = wave_to_spectrogram( - X_wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - y_spec_s[d] = wave_to_spectrogram( - y_wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - - del X_wave, y_wave - - X_spec_m = combine_spectrograms(X_spec_s, mp) - y_spec_m = combine_spectrograms(y_spec_s, mp) - - if X_spec_m.shape != y_spec_m.shape: - raise ValueError("The combined spectrograms are different: " + mix_path) - - _, ext = os.path.splitext(mix_path) - - np.save(mix_cache_path, X_spec_m) - np.save(inst_cache_path, y_spec_m) - - return X_spec_m, y_spec_m - - -def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse): - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - wave_left = librosa.istft(spec_left, hop_length=hop_length) - wave_right = librosa.istft(spec_right, hop_length=hop_length) - - if reverse: - return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) - elif mid_side: - return np.asfortranarray( - [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] - ) - elif mid_side_b2: - return np.asfortranarray( - [ - np.add(wave_right / 1.25, 0.4 * wave_left), - np.subtract(wave_left / 1.25, 0.4 * wave_right), - ] - ) - else: - return np.asfortranarray([wave_left, wave_right]) - - -def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2): - import threading - - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - def run_thread(**kwargs): - global wave_left - wave_left = librosa.istft(**kwargs) - - thread = threading.Thread( - target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length} - ) - thread.start() - wave_right = librosa.istft(spec_right, hop_length=hop_length) - thread.join() - - if reverse: - return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) - elif mid_side: - return np.asfortranarray( - [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] - ) - elif mid_side_b2: - return np.asfortranarray( - [ - np.add(wave_right / 1.25, 0.4 * wave_left), - np.subtract(wave_left / 1.25, 0.4 * wave_right), - ] - ) - else: - return 
np.asfortranarray([wave_left, wave_right]) - - -def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None): - wave_band = {} - bands_n = len(mp.param["band"]) - offset = 0 - - for d in range(1, bands_n + 1): - bp = mp.param["band"][d] - spec_s = np.ndarray( - shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex - ) - h = bp["crop_stop"] - bp["crop_start"] - spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[ - :, offset : offset + h, : - ] - - offset += h - if d == bands_n: # higher - if extra_bins_h: # if --high_end_process bypass - max_bin = bp["n_fft"] // 2 - spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[ - :, :extra_bins_h, : - ] - if bp["hpf_start"] > 0: - spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) - if bands_n == 1: - wave = spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - else: - wave = np.add( - wave, - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - ) - else: - sr = mp.param["band"][d + 1]["sr"] - if d == 1: # lower - spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) - wave = librosa.resample( - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - bp["sr"], - sr, - res_type="sinc_fastest", - ) - else: # mid - spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) - spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) - wave2 = np.add( - wave, - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - ) - # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest") - wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy") - - return wave.T - - -def fft_lp_filter(spec, bin_start, bin_stop): - g = 1.0 - for b in range(bin_start, bin_stop): - g -= 1 / (bin_stop - bin_start) - spec[:, b, :] = g * spec[:, b, :] - - spec[:, bin_stop:, :] *= 0 - - return spec - - -def fft_hp_filter(spec, bin_start, bin_stop): - g = 1.0 - for b in range(bin_start, bin_stop, -1): - g -= 1 / (bin_start - bin_stop) - spec[:, b, :] = g * spec[:, b, :] - - spec[:, 0 : bin_stop + 1, :] *= 0 - - return spec - - -def mirroring(a, spec_m, input_high_end, mp): - if "mirroring" == a: - mirror = np.flip( - np.abs( - spec_m[ - :, - mp.param["pre_filter_start"] - - 10 - - input_high_end.shape[1] : mp.param["pre_filter_start"] - - 10, - :, - ] - ), - 1, - ) - mirror = mirror * np.exp(1.0j * np.angle(input_high_end)) - - return np.where( - np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror - ) - - if "mirroring2" == a: - mirror = np.flip( - np.abs( - spec_m[ - :, - mp.param["pre_filter_start"] - - 10 - - input_high_end.shape[1] : mp.param["pre_filter_start"] - - 10, - :, - ] - ), - 1, - ) - mi = np.multiply(mirror, input_high_end * 1.7) - - return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi) - - -def ensembling(a, specs): - for i in range(1, len(specs)): - if i == 1: - spec = specs[0] - - ln = min([spec.shape[2], specs[i].shape[2]]) - spec = spec[:, :, :ln] - specs[i] = specs[i][:, :, :ln] - - if "min_mag" == a: - spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec) - if "max_mag" == a: - spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec) - - return spec - - -def stft(wave, nfft, hl): - wave_left = np.asfortranarray(wave[0]) - wave_right = 
np.asfortranarray(wave[1]) - spec_left = librosa.stft(wave_left, nfft, hop_length=hl) - spec_right = librosa.stft(wave_right, nfft, hop_length=hl) - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def istft(spec, hl): - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - wave_left = librosa.istft(spec_left, hop_length=hl) - wave_right = librosa.istft(spec_right, hop_length=hl) - wave = np.asfortranarray([wave_left, wave_right]) - - -if __name__ == "__main__": - import argparse - import sys - import time - - import cv2 - from model_param_init import ModelParameters - - p = argparse.ArgumentParser() - p.add_argument( - "--algorithm", - "-a", - type=str, - choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"], - default="min_mag", - ) - p.add_argument( - "--model_params", - "-m", - type=str, - default=os.path.join("modelparams", "1band_sr44100_hl512.json"), - ) - p.add_argument("--output_name", "-o", type=str, default="output") - p.add_argument("--vocals_only", "-v", action="store_true") - p.add_argument("input", nargs="+") - args = p.parse_args() - - start_time = time.time() - - if args.algorithm.startswith("invert") and len(args.input) != 2: - raise ValueError("There should be two input files.") - - if not args.algorithm.startswith("invert") and len(args.input) < 2: - raise ValueError("There must be at least two input files.") - - wave, specs = {}, {} - mp = ModelParameters(args.model_params) - - for i in range(len(args.input)): - spec = {} - - for d in range(len(mp.param["band"]), 0, -1): - bp = mp.param["band"][d] - - if d == len(mp.param["band"]): # high-end band - wave[d], _ = librosa.load( - args.input[i], - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - - if len(wave[d].shape) == 1: # mono to stereo - wave[d] = np.array([wave[d], wave[d]]) - else: # lower bands - wave[d] = librosa.resample( - wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - - spec[d] = wave_to_spectrogram( - wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - - specs[i] = combine_spectrograms(spec, mp) - - del wave - - if args.algorithm == "deep": - d_spec = np.where(np.abs(specs[0]) <= np.abs(spec[1]), specs[0], spec[1]) - v_spec = d_spec - specs[1] - sf.write( - os.path.join("{}.wav".format(args.output_name)), - cmb_spectrogram_to_wave(v_spec, mp), - mp.param["sr"], - ) - - if args.algorithm.startswith("invert"): - ln = min([specs[0].shape[2], specs[1].shape[2]]) - specs[0] = specs[0][:, :, :ln] - specs[1] = specs[1][:, :, :ln] - - if "invert_p" == args.algorithm: - X_mag = np.abs(specs[0]) - y_mag = np.abs(specs[1]) - max_mag = np.where(X_mag >= y_mag, X_mag, y_mag) - v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0])) - else: - specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2) - v_spec = specs[0] - specs[1] - - if not args.vocals_only: - X_mag = np.abs(specs[0]) - y_mag = np.abs(specs[1]) - v_mag = np.abs(v_spec) - - X_image = spectrogram_to_image(X_mag) - y_image = spectrogram_to_image(y_mag) - v_image = spectrogram_to_image(v_mag) - - cv2.imwrite("{}_X.png".format(args.output_name), X_image) - cv2.imwrite("{}_y.png".format(args.output_name), y_image) - cv2.imwrite("{}_v.png".format(args.output_name), v_image) - - sf.write( - "{}_X.wav".format(args.output_name), - cmb_spectrogram_to_wave(specs[0], mp), - mp.param["sr"], - ) - sf.write( - "{}_y.wav".format(args.output_name), - 
cmb_spectrogram_to_wave(specs[1], mp), - mp.param["sr"], - ) - - sf.write( - "{}_v.wav".format(args.output_name), - cmb_spectrogram_to_wave(v_spec, mp), - mp.param["sr"], - ) - else: - if not args.algorithm == "deep": - sf.write( - os.path.join("ensembled", "{}.wav".format(args.output_name)), - cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp), - mp.param["sr"], - ) - - if args.algorithm == "align": - trackalignment = [ - { - "file1": '"{}"'.format(args.input[0]), - "file2": '"{}"'.format(args.input[1]), - } - ] - - for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."): - os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}") - - # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1)) diff --git a/spaces/Benson/text-generation/Examples/Agar.io Indir Apk.md b/spaces/Benson/text-generation/Examples/Agar.io Indir Apk.md deleted file mode 100644 index 9c715644ad8767cb15b0a90f828d689dfb584cb1..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Agar.io Indir Apk.md +++ /dev/null @@ -1,110 +0,0 @@ - -

      Agar.io Indir Apk: How to Download and Play the Popular Online Game

      -

      Are you looking for a fun, addictive online game to play on your Android device? If so, you may want to try Agar.io, a massively multiplayer online action game with millions of fans around the world. In this article we explain what Agar.io is, why you should download its apk file, how to download and install it, how to play it online with friends, and what players say about the game in their reviews. Let's get started!

      -

      What is Agar.io?

      -

      Agar.io is a game created in 2015 by the Brazilian developer Matheus Valadares. It is based on the idea of eating agar, a substance used to grow bacteria in a Petri dish. In the game you control a circular cell that eats smaller cells and agar pellets to grow bigger, while avoiding larger cells that can eat you. Its simple but addictive gameplay appeals to players of all ages and backgrounds.

      -

      agar.io indir apk


      Download Zip >>>>> https://bltlly.com/2v6K4H



      -

      The gameplay of Agar.io

      -

      Agar.io is easy to learn but hard to master. You start with a small cell that you move around the map with your finger or mouse. You can eat agar pellets scattered randomly across the map to gain a little mass, or eat cells smaller than you to gain a lot of mass. You also have to watch out for cells larger than you, because they can eat you and end your run.

      -

      You can also use two buttons to improve your play. The split button divides your cell into two smaller cells that move faster and catch small prey more easily; splitting, however, also makes you more vulnerable, because larger cells can eat your smaller pieces. The eject button throws some mass out of your cell in the direction you are pointing, which you can use to feed other cells, fire viruses at them, or escape from them. A toy sketch of these mass rules follows below.
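      To make these mass rules concrete, here is a small illustrative Python sketch. It is not the game's actual code, and the eat threshold and eject amounts below are assumed placeholder values rather than Agar.io's real constants.

```python
# Toy model of Agar.io-style mass rules (illustration only; the constants
# below are assumptions, not values published by the game).
EAT_RATIO = 1.25    # assumed: a cell must be ~25% heavier to absorb another
EJECT_COST = 16.0   # assumed mass removed from the ejecting cell
EJECT_GAIN = 12.0   # assumed mass gained by whoever eats the ejected pellet


def can_eat(attacker_mass: float, target_mass: float) -> bool:
    """A cell absorbs another only if it is sufficiently heavier."""
    return attacker_mass >= target_mass * EAT_RATIO


def split(mass: float) -> tuple[float, float]:
    """Splitting trades size for speed: two halves that move faster but are easier to eat."""
    return mass / 2, mass / 2


def eject(mass: float) -> tuple[float, float]:
    """Ejecting sheds a pellet that can feed an ally, push a virus, or lighten you to escape."""
    if mass <= EJECT_COST:
        return mass, 0.0  # too small to eject anything
    return mass - EJECT_COST, EJECT_GAIN


if __name__ == "__main__":
    me, rival = 300.0, 230.0
    print(can_eat(me, rival))   # True: 300 >= 230 * 1.25 (= 287.5)
    print(split(me))            # (150.0, 150.0)
    print(eject(me))            # (284.0, 12.0)
```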

      -

      The features of Agar.io

      - -
        -
      • Multiple game modes: you can play modes such as FFA (Free-For-All), Battle Royale, Teams, Experimental, and Party. Each mode has its own rules and challenges.
      • Special skins: you can customize your cell's appearance with words, phrases, symbols, or predefined skins. Some skins are secret and require specific usernames to unlock.
      • Leaderboards and stats: you can check your rank and score on the leaderboard and compare them with other players. You can also view stats such as your highest mass, longest survival time, and number of cells eaten.
      • Social features: you can chat with other players in the game or invite them to join your party. You can also share your game on social networks such as Facebook or Twitter.
      -

      Why download the Agar.io apk?

      -

      If you want to play Agar.io on your Android device, you may wonder why you should download its apk file instead of installing the game from the Google Play Store. There are several reasons why downloading the Agar.io apk can be the better option. Here are some of them:

      -

      The benefits of downloading the Agar.io apk

      -

      Downloading the Agar.io apk has several benefits, such as:

      -
        -
      • It is free: you do not have to pay anything to download and play the Agar.io apk, and you can enjoy the game without ads or in-app purchases.
      • It is fast: you do not have to wait for the game to download and install from the Google Play Store. You can grab the Agar.io apk directly from a trusted source and install it in a few minutes.
      • It is up to date: you do not have to worry about missing new features or bug fixes released by the game's developers. You can always download the latest version of the Agar.io apk and play with the best performance and quality.
      -

      The requirements for downloading the Agar.io apk

      -

      Before downloading the Agar.io apk, make sure your device meets the following requirements:

      -

      -
        -
      • Android version: you need Android 4.4 or higher on your device.
      • Storage space: you need at least 50 MB of free storage on your device.
      • Internet connection: you need a stable, fast internet connection to play Agar.io online.
      • Permission settings: you need to enable unknown sources in your device's security settings in order to install the Agar.io apk.
      -

      How to download and install the Agar.io apk?

      -

      Now that you know why you might download the Agar.io apk, you may be wondering how to do it. Don't worry, it is quick and simple. Just follow these steps:

      -

      The steps to download and install the Agar.io apk

      -
        -
      1. Go to a trusted website that offers the Agar.io apk file, such as [ApkPure] or [ApkMirror].
      2. Find and tap the download button for the Agar.io apk file. The file is roughly 37 MB.
      3. Wait for the file to download to your device. You can check the progress in the notification bar.
      4. Once the file has downloaded, tap it to open it. You may see a warning that says "This type of file can harm your device"; this is expected for sideloaded files, so tap "OK" to continue.
      5. You will see a screen asking you to install the Agar.io apk. Tap "Install" and wait for the installation to finish.
      6. You will see a screen that says "App installed". Tap "Open" to launch the Agar.io apk and start playing. (An optional sanity check on the downloaded file is sketched right after this list.)
      -
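      Before opening the downloaded file, you can optionally sanity-check it. The Python sketch below is a generic example and not part of the game: the local file name and the expected hash are placeholders that you would replace with whatever the download page actually lists.

```python
# Optional check of a downloaded apk before sideloading (generic sketch).
# "Agar.io.apk" and expected_sha256 are placeholders, not real published values.
import hashlib
from pathlib import Path

apk_path = Path("Agar.io.apk")                                   # hypothetical local file name
expected_sha256 = "paste-the-hash-from-the-download-page-here"   # placeholder

data = apk_path.read_bytes()
print(f"size: {len(data) / 1024 / 1024:.1f} MB")                 # the article quotes roughly 37 MB

digest = hashlib.sha256(data).hexdigest()
print("sha256 matches" if digest == expected_sha256 else "sha256 does NOT match")
```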

      Tips and tricks for playing the Agar.io apk

      -

      If you want to improve your skills and have more fun playing the Agar.io apk, here are a few tips and tricks that can help:

      -
        - -
      • Eject mass strategically: ejecting mass lets you feed other cells, fire viruses at them, or escape from them, but it also shrinks you and slows you down. Only eject mass when you have a clear purpose or plan.
      • Avoid viruses: viruses are the spiky green cells that split you into many smaller pieces if you touch them. Stay away from them unless you want to use them as a weapon or a shield against other cells.
      • Use corners and edges: the corners and edges of the map can help you trap smaller cells or hide from bigger ones. Use them when you need an advantage or want to avoid a disadvantage.
      • Team up with others: partnering with other cells helps you survive longer and dominate the map. You can team up by feeding other players, splitting for them, or chatting with them.
      -

      How do you play Agar.io online with friends?

      -

      If you want to play Agar.io online with your friends, you may be wondering how to do it. Don't worry, it is simple. Here is what you need to know about the online modes and strategies:

      -

      The modes of Agar.io online

      -

      Agar.io online has several modes to choose from, depending on your preference and mood. Some of these modes are:

      -
        -
      • FFA (Free-For-All): the default mode, where you play alone or against random players. You can join any server and try to become the biggest cell on the map.
      • Battle Royale: a mode where you must survive and eliminate other players in an ever-shrinking arena. You can join any server and try to be the last cell standing.
      • Teams: a mode where you play together with other players on a team. You can join any server and try to help your team dominate the map.
      • Experimental: a mode where you can try out new features and mechanics the developers are working on. You can join any server and discover what is new.
      -

      The strategies of Agar.io online

      -

      If you want to improve your skills and have more fun playing Agar.io online, here are some strategies that can help:

      -
        -
      • Use the map: the map shows the location of other cells, agar pellets, viruses, and the borders. Use it to plan your moves and avoid danger.
      • Use the chat: the chat lets you talk to other players in the game. Use it to make friends, enemies, alliances, or jokes.
      • Use the skins: skins let you customize your cell's appearance. Use them to express yourself, impress other players, or confuse them.
      • Use the leaderboard: the leaderboard shows the rank and score of the top 10 cells on the map. Use it to track your progress, challenge others, or avoid them.
      • Use the settings: the settings let you adjust the game's graphics, sound, controls, and other options. Use them to tune your experience and performance.
      -

      What are the reviews of the Agar.io apk?

      -

      If you want to know what other players think of the Agar.io apk, you may want to read some reviews of the game. Here are a few examples of positive and negative reviews from real users:

      -

      The positive reviews of the Agar.io apk

      - -
Name | Rating | Review
Alice | 5 stars | I love this game! It is very fun and addictive. I play it every day with my friends and we have a great time. The graphics are simple but cute, the gameplay is smooth and fast, and the modes are varied and exciting. I recommend this game to anyone who likes online games.
Charlie | 5 stars | This game is amazing! It is very simple yet addictive. I like how you can customize your cell with different skins and names and chat with other players in the game. The game is very social and friendly. The best part is that it is free and easy to download and install.
      The negative reviews of the Agar.io apk

Name | Rating | Review
Dave | 2 stars | This game is boring! It is very repetitive and frustrating. I do not like how you can be eaten by bigger cells or viruses in a second and lose all your progress. The game is very unfair and random. The worst part is that it has too many ads and in-app purchases.
Frank | 3 stars | This game is okay. It is very simple and easy to play. I like how you can play with other players online, and also offline if you want. The game is very casual and relaxing. The only thing I do not like is that it is too basic and lacks depth. The game could use more features and modes to make it more interesting and fun.