diff --git a/spaces/101-5/gpt4free/g4f/Provider/Providers/DFEHub.py b/spaces/101-5/gpt4free/g4f/Provider/Providers/DFEHub.py
deleted file mode 100644
index 1bbdd01ea392c5421cf24762b74c80c6506b904e..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/Provider/Providers/DFEHub.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os, requests
-from ...typing import sha256, Dict, get_type_hints
-import json
-
-url = "https://chat.dfehub.com/api/chat"
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- base = ''
- for message in messages:
- base += '%s: %s\n' % (message['role'], message['content'])
- base += 'assistant:'
-
- headers = {
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
- }
- data = {
- "model": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5",
- "maxLength": 12000,
- "tokenLimit": 4000
- },
- "messages": [
- {
- "role": "user",
- "content": base
- }
- ],
- "key": "",
- "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
- "temperature": 1
- }
- response = requests.post(url, headers=headers, data=json.dumps(data))
- if response.status_code == 200:
- yield response.text
- else:
- print(f"Error occurred: {response.status_code}")
- return None
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/modules.py b/spaces/123Kumar/vits-uma-genshin-honkai123/modules.py
deleted file mode 100644
index 56ea4145eddf19dd330a3a41ab0183efc1686d83..0000000000000000000000000000000000000000
--- a/spaces/123Kumar/vits-uma-genshin-honkai123/modules.py
+++ /dev/null
@@ -1,388 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
- assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(
- nn.ReLU(),
- nn.Dropout(p_dropout))
- for _ in range(n_layers-1):
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
- Dilated and Depth-Separable Convolution
- """
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size ** i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
- groups=channels, dilation=dilation, padding=padding
- ))
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
- super(WN, self).__init__()
- assert(kernel_size % 2 == 1)
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
- for i in range(n_layers):
- dilation = dilation_rate ** i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
- dilation=dilation, padding=padding)
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(
- x_in,
- g_l,
- n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:,:self.hidden_channels,:]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:,self.hidden_channels:,:]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2])))
- ])
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1)))
- ])
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1])))
- ])
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels,1))
- self.logs = nn.Parameter(torch.zeros(channels,1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1,2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1,2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
-
-class ConvFlow(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails='linear',
- tail_bound=self.tail_bound
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1,2])
- if not reverse:
- return x, logdet
- else:
- return x
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Film 4 and Enjoy Movies Offline Without Ads or Interruptions.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Film 4 and Enjoy Movies Offline Without Ads or Interruptions.md
deleted file mode 100644
index d5bfb27a445b82aa4eae257e5f2bfe4e9a4c4949..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Film 4 and Enjoy Movies Offline Without Ads or Interruptions.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
-
How to Download Film 4 and Watch Movies Online
-
If you are a movie lover, you may have heard of Film 4, a British free-to-air television channel that broadcasts a wide range of films, from classics to cults, from indie to mainstream. Film 4 is also available online, where you can stream or download movies on demand. In this article, we will show you how to download Film 4 and watch movies online.
-
What is Film 4?
-
Film 4 is a part of Channel 4, a public-service broadcaster in the UK. Channel 4 began making films under the Film on Four banner in 1982, and the Film4 channel itself launched in 1998 as a subscription service before becoming free-to-air in 2006. Film 4 is known for its diverse and quality programming, featuring films from various genres, countries, and eras. Film 4 also produces and co-produces original films, such as Slumdog Millionaire, The Favourite, and Three Billboards Outside Ebbing, Missouri.
Film 4 has an online platform called All 4, where you can watch live TV or catch up on shows and movies that you missed. All 4 also has a section called Film 4 On Demand, where you can stream or download movies from the Film 4 library. You can access All 4 on various devices, such as computers, smartphones, tablets, smart TVs, game consoles, etc.
-
How to Download Film 4?
-
To download Film 4 and watch movies online, you need to follow these steps:
-
-
Go to the All 4 website at https://www.channel4.com/ or download the All 4 app on your device.
-
Sign up for a free account or log in if you already have one.
-
Browse or search for the movie that you want to watch.
-
Click on the movie and select the option to download it.
-
Choose the quality and file size that suits your device and internet connection.
-
Wait for the download to finish and enjoy your movie offline.
-
-
Note that not all movies are available for download. You can check the availability by looking for the download icon next to the movie title. Also note that downloaded movies have an expiry date, which means you have to watch them within a certain period of time before they are deleted from your device.
-
What are the Benefits of Downloading Film 4?
-
Downloading Film 4 has many benefits, such as:
-
-
You can watch movies offline without worrying about internet connection or data usage.
-
You can watch movies anytime and anywhere without being tied to a TV schedule.
-
You can choose the quality and file size that suits your device and storage space.
-
You can avoid ads and interruptions that may occur when streaming online.
-
-
Conclusion
-
In this article, we have shown you how to download Film 4 and watch movies online. Film 4 is a great source of entertainment for movie lovers, offering a wide range of films from various genres, countries, and eras. By downloading Film 4, you can enjoy your movies offline without any hassle. We hope you found this article helpful and informative. If you have any questions, please let us know.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Anya Dasha Crazy Holiday.md b/spaces/1gistliPinn/ChatGPT4/Examples/Anya Dasha Crazy Holiday.md
deleted file mode 100644
index cce40b2c9a908dbd0c35978bc3df06d16df839c2..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Anya Dasha Crazy Holiday.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Find crazy holiday stock images in HD and millions of other royalty-free stock photos, illustrations and vectors in the Shutterstock collection. Thousands of new ...
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Banjo Marathi Movie Download Dvdrip Movies REPACK.md b/spaces/1gistliPinn/ChatGPT4/Examples/Banjo Marathi Movie Download Dvdrip Movies REPACK.md
deleted file mode 100644
index 2737cda8591c3b47eb79becdd8a11a6af7b070e9..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Banjo Marathi Movie Download Dvdrip Movies REPACK.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Banjo is a 2016 Indian Hindi-language Action Drama film, directed by Ravi Jadhav and ... "Banjo Movie Review: Riteish Deshmukh's Film is a Pale Shadow of Rock On - NDTV Movies". NDTVMovies.com ...
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Bloody Ultra Core 3 Keygen High Quality.md b/spaces/1gistliPinn/ChatGPT4/Examples/Bloody Ultra Core 3 Keygen High Quality.md
deleted file mode 100644
index 00e0900f2682b6582090addb35cff5c763d45069..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Bloody Ultra Core 3 Keygen High Quality.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-CMJ RADIO 200 AIRPLAY 50 200 AIRPLAY RADIO 200 ADDS C = Core Station A. WHTESTRFES CATPOWER CURSIVE dYNGOiCXM BLACK KEYS CORAL ... SAHARA HOTNGHTS SOLEDAD BROTHERS T-MNUS BLAaEYES ULTRA ... 6S BLUE NOTE 33 1/3 WAFS IVTYfVORNNG JACKET IGUANAS JAYHAWKS ...
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Angry Birds Space 2 APK - Download Now and Start Your Space Adventure with the Angry Birds.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Angry Birds Space 2 APK - Download Now and Start Your Space Adventure with the Angry Birds.md
deleted file mode 100644
index 3b6b9578a114c1b43294c3cc14654c31a5b7dc88..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Angry Birds Space 2 APK - Download Now and Start Your Space Adventure with the Angry Birds.md
+++ /dev/null
@@ -1,175 +0,0 @@
-
-
Angry Birds Space 2 APK: A Review of the Latest Version of the Popular Mobile Game
-
Angry Birds Space 2 APK is a physics-based puzzle game and the third game in the Angry Birds series. It was developed and released by Rovio Entertainment Ltd. in March 2021. It follows the tale of an orbital bead, which was stolen by greedy investors. In order to earn back the lost beads, your angry bird has to fly through different space portals and finish all levels in each stage.
Angry Birds Space 2 APK is an exciting and fun game that offers intergalactic fun at every turn. It has over 300 levels across 10 planets, including our own Solar System. It also has new playable characters, special abilities, zero-gravity space adventures, trick shots, hidden bonus levels, daily missions, and more. If you are a fan of Angry Birds or puzzle games in general, you should definitely download and play this game.
-
In this article, we will review the features, gameplay, tips, comparison, rating, and pros and cons of Angry Birds Space 2 APK. We will also show you how to download and install the game on your Android device. By the end of this article, you will have a clear idea of whether this game is worth your time and money or not.
-
Features of Angry Birds Space 2 APK
-
Angry Birds Space 2 APK has many features that make it stand out from other puzzle games. Here are some of them:
-
angry birds space 2 apk download
-angry birds space 2 apk mod
-angry birds space 2 apk free
-angry birds space 2 apk full version
-angry birds space 2 apk android
-angry birds space 2 apk latest version
-angry birds space 2 apk offline
-angry birds space 2 apk unlimited money
-angry birds space 2 apk obb
-angry birds space 2 apk hack
-angry birds space 2 apk for pc
-angry birds space 2 apk revdl
-angry birds space 2 apk uptodown
-angry birds space 2 apk pure
-angry birds space 2 apk mirror
-angry birds space 2 apk rexdl
-angry birds space 2 apk data
-angry birds space 2 apk old version
-angry birds space 2 apk no ads
-angry birds space 2 apk cracked
-angry birds space 2 apk game
-angry birds space 2 apk file
-angry birds space 2 apk mob.org
-angry birds space 2 apk apkpure
-angry birds space 2 apk appvn
-angry birds space 2 apk mod menu
-angry birds space 2 apk all levels unlocked
-angry birds space 2 apk android oyun club
-angry birds space 2 apk andropalace
-angry birds space 2 apk aptoide
-angry birds space 2 apk android republic
-angry birds space 2 apk blackmod
-angry birds space 2 apk bluestacks
-angry birds space 2 apk by rovio entertainment corporation
-angry birds space 2 apk cheat codes
-angry birds space 2 apk coins hack
-angry birds space 2 apk direct download link
-angry birds space 2 apk download for android phoneky.com
-angry birds space 2 apk download highly compressed
-angry birds space 2 apk download mobomarket
-
-
Over 300 interstellar levels across 10 planets: You can play over 300 levels across different planets, such as Cold Cuts, Red Planet, Utopia, Solar System, etc. Each planet has its own theme, challenges, enemies, and surprises. You can also unlock new episodes as you progress through the game.
-
New playable characters and unique special abilities for each bird: You can use different birds to fling at the pigs, such as Red Bird, Bomb Bird, Ice Bird, Lazer Bird, etc. Each bird has its own special ability that can help you in different situations. For example, Lazer Bird can change direction in mid-air, Ice Bird can freeze objects, etc. You can also unlock new birds as you play.
-
Zero-gravity space adventures and trick shots using planets' gravity: One of the most interesting features of Angry Birds Space 2 APK is the zero-gravity space environment. You can use the gravity of the planets to make trick shots and hit the pigs in creative ways. You can also use the space debris, asteroids, and other objects to your advantage. The game physics are realistic and fun to experiment with.
-
Hidden bonus levels and beautifully detailed backgrounds: You can find hidden bonus levels in each planet by looking for golden eggs, stars, or other clues. These bonus levels offer extra challenges and rewards. You can also enjoy the stunning graphics and backgrounds of the game, which are colorful, detailed, and immersive.
-
Daily missions and achievements: You can complete daily missions to earn coins, power-ups, and other rewards. You can also unlock achievements by completing certain tasks or reaching certain milestones. These features add more replay value and motivation to the game.
-
-
How to Download and Install Angry Birds Space 2 APK
-
Angry Birds Space 2 APK is not available on the Google Play Store, so you will need to download it from a third-party source. Here are the steps on how to do that:
-
-
Enable unknown sources on your device: Go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.
-
Download Angry Birds Space 2 APK file: You can download the APK file from various websites, such as APKPure, APKMirror, etc. Make sure you download it from a trusted and reliable source. You can also scan the file with an antivirus app before installing it.
-
Install Angry Birds Space 2 APK file: Locate the downloaded file on your device and tap on it. Follow the instructions on the screen to install the app. It may take a few minutes depending on your device and internet speed.
-
Launch Angry Birds Space 2 APK: Once the installation is done, you can launch the app from your app drawer or home screen. Enjoy playing Angry Birds Space 2 APK!
-
-
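As an extra sanity check on the file from step 2, you can compare its size and SHA-256 checksum against the values published on the page you downloaded it from, when the site lists them. Below is a minimal Python sketch of that check; the file name and the expected checksum are placeholders rather than values from this article, so replace them with your own.

```python
import hashlib
import os

# Placeholders -- use the real path of your download and the checksum
# published by the site you downloaded from (if it provides one).
apk_path = "angry-birds-space-2.apk"
expected_sha256 = "paste-the-published-checksum-here"

print(f"File size: {os.path.getsize(apk_path)} bytes")

# Hash the file in chunks so large APKs do not need to fit in memory.
sha256 = hashlib.sha256()
with open(apk_path, "rb") as f:
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        sha256.update(chunk)

digest = sha256.hexdigest()
print(f"SHA-256: {digest}")
if digest != expected_sha256.lower():
    print("Checksum does NOT match -- do not install this file.")
```

If the size or checksum does not match what the download page advertises, delete the file and download it again from a different source.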
Tips on how to avoid malware and viruses when downloading APK files:
-
-
Do some research before downloading: Check the reviews, ratings, comments, and feedback of the app and the website you are downloading from. Look for any red flags or signs of malware or viruses.
-
Use a VPN service: A VPN service can protect your online privacy and security by encrypting your data and hiding your IP address. It can also help you bypass geo-restrictions and access blocked websites.
-
Update your device and apps regularly: Updating your device and apps can fix any bugs or vulnerabilities that may expose you to malware or viruses. It can also improve your device performance and stability.
-
-
Gameplay and Tips of Angry Birds Space 2 APK
-
Angry Birds Space 2 APK is a simple yet addictive game that anyone can play. Here are some basics on how to play the game and use the different birds and their abilities:
-
-
How to play the game: The game consists of different levels across different planets. In each level, you have to fling angry birds at the pigs using a slingshot. Your goal is to destroy all the pigs and their structures using as few birds as possible. You can also collect stars, coins, power-ups, and other items along the way.
-
How to use the different birds and their abilities: Each bird has its own color, shape, size, weight, and special ability. You can tap on the screen while flinging a bird to activate its ability. Here are some examples of the birds and their abilities:
-
-
Red Bird: The most basic bird that has no special ability. It is good for breaking wood and glass.
-
Bomb Bird: A black bird that explodes when tapped or after hitting something. It is good for breaking stone and metal.
-
Ice Bird: A blue bird that freezes objects when tapped or after hitting something. It is good for making objects brittle and easier to break.
-
Lazer Bird: A purple bird that changes direction in mid-air when tapped. It is good for hitting hard-to-reach targets or making curved shots.
-
And more...: There are many more birds that you can unlock and use in the game, such as Terence, Stella, Bubbles, Hal, etc. Each one has its own unique ability that can help you in different situations.
-
-
-
Tips and tricks on how to complete the levels and get three stars:
-
-
Use the right bird for the right job: Try to match the bird's ability with the type of material or structure you are aiming at. For example, use Bomb Bird for stone and metal, Ice Bird for wood and glass, etc.
-
Use the gravity of the planets: You can use the gravity of the planets to make curved shots or hit multiple targets with one bird. You can also use the space debris, asteroids, and other objects to bounce or ricochet your birds.
-
Use power-ups wisely: You can use power-ups to boost your birds' abilities or get extra birds. For example, you can use the King Sling to fling your birds faster and farther, the Sling Scope to aim more accurately, the Birdquake to shake the ground and make structures collapse, etc. However, power-ups are limited and cost coins, so use them sparingly and only when necessary.
-
Replay levels to improve your score: You can replay any level you have completed to try to get a better score or more stars. You can also try different strategies or birds to see what works best for you.
-
Watch videos or read guides for help: If you are stuck on a level or want to learn more tips and tricks, you can watch videos or read guides online. There are many websites and YouTube channels that offer walkthroughs, tutorials, and tips for Angry Birds Space 2 APK.
-
-
Comparison with Angry Birds Space
-
Angry Birds Space 2 APK is a sequel to Angry Birds Space, which was released in 2012. Angry Birds Space was the first game in the series that introduced the space theme and the zero-gravity physics. It was also a huge success and received positive reviews from critics and users alike.
-
Angry Birds Space 2 APK is similar to Angry Birds Space in many ways, but it also has some differences. Here are some of them:
-
-
-
| Angry Birds Space | Angry Birds Space 2 APK |
| --- | --- |
| Has over 200 levels across 9 planets | Has over 300 levels across 10 planets |
| Has 8 playable characters with different abilities | Has 12 playable characters with different abilities |
| Has boss battles with King Pig, Fat Pig, etc. | Has boss battles with greedy investors, etc. |
| Has golden eggs and eggsteroids as hidden bonus levels | Has golden eggs and stars as hidden bonus levels |
| Has power-ups such as Super Seeds, Space Eagles, etc. | Has power-ups such as King Sling, Sling Scope, Birdquake, etc. |
| Has a simple and cartoonish graphics style | Has a more detailed and realistic graphics style |
| Has a space-themed soundtrack and sound effects | Has a more varied and dynamic soundtrack and sound effects |
-
Both Angry Birds Space and Angry Birds Space 2 APK are great games that offer hours of entertainment and enjoyment. However, if we have to choose one, we would say that Angry Birds Space 2 APK is the better game: it has more levels, characters, features, and power-ups, along with better graphics and sound, and it offers more variety, challenge, and replay value. Therefore, we think that Angry Birds Space 2 APK is a superior game that deserves your attention and appreciation.
-
Rating and Review of Angry Birds Space 2 APK
-
Angry Birds Space 2 APK is a highly rated game by users and critics alike. It has an average rating of 4.5 out of 5 stars on various websites and platforms. It also has many positive reviews and feedback from users who praise the game for its gameplay, graphics, sound, features, etc.
-
Here are some of the strengths and weaknesses of the game, based on user feedback and on its gameplay, graphics, sound, and other aspects:
-
-
Strengths: The game has unique and innovative gameplay that combines physics, puzzle, and strategy elements. It also has a stunning and realistic graphics style that creates an immersive and captivating space environment. The game also has a dynamic and varied soundtrack and sound effects that enhance the mood and atmosphere of the game.
-
Weaknesses: The game can be very difficult and frustrating at times, especially in the later levels. It can also have some bugs and glitches that affect the performance and stability of the game. The game also has some in-app purchases and ads that can be annoying and expensive.
-
-
Conclusion
-
Angry Birds Space 2 APK is a fantastic game that offers a lot of fun and challenge for puzzle lovers and Angry Birds fans. It has over 300 levels across 10 planets, new playable characters, special abilities, zero-gravity space adventures, trick shots, hidden bonus levels, daily missions, and more. It also has a beautiful and realistic graphics style, a dynamic and varied soundtrack and sound effects, and a simple and intuitive user interface.
-
However, the game can also be very difficult and frustrating at times, especially in the later levels. It can also have some bugs and glitches that affect the performance and stability of the game. The game also has some in-app purchases and ads that can be annoying and expensive.
-
Therefore, we recommend that you download and play Angry Birds Space 2 APK if you are looking for a fun and challenging puzzle game that will keep you entertained for hours. However, be prepared to face some difficulties and frustrations along the way. You can also compare it with Angry Birds Space to see which one you like better.
-
FAQs
-
Here are some frequently asked questions about Angry Birds Space 2 APK:
-
-
Q1: Is Angry Birds Space 2 APK free?
-
A1: Yes, Angry Birds Space 2 APK is free to download and play. However, it has some in-app purchases and ads that can enhance your gaming experience or remove some limitations.
-
Q2: Is Angry Birds Space 2 APK safe?
-
A2: Yes, Angry Birds Space 2 APK is safe to download and play. However, you should always download it from a trusted and reliable source. You should also scan the file with an antivirus app before installing it. You should also enable unknown sources on your device only when necessary.
-
Q3: Is Angry Birds Space 2 APK compatible with my device?
-
A3: Angry Birds Space 2 APK is compatible with most Android devices that run on Android 4.4 or higher. However, some devices may have different specifications or features that may affect the performance or compatibility of the game. You can check the minimum requirements of the game on the website or platform you are downloading from.
-
Q4: How can I update Angry Birds Space 2 APK?
-
A4: You can update Angry Birds Space 2 APK by downloading and installing the latest version of the game from the same source you downloaded it from. You can also check for updates within the game settings or menu.
-
Q5: How can I contact the developers of Angry Birds Space 2 APK?
-
A5: You can contact the developers of Angry Birds Space 2 APK by visiting their official website or social media pages. You can also send them an email or leave them a feedback or review on the website or platform you downloaded the game from.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download CarX Drift Racing 2 Mod Apk Obb Data for Android.md b/spaces/1phancelerku/anime-remove-background/Download CarX Drift Racing 2 Mod Apk Obb Data for Android.md
deleted file mode 100644
index eb0c06242db30810bbf11b7d81a5e70dfa2cacde..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download CarX Drift Racing 2 Mod Apk Obb Data for Android.md
+++ /dev/null
@@ -1,139 +0,0 @@
-
-
CarX Drift Racing 2 OBB: A Guide for Android Users
-
If you are a fan of drifting games, you might have heard of CarX Drift Racing 2, one of the most popular and realistic drift racing games on Android. But did you know that there is a way to enhance your gaming experience even more? In this article, we will show you what CarX Drift Racing 2 OBB is, why you need it, how to download and install it on your device, and what are its features and benefits. Let's get started!
-
What is CarX Drift Racing 2 OBB and why do you need it?
-
OBB stands for Opaque Binary Blob, which is a type of file that contains additional data for some Android apps. These files are usually large in size and are stored in a separate folder on your device. They work together with APK files, which are the main files that install apps on your device.
CarX Drift Racing 2 is a game that requires an OBB file to run properly. This is because the game has high-quality graphics, sound effects, and animations that cannot fit in a single APK file. The OBB file contains all the extra data that makes the game look and sound amazing.
-
By using the OBB file for CarX Drift Racing 2, you can enjoy faster loading times, smoother performance, and more content in the game. You can access more tracks, cars, skins, and body parts that are not available in the APK file alone. You can also save space on your device by deleting the APK file after installing the OBB file.
-
How to download and install CarX Drift Racing 2 OBB on your Android device?
-
Downloading and installing CarX Drift Racing 2 OBB on your Android device is easy if you follow these steps:
-
-
Download the APK file and the OBB file from a reliable source. You can find them on websites like APKPure or GameGuardian. Make sure you download the latest version of both files.
-
Install the APK file on your device by tapping on it. You might need to enable unknown sources in your settings to do this.
-
Locate the OBB file on your device using a file manager app. It should be in a zip or rar format.
-
Extract the OBB file to a folder on your device. The folder should be named com.carxtech.carxdr2 and should be located in Android/obb.
-
Copy the OBB file to the com.carxtech.carxdr2 folder. The OBB file should have a name like main.1234.com.carxtech.carxdr2.obb, where 1234 is the version number.
-
Launch the game and enjoy!
-
-
If you have any problems with the installation, you can check the troubleshooting section below.
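If you prefer to do the copy step from a computer with the phone's storage mounted, or from a terminal app on the phone itself, the minimal Python sketch below shows the folder layout and file name the game expects. The source path and the version number 1234 are placeholders taken from the example above; using Python for the copy is just one option, not something the game requires.

```python
import os
import shutil

# Placeholders -- adjust to where your downloaded OBB file actually is.
# Paths are relative to the device's shared (internal) storage root.
src = "Download/main.1234.com.carxtech.carxdr2.obb"
obb_dir = "Android/obb/com.carxtech.carxdr2"

os.makedirs(obb_dir, exist_ok=True)              # create the game's OBB folder if it is missing
dst = os.path.join(obb_dir, os.path.basename(src))
shutil.copy(src, dst)                            # the game looks for the file at exactly this path
print(f"Copied OBB to {dst}")
```

The key point is simply that the file keeps its original main.<version>.com.carxtech.carxdr2.obb name and ends up inside Android/obb/com.carxtech.carxdr2; a file manager app achieves exactly the same result.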
-
What are the features and benefits of CarX Drift Racing 2 OBB?
-
CarX Drift Racing 2 is a game that will satisfy your need for speed and adrenaline. It is a realistic and immersive drift racing game that lets you customize your cars, compete with other players, and master your drifting skills. Here are some of the features and benefits of CarX Drift Racing 2 OBB:
-
-
Realistic physics: The game uses a sophisticated physics engine that simulates the behavior of real cars on different surfaces and conditions. You can feel the weight, traction, and inertia of your car as you drift around corners and curves.
-
Customizable cars: You can choose from over 80 cars from different brands and models. You can also modify your cars with various body parts, skins, wheels, tires, suspension, engine, and more. You can create your own unique style and show it off to other players.
-
Online multiplayer: You can join online races and tournaments with other players from around the world. You can challenge your friends or random opponents in different modes, such as tandem drifting, sprint racing, or capture the flag. You can also join clubs and teams to cooperate and compete with other players.
-
Career mode: You can progress through a series of events and missions that will test your drifting skills and earn you rewards. You can unlock new cars, tracks, parts, and achievements as you advance in your career. You can also improve your reputation and rank among other drifters.
-
-
By using the OBB file for CarX Drift Racing 2, you can access more content and features that are not available in the APK file alone. Here is a table that compares the game size and content with and without the OBB file:
-
carx drift racing 2 apk obb download
-carx drift racing 2 mod apk obb
-carx drift racing 2 android game obb
-carx drift racing 2 apk xapk obb
-carx drift racing 2 apk combo obb
-carx drift racing 2 apk data obb
-carx drift racing 2 apk full obb
-carx drift racing 2 apk latest version obb
-carx drift racing 2 apk offline obb
-carx drift racing 2 apk pure obb
-carx drift racing 2 apk revdl obb
-carx drift racing 2 apk unlimited money obb
-carx drift racing 2 apk update obb
-carx drift racing 2 game guardian obb
-carx drift racing 2 game for android obb
-carx drift racing 2 game for pc obb
-carx drift racing 2 game free download obb
-carx drift racing 2 game online obb
-carx drift racing 2 game play store obb
-carx drift racing 2 game review obb
-carx drift racing 2 hack apk obb
-carx drift racing 2 hack mod obb
-carx drift racing 2 hack version obb
-carx drift racing 2 install xapk obb
-carx drift racing 2 mod menu obb
-carx drift racing 2 mod money obb
-carx drift racing 2 mod unlocked obb
-carx drift racing 2 new update obb
-carx drift racing 2 old version obb
-carx drift racing 2 original apk obb
-carx drift racing 2 premium apk obb
-carx drift racing 2 pro apk obb
-carx drift racing 2 rexdl apk obb
-carx drift racing 2 sequel apk obb
-carx drift racing 2 tips and tricks obb
-carx drift racing 2 unlimited coins obb
-carx drift racing 2 unlimited gold obb
-carx drift racing 2 v1.1.0 apk obb
-carx drift racing 2 v1.26.1 apk obb
-how to download carx drift racing 2 with obb file
-how to install carx drift racing 2 with obb file
-how to play carx drift racing 2 with obb file
-how to update carx drift racing 2 with obb file
-what is the size of carx drift racing 2 with obb file
-where to download carx drift racing 2 with obb file
-
-
-
| Game size | Tracks | Cars | Skins | Body parts |
| --- | --- | --- | --- | --- |
| Without OBB file | 10 | 20 | 50 | 100 |
| With OBB file | 30 | 80 | 200 | 500 |
-
-
-
Conclusion
-
In conclusion, CarX Drift Racing 2 OBB is a must-have for any drift racing fan who wants to enjoy the full potential of the game. By downloading and installing the OBB file on your Android device, you can experience faster loading times, smoother performance, and more content in the game. You can also save space on your device by deleting the APK file after installing the OBB file.
-
If you are ready to take your drifting skills to the next level, download CarX Drift Racing 2 OBB today and join the millions of players who are already hooked on this amazing game. You will not regret it!
-
FAQs
-
Here are some frequently asked questions about CarX Drift Racing 2 OBB:
-
-
How do I update CarX Drift Racing 2 OBB?
-
To update CarX Drift Racing 2 OBB, you need to download the latest version of both the APK file and the OBB file from a reliable source. Then, you need to install the APK file on your device and copy the OBB file to the com.carxtech.carxdr2 folder on your device. You can overwrite the old files with the new ones.
-
How do I fix errors or crashes with CarX Drift Racing 2 OBB?
-
If you encounter any errors or crashes with CarX Drift Racing 2 OBB, you can try these solutions:
-
-
Make sure you have enough storage space on your device.
-
Make sure you have a stable internet connection.
-
Make sure you have downloaded the correct version of both the APK file and the OBB file for your device.
-
Make sure you have copied the OBB file to the correct folder on your device.
-
Make sure you have granted the necessary permissions to the game, such as storage, network, and location.
-
Clear the cache and data of the game from your settings.
-
Restart your device and try again.
-
-
If none of these solutions work, you can contact the developer of the game for further assistance.
-
How do I uninstall CarX Drift Racing 2 OBB?
-
To uninstall CarX Drift Racing 2 OBB, you need to delete both the APK file and the OBB file from your device. You can do this by following these steps:
-
-
Go to your settings and find the app manager or application list.
-
Find CarX Drift Racing 2 and tap on it.
-
Tap on uninstall and confirm your choice.
-
Go to your file manager app and find the Android/obb folder.
-
Delete the com.carxtech.carxdr2 folder and its contents.
-
-
You have successfully uninstalled CarX Drift Racing 2 OBB from your device.
-
Is CarX Drift Racing 2 OBB safe to use?
-
CarX Drift Racing 2 OBB is safe to use as long as you download it from a trusted source. You should avoid downloading it from unknown or suspicious websites that might contain malware or viruses. You should also scan the files with an antivirus app before installing them on your device. You should also be careful not to share your personal or financial information with any third-party apps or websites that claim to offer cheats or hacks for the game.
-
What are some tips and tricks for CarX Drift Racing 2 OBB?
-
Here are some tips and tricks that can help you improve your drifting skills and enjoy the game more:
-
-
Practice on different tracks and cars to learn how they handle and react to different situations.
-
Adjust your settings and controls to suit your preferences and comfort level.
-
Use the tuning feature to optimize your car's performance and appearance.
-
Watch replays and ghost races to learn from other players and improve your techniques.
-
Join clubs and teams to chat, cooperate, and compete with other players.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Euro Truck Simulator 3 Europa The Ultimate Truck Driving Game for Android.md b/spaces/1phancelerku/anime-remove-background/Euro Truck Simulator 3 Europa The Ultimate Truck Driving Game for Android.md
deleted file mode 100644
index cc34140506606dc2bdbd5a98ed8d4cfecbce443f..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Euro Truck Simulator 3 Europa The Ultimate Truck Driving Game for Android.md
+++ /dev/null
@@ -1,114 +0,0 @@
-
-
European Truck Simulator 3: A Review of the Best Truck Driving Game
-
Do you love driving trucks and delivering cargo across different countries? Do you want to experience the thrill and challenge of being a professional trucker? If yes, then you should try European Truck Simulator 3, the latest and most realistic truck simulation game ever made. In this article, we will review ETS3 and tell you why it is the best truck driving game you can play. We will also show you how to download and install ETS3 APK on your Android device, so you can enjoy this amazing game anytime, anywhere.
Euro Truck Simulator 3 (ETS3) is a video game developed by SCS Software, the same studio that created the popular Euro Truck Simulator 2 (ETS2). ETS3 is the third instalment in the Euro Truck Simulator series, which started in 2008. ETS3 is expected to be released in 2028 for PC, PS5, and Xbox consoles, according to some rumors and news sources. However, there is no official confirmation or announcement from the developers yet.
-
The gameplay and features of ETS3
-
ETS3 is a truck simulation game that lets you drive various trucks and trailers across Europe. You can choose from different truck models, chassis configurations, customizations, and cosmetics. You can also select your job and deliver your cargo to different destinations. You have to follow the traffic rules, manage your fuel, damage, fatigue, and other factors that affect your driving performance. You can also interact with other drivers, hire employees, buy garages, and expand your business.
-
ETS3 features a realistic and immersive truck physics system that makes driving feel like real life. You can feel the weight, speed, acceleration, braking, steering, suspension, and other aspects of your truck. You can also hear the authentic engine sounds, horn sounds, tire sounds, and other sound effects that add to the atmosphere. You can also adjust the camera angles, mirrors, lights, indicators, wipers, cruise control, and other controls to suit your preference.
-
ETS3 also features a vast and detailed map of Europe that covers dozens of cities and countries. You can travel across highways, country roads, urban roads, mountain roads, tunnels, bridges, tolls, ferries, and other types of roads. You can see the landmarks, buildings, landscapes, weather conditions, day and night cycle, seasons, and other elements that make each location unique. You can also encounter different types of traffic vehicles, pedestrians, animals, events, accidents, roadworks, police patrols, tolls, and other situations that make your journey more dynamic and unpredictable.
-
The system requirements and platforms of ETS3
-
ETS3 is expected to have higher system requirements than ETS2 due to its improved graphics and physics engine. According to some sources, these are the minimum and recommended system requirements for ETS3 on PC:
-
Truckers of Europe 3 android game download
-How to install european truck simulator 3 on mobile
-Best truck driving simulator games for android
-European truck simulator 3 apk mod unlimited money
-Download european truck simulator 3 latest version
-European truck simulator 3 gameplay and features
-Free download european truck simulator 3 for android
-European truck simulator 3 review and rating
-European truck simulator 3 cheats and tips
-European truck simulator 3 online multiplayer mode
-European truck simulator 3 system requirements and compatibility
-European truck simulator 3 update and patch notes
-European truck simulator 3 trailer and screenshots
-European truck simulator 3 support and feedback
-European truck simulator 3 download link and file size
-How to play european truck simulator 3 offline
-European truck simulator 3 maps and routes
-European truck simulator 3 customization and options
-European truck simulator 3 realistic physics and graphics
-European truck simulator 3 challenges and achievements
-How to unlock all trucks in european truck simulator 3
-European truck simulator 3 vs american truck simulator
-How to backup and restore european truck simulator 3 data
-European truck simulator 3 guide and tutorial
-European truck simulator 3 best trucks and trailers
-How to fix european truck simulator 3 errors and bugs
-European truck simulator 3 mod apk download free
-How to connect european truck simulator 3 with steering wheel
-European truck simulator 3 sound effects and music
-European truck simulator 3 traffic and weather conditions
-How to speed up european truck simulator 3 performance
-European truck simulator 3 skins and accessories
-How to change language in european truck simulator 3
-European truck simulator 3 forum and community
-European truck simulator 3 news and updates
-How to record and share european truck simulator 3 gameplay videos
-European truck simulator 3 tips and tricks for beginners
-How to get more money in european truck simulator 3
-European truck simulator 3 comparison with other simulators
-How to download european truck simulator 3 for pc
-European truck simulator 3 controller support and settings
-How to enable vr mode in european truck simulator 3
-European truck simulator 3 best mods and addons
-How to create your own mods for european truck simulator 3
-European truck simulator 3 faq and troubleshooting
-How to join a convoy in european truck simulator 3 online mode
-
-
-
| | Minimum | Recommended |
| --- | --- | --- |
| OS | Windows XP or Windows Vista | Windows XP or Windows Vista |
| CPU | 2.4 GHz Intel Pentium 4 or equivalent | 3.0 GHz Intel Pentium 4 or equivalent |
| GPU | 128 MB video card: GeForce 4 (not MX!) or better, ATI Radeon 8500 or better | 256 MB video card: GeForce 6 or better, ATI Radeon 9800 or better |
| RAM | 512 MB (1 GB on Windows Vista) | 1 GB (2 GB on Windows Vista) |
| HDD | 600 MB of free hard drive space | 1.5 GB of free hard drive space |
| DirectX | DirectX 9.0 | DirectX 9.0c |
-
-
ETS3 is also expected to be released for PS5 and Xbox consoles, according to some rumors and news sources. However, there is no official confirmation or announcement from the developers yet. The console versions of ETS3 may have different features and gameplay modes than the PC version, such as online multiplayer, controller support, achievements, and other options.
-
Why should you play European Truck Simulator 3?
-
If you are a fan of truck driving games, then you should definitely play ETS3. ETS3 is the best truck simulation game ever made, and it offers many reasons to play it. Here are some of the main reasons why you should play ETS3:
-
The realistic and immersive truck driving experience
-
ETS3 gives you the opportunity to experience what it is like to be a real truck driver. You can drive various trucks and trailers across Europe, following the traffic rules, managing your fuel, damage, fatigue, and other factors that affect your driving performance. You can also interact with other drivers, hire employees, buy garages, and expand your business. You can feel the realistic and immersive truck physics system that makes driving feel like real life. You can also hear the authentic engine sounds, horn sounds, tire sounds, and other sound effects that add to the atmosphere. You can also adjust the camera angles, mirrors, lights, indicators, wipers, cruise control, and other controls to suit your preference.
-
The variety and customization of trucks and trailers
-
ETS3 lets you choose from different truck models, chassis configurations, customizations, and cosmetics. You can select from over 50 licensed truck brands, such as Volvo, Scania, Mercedes-Benz, MAN, DAF, Renault, Iveco, and more. You can also customize your truck with different engines, transmissions, axles, suspensions, tires, colors, paint jobs, stickers, accessories, and more. You can also choose from different types of trailers, such as flatbeds, curtainsiders, refrigerated, tankers, low loaders, car carriers, and more. You can also customize your trailer with different cargoes, weights, lengths, widths, heights, colors, paint jobs, stickers, accessories, and more.
-
The exploration and discovery of Europe
-
ETS3 lets you explore and discover Europe in a way that no other game can. You can travel across highways, country roads, urban roads, mountain roads, tunnels, bridges, tolls, ferries, and other types of roads. You can see the landmarks, buildings, landscapes, weather conditions, day and night cycle, seasons, and other elements that make each location unique. You can also encounter different types of traffic vehicles, pedestrians, animals, events, accidents, roadworks, police patrols, tolls, and other situations that make your journey more dynamic and unpredictable. You can visit dozens of cities and countries in Europe, such as London, Paris, Berlin, Rome, Madrid, Amsterdam, Stockholm, Warsaw, Prague, Vienna, Zurich, Lisbon, Dublin, and more.
-
How to download and install European Truck Simulator 3 APK?
-
If you want to play ETS3 on your Android device, you will need to download and install ETS3 APK. ETS3 APK is a file that contains the game data and allows you to install it on your device without using the Google Play Store or any other app store. However, downloading and installing ETS3 APK is not as simple as it sounds. There are some risks and challenges involved in this process. Here are some of the steps and precautions you need to follow to download and install ETS3 APK safely and successfully:
-
The steps to download and install ETS3 APK on Android devices
-
-
Find a reliable and trustworthy source for ETS3 APK. There are many websites that claim to offer ETS3 APK for free download, but not all of them are safe and legitimate. Some of them may contain malware or viruses that can harm your device or steal your personal information. Some of them may also offer fake or outdated versions of ETS3 APK that may not work properly or at all. Therefore, you need to be careful and do some research before downloading ETS3 APK from any source.
-
Download ETS3 APK file to your device. Once you have found a reliable and trustworthy source for ETS3 APK, you can download the file to your device. You may need to enable the option to download files from unknown sources in your device settings. You may also need to grant some permissions to the source website or app to access your device storage. You should also check the file size and name before downloading it, and make sure it matches the expected values.
-
Install ETS3 APK on your device. After downloading ETS3 APK file to your device, you can install it by tapping on it. You may need to confirm the installation and grant some permissions to the game app to access your device features. You should also read the terms and conditions and privacy policy of the game app before installing it. You may also need to verify your device compatibility and security before installing it.
-
Launch ETS3 APK on your device. After installing ETS3 APK on your device, you can launch it by tapping on its icon. You may need to sign in with your account or create a new one to access the game features. You may also need to download some additional data or updates for the game to run smoothly. You should also check the game settings and adjust them according to your preference and device performance.
-
-
The precautions and tips to avoid malware and viruses
-
Downloading and installing ETS3 APK on your Android device can be risky and challenging, as there are many potential threats and problems that can occur. Here are some of the precautions and tips you need to follow to avoid malware and viruses when downloading and installing ETS3 APK:
-
-
Use a reputable antivirus or security app on your device. You should always have a reliable and updated antivirus or security app on your device that can scan and protect your device from malware and viruses. You should also run a full scan of your device before and after downloading and installing ETS3 APK, and delete any suspicious or harmful files or apps.
-
Use a secure and stable internet connection. You should always use a secure and stable internet connection when downloading and installing ETS3 APK, as a weak or unstable connection can cause errors or interruptions in the process. You should also avoid using public or unsecured Wi-Fi networks, as they can expose your device to hackers or cyberattacks.
-
Use a backup or recovery tool on your device. You should always have a backup or recovery tool on your device that can save and restore your data and settings in case of any damage or loss. You should also backup your data and settings before and after downloading and installing ETS3 APK, and restore them if needed.
-
Use a trusted and verified source for ETS3 APK. You should always use a trusted and verified source for ETS3 APK, as an untrusted or unverified source can provide fake or infected files or apps that can harm your device or steal your personal information. You should also check the reviews, ratings, comments, feedback, and reputation of the source before downloading and installing ETS3 APK, and avoid any source that has negative or suspicious signs.
-
-
Conclusion
-
-Euro Truck Simulator 3 (ETS3) is a truck simulation game that lets you drive various trucks and trailers across Europe. It promises to be the best truck driving game ever made, offering a realistic and immersive driving experience, a wide variety of customizable trucks and trailers, and the chance to explore and discover Europe. It is expected to be released in 2028 for PC, PS5, and Xbox consoles, according to some rumors and news sources. However, there is no official confirmation or announcement from the developers yet.
-
-If you want to play ETS3 on your Android device, you will need to download and install the ETS3 APK, a package file installed outside the Google Play Store or any other app store. As explained above, this process carries real risks and challenges, so follow the steps and precautions in this article to do it safely and successfully.
-
We hope this article has helped you understand what ETS3 is, why you should play it, and how to download and install ETS3 APK on your Android device. If you have any questions, comments, or feedback, please feel free to share them with us. We would love to hear from you. Thank you for reading and happy trucking!
-
FAQs
-
Here are some of the frequently asked questions (FAQs) about ETS3 and ETS3 APK:
-
-
What is the difference between ETS3 and ETS2?
-
-ETS3 and ETS2 are both truck simulation games developed by SCS Software, but they differ in several ways. ETS3 is the upcoming installment and is expected to be the most realistic entry in the series, while ETS2 is the current and most popular one. ETS3 is expected to have an improved graphics and physics engine, more truck models and customization options, more trailer types and cargoes, more cities and countries, more road types and situations, and more features and gameplay modes than ETS2. However, ETS3 is not yet released, while ETS2 is available for PC, Mac, Linux, PS4, and Xbox One.
-
Is ETS3 APK safe and legal to download and install?
-
ETS3 APK is not safe or legal to download and install on your Android device. ETS3 APK is a file that contains the game data and allows you to install it on your device without using the Google Play Store or any other app store. However, this file is not authorized or verified by the developers or the app stores, and it may contain malware or viruses that can harm your device or steal your personal information. It may also violate the terms and conditions and privacy policy of the game app and the app stores, and it may result in legal actions or penalties. Therefore, you should avoid downloading and installing ETS3 APK on your Android device.
-
How much does ETS3 cost and where can I buy it?
-
-ETS3 is expected to cost around $40-$60 USD for PC, PS5, and Xbox consoles, according to some rumors and news sources. However, there is no official confirmation or announcement from the developers yet. You will be able to buy ETS3 from the official website of SCS Software or from other online or offline retailers that sell video games, but you will have to wait until it is released, which may take a few years.
-
Can I play ETS3 online with other players?
-
ETS3 may have an online multiplayer mode that allows you to play with other players around the world. You may be able to join or create a convoy of trucks, chat with other drivers, compete in races or challenges, cooperate in missions or deliveries, share your customizations or screenshots, and more. However, there is no official confirmation or announcement from the developers yet about the online multiplayer mode of ETS3.
-
Can I mod ETS3 or use mods from ETS2?
-
-ETS3 may have modding support that allows you to create or use mods for the game. Mods are modifications that change or add something to the game, such as new trucks, trailers, cargoes, maps, roads, traffic, weather, sounds, graphics, gameplay features, and more. However, there is no official confirmation or announcement from the developers yet about modding support in ETS3. You may not be able to use ETS2 mods in ETS3, as they may not be compatible with or updated for the new game.
-
-
\ No newline at end of file
diff --git a/spaces/1toTree/lora_test/ppdiffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py b/spaces/1toTree/lora_test/ppdiffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
deleted file mode 100644
index 0a21dd3557faf946765d1953add66ceb7ff3736e..0000000000000000000000000000000000000000
--- a/spaces/1toTree/lora_test/ppdiffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
+++ /dev/null
@@ -1,548 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-from typing import Callable, List, Optional, Union
-
-import numpy as np
-import paddle
-import PIL
-from packaging import version
-
-from paddlenlp.transformers import CLIPFeatureExtractor, XLMRobertaTokenizer
-
-from ...configuration_utils import FrozenDict
-from ...models import AutoencoderKL, UNet2DConditionModel
-from ...pipeline_utils import DiffusionPipeline
-from ...schedulers import (
- DDIMScheduler,
- DPMSolverMultistepScheduler,
- EulerAncestralDiscreteScheduler,
- EulerDiscreteScheduler,
- LMSDiscreteScheduler,
- PNDMScheduler,
-)
-from ...utils import PIL_INTERPOLATION, deprecate, logging
-from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-from . import AltDiffusionPipelineOutput, RobertaSeriesModelWithTransformation
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
-def preprocess(image):
- if isinstance(image, paddle.Tensor):
- return image
- elif isinstance(image, PIL.Image.Image):
- image = [image]
-
- if isinstance(image[0], PIL.Image.Image):
- w, h = image[0].size
- w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
-
- image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
- image = np.concatenate(image, axis=0)
- image = np.array(image).astype(np.float32) / 255.0
- image = image.transpose(0, 3, 1, 2)
- image = 2.0 * image - 1.0
- image = paddle.to_tensor(image)
- elif isinstance(image[0], paddle.Tensor):
- image = paddle.concat(image, axis=0)
- return image
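-
-# Note (added for clarity): for a single 512x512 RGB PIL image, preprocess() returns a
-# paddle.Tensor of shape [1, 3, 512, 512] with pixel values rescaled from [0, 255] to [-1, 1].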
-
-
-class AltDiffusionImg2ImgPipeline(DiffusionPipeline):
- r"""
- Pipeline for text-guided image to image generation using Alt Diffusion.
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving etc.)
-
- Args:
- vae ([`AutoencoderKL`]):
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
- text_encoder ([`RobertaSeriesModelWithTransformation`]):
- Frozen text-encoder. Alt Diffusion uses the text portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.RobertaSeriesModelWithTransformation),
- specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- tokenizer (`XLMRobertaTokenizer`):
- Tokenizer of class
- [XLMRobertaTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.XLMRobertaTokenizer).
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- safety_checker ([`StableDiffusionSafetyChecker`]):
- Classification module that estimates whether generated images could be considered offensive or harmful.
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
- feature_extractor ([`CLIPFeatureExtractor`]):
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
- """
- _optional_components = ["safety_checker", "feature_extractor"]
-
- def __init__(
- self,
- vae: AutoencoderKL,
- text_encoder: RobertaSeriesModelWithTransformation,
- tokenizer: XLMRobertaTokenizer,
- unet: UNet2DConditionModel,
- scheduler: Union[
- DDIMScheduler,
- PNDMScheduler,
- LMSDiscreteScheduler,
- EulerDiscreteScheduler,
- EulerAncestralDiscreteScheduler,
- DPMSolverMultistepScheduler,
- ],
- safety_checker: StableDiffusionSafetyChecker,
- feature_extractor: CLIPFeatureExtractor,
- requires_safety_checker: bool = True,
- ):
- super().__init__()
-
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
- deprecation_message = (
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
- " file"
- )
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(scheduler.config)
- new_config["steps_offset"] = 1
- scheduler._internal_dict = FrozenDict(new_config)
-
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
- deprecation_message = (
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
- )
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(scheduler.config)
- new_config["clip_sample"] = False
- scheduler._internal_dict = FrozenDict(new_config)
-
- if safety_checker is None and requires_safety_checker:
- logger.warning(
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
- " that you abide to the conditions of the Alt Diffusion license and do not expose unfiltered"
- " results in services or applications open to the public. PaddleNLP team, diffusers team and Hugging Face"
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
- )
- if safety_checker is not None and feature_extractor is None:
- raise ValueError(
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
- )
-
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_ppdiffusers_version") and version.parse(
- version.parse(unet.config._ppdiffusers_version).base_version
- ) < version.parse("0.9.0.dev0")
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
- deprecation_message = (
- "The configuration file of the unet has set the default `sample_size` to smaller than"
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
- " the `unet/config.json` file"
- )
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(unet.config)
- new_config["sample_size"] = 64
- unet._internal_dict = FrozenDict(new_config)
-
- self.register_modules(
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
- self.register_to_config(requires_safety_checker=requires_safety_checker)
-
- def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
- r"""
- Encodes the prompt into text encoder hidden states.
-
- Args:
-            prompt (`str` or `List[str]`):
- prompt to be encoded
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- do_classifier_free_guidance (`bool`):
- whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- """
- batch_size = len(prompt) if isinstance(prompt, list) else 1
-
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pd",
- )
- text_input_ids = text_inputs.input_ids
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pd").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not paddle.equal_all(
- text_input_ids, untruncated_ids
- ):
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
- logger.warning(
- "The following part of your input was truncated because XLM-Roberta can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = text_inputs.attention_mask
- else:
- attention_mask = None
-
- text_embeddings = self.text_encoder(
- text_input_ids,
- attention_mask=attention_mask,
- )
- text_embeddings = text_embeddings[0]
-
- # duplicate text embeddings for each generation per prompt, using mps friendly method
- bs_embed, seq_len, _ = text_embeddings.shape
- text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
- text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
-
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance:
- uncond_tokens: List[str]
- if negative_prompt is None:
- uncond_tokens = [""] * batch_size
- elif type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
- elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt]
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`."
- )
- else:
- uncond_tokens = negative_prompt
-
- max_length = text_input_ids.shape[-1]
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="pd",
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = uncond_input.attention_mask
- else:
- attention_mask = None
-
- uncond_embeddings = self.text_encoder(
- uncond_input.input_ids,
- attention_mask=attention_mask,
- )
- uncond_embeddings = uncond_embeddings[0]
-
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = uncond_embeddings.shape[1]
- uncond_embeddings = uncond_embeddings.tile([1, num_images_per_prompt, 1])
- uncond_embeddings = uncond_embeddings.reshape([batch_size * num_images_per_prompt, seq_len, -1])
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])
-
- return text_embeddings
-
- def run_safety_checker(self, image, dtype):
- if self.safety_checker is not None:
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pd")
- image, has_nsfw_concept = self.safety_checker(
- images=image, clip_input=safety_checker_input.pixel_values.cast(dtype)
- )
- else:
- has_nsfw_concept = None
- return image, has_nsfw_concept
-
- def decode_latents(self, latents):
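-        # 1 / 0.18215 undoes the latent scaling factor applied when images are encoded by the VAE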
- latents = 1 / 0.18215 * latents
- image = self.vae.decode(latents).sample
- image = (image / 2 + 0.5).clip(0, 1)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
- return image
-
- def prepare_extra_step_kwargs(self, generator, eta):
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
-
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
- return extra_step_kwargs
-
- def check_inputs(self, prompt, strength, callback_steps):
- if not isinstance(prompt, str) and not isinstance(prompt, list):
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if strength < 0 or strength > 1:
- raise ValueError(f"The value of strength should in [1.0, 1.0] but is {strength}")
-
- if (callback_steps is None) or (
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
- ):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}."
- )
-
- def get_timesteps(self, num_inference_steps, strength):
- # get the original timestep using init_timestep
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
-
- t_start = max(num_inference_steps - init_timestep, 0)
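-        # e.g. with num_inference_steps=50 and strength=0.8: init_timestep=40 and t_start=10,
-        # so the first 10 scheduler timesteps are skipped and 40 denoising steps remain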
- timesteps = self.scheduler.timesteps[t_start:]
-
- return timesteps, num_inference_steps - t_start
-
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, generator=None):
- image = image.cast(dtype=dtype)
- batch_size = batch_size * num_images_per_prompt
- if isinstance(generator, list) and len(generator) != batch_size:
- raise ValueError(
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
- )
-
- if isinstance(generator, list):
- init_latents = [
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
- ]
- init_latents = paddle.concat(init_latents, axis=0)
- else:
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
-
- init_latents = 0.18215 * init_latents
-
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
- # expand init_latents for batch_size
- deprecation_message = (
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
- " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
- " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
- " your script to pass as many initial images as text prompts to suppress this warning."
- )
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
- additional_image_per_prompt = batch_size // init_latents.shape[0]
- init_latents = paddle.concat([init_latents] * additional_image_per_prompt, axis=0)
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
- raise ValueError(
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
- )
- else:
- init_latents = paddle.concat([init_latents], axis=0)
-
- shape = init_latents.shape
- if isinstance(generator, list):
- shape = [
- 1,
- ] + shape[1:]
- noise = [paddle.randn(shape, generator=generator[i], dtype=dtype) for i in range(batch_size)]
- noise = paddle.concat(noise, axis=0)
- else:
- noise = paddle.randn(shape, generator=generator, dtype=dtype)
-
- # get latents
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
- latents = init_latents
-
- return latents
-
- @paddle.no_grad()
- def __call__(
- self,
- prompt: Union[str, List[str]],
- image: Union[paddle.Tensor, PIL.Image.Image] = None,
- strength: float = 0.8,
- num_inference_steps: Optional[int] = 50,
- guidance_scale: Optional[float] = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: Optional[float] = 0.0,
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
- callback_steps: Optional[int] = 1,
- ):
- r"""
- Function invoked when calling the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`):
- The prompt or prompts to guide the image generation.
- image (`paddle.Tensor` or `PIL.Image.Image`):
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
- process.
- strength (`float`, *optional*, defaults to 0.8):
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
- `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
- number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
- noise will be maximum and the denoising process will run for the full number of iterations specified in
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference. This parameter will be modulated by `strength`.
- guidance_scale (`float`, *optional*, defaults to 7.5):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- eta (`float`, *optional*, defaults to 0.0):
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
- [`schedulers.DDIMScheduler`], will be ignored for others.
- generator (`paddle.Generator`, *optional*):
- One or a list of paddle generator(s) to make generation deterministic.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generate image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] instead of a
- plain tuple.
- callback (`Callable`, *optional*):
- A function that will be called every `callback_steps` steps during inference. The function will be
- called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function will be called. If not specified, the callback will be
- called at every step.
-
- Returns:
- [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] or `tuple`:
-            [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
- When returning a tuple, the first element is a list with the generated images, and the second element is a
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
- (nsfw) content, according to the `safety_checker`.
- """
- # 1. Check inputs
- self.check_inputs(prompt, strength, callback_steps)
-
- # 2. Define call parameters
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
-
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- # 3. Encode input prompt
- text_embeddings = self._encode_prompt(
- prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
- )
-
- # 4. Preprocess image
- image = preprocess(image)
-
- # 5. set timesteps
- self.scheduler.set_timesteps(num_inference_steps)
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)
- latent_timestep = timesteps[:1].tile([batch_size * num_images_per_prompt])
-
- # 6. Prepare latent variables
- latents = self.prepare_latents(
- image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, generator
- )
-
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # 8. Denoising loop
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
- with self.progress_bar(total=num_inference_steps) as progress_bar:
- for i, t in enumerate(timesteps):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
- # predict the noise residual
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- # call the callback, if provided
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
- progress_bar.update()
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- # 9. Post-processing
- image = self.decode_latents(latents)
-
- # 10. Run safety checker
- image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype)
-
- # 11. Convert to PIL
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image, has_nsfw_concept)
-
- return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
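-
-
-# A minimal usage sketch for this img2img pipeline (added for illustration, not part of the
-# original module). It assumes ppdiffusers and its Paddle dependencies are installed; the
-# checkpoint id, file names, and prompt below are illustrative placeholders.
-if __name__ == "__main__":
-    import PIL.Image
-
-    pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion")  # illustrative checkpoint id
-    init_image = PIL.Image.open("input.png").convert("RGB").resize((512, 512))
-    result = pipe(prompt="a watercolor landscape", image=init_image, strength=0.6, guidance_scale=7.5)
-    result.images[0].save("img2img_result.png")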
diff --git a/spaces/44ov41za8i/FreeVC/models.py b/spaces/44ov41za8i/FreeVC/models.py
deleted file mode 100644
index 46b8aacb1bef18f6fad4c20c968b19125626799c..0000000000000000000000000000000000000000
--- a/spaces/44ov41za8i/FreeVC/models.py
+++ /dev/null
@@ -1,351 +0,0 @@
-import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class Encoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(weight_norm(
- ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
- k, u, padding=(k-u)//2)))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel//(2**(i+1))
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i*self.num_kernels+j](x)
- else:
- xs += self.resblocks[i*self.num_kernels+j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2,3,5,7,11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class SpeakerEncoder(torch.nn.Module):
- def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256):
- super(SpeakerEncoder, self).__init__()
- self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True)
- self.linear = nn.Linear(model_hidden_size, model_embedding_size)
- self.relu = nn.ReLU()
-
- def forward(self, mels):
- self.lstm.flatten_parameters()
- _, (hidden, _) = self.lstm(mels)
- embeds_raw = self.relu(self.linear(hidden[-1]))
- return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
-
- def compute_partial_slices(self, total_frames, partial_frames, partial_hop):
- mel_slices = []
- for i in range(0, total_frames-partial_frames, partial_hop):
- mel_range = torch.arange(i, i+partial_frames)
- mel_slices.append(mel_range)
-
- return mel_slices
-
- def embed_utterance(self, mel, partial_frames=128, partial_hop=64):
- mel_len = mel.size(1)
- last_mel = mel[:,-partial_frames:]
-
- if mel_len > partial_frames:
- mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop)
- mels = list(mel[:,s] for s in mel_slices)
- mels.append(last_mel)
- mels = torch.stack(tuple(mels), 0).squeeze(1)
-
- with torch.no_grad():
- partial_embeds = self(mels)
- embed = torch.mean(partial_embeds, axis=0).unsqueeze(0)
- #embed = embed / torch.linalg.norm(embed, 2)
- else:
- with torch.no_grad():
- embed = self(last_mel)
-
- return embed
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- ssl_dim,
- use_spk,
- **kwargs):
-
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- self.ssl_dim = ssl_dim
- self.use_spk = use_spk
-
- self.enc_p = Encoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16)
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
- self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
- if not self.use_spk:
- self.enc_spk = SpeakerEncoder(model_hidden_size=gin_channels, model_embedding_size=gin_channels)
-
- def forward(self, c, spec, g=None, mel=None, c_lengths=None, spec_lengths=None):
-    if c_lengths is None:
-      c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
-    if spec_lengths is None:
-      spec_lengths = (torch.ones(spec.size(0)) * spec.size(-1)).to(spec.device)
-
- if not self.use_spk:
- g = self.enc_spk(mel.transpose(1,2))
- g = g.unsqueeze(-1)
-
- _, m_p, logs_p, _ = self.enc_p(c, c_lengths)
- z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g)
- z_p = self.flow(z, spec_mask, g=g)
-
- z_slice, ids_slice = commons.rand_slice_segments(z, spec_lengths, self.segment_size)
- o = self.dec(z_slice, g=g)
-
- return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, c, g=None, mel=None, c_lengths=None):
-    if c_lengths is None:
- c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
- if not self.use_spk:
- g = self.enc_spk.embed_utterance(mel.transpose(1,2))
- g = g.unsqueeze(-1)
-
- z_p, m_p, logs_p, c_mask = self.enc_p(c, c_lengths)
- z = self.flow(z_p, c_mask, g=g, reverse=True)
- o = self.dec(z * c_mask, g=g)
-
- return o
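-
-
-# A small shape-check sketch for SpeakerEncoder above (added for illustration, not part of the
-# original module; it assumes the module's own imports resolve). It runs a random log-mel
-# sequence through embed_utterance and prints the embedding shape; the frame count is arbitrary.
-if __name__ == "__main__":
-  enc = SpeakerEncoder(mel_n_channels=80, model_hidden_size=256, model_embedding_size=256)
-  dummy_mel = torch.randn(1, 400, 80)  # (batch, frames, mel bins)
-  emb = enc.embed_utterance(dummy_mel, partial_frames=128, partial_hop=64)
-  print(emb.shape)  # expected: [1, 256]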
diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/partial_fc.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/partial_fc.py
deleted file mode 100644
index 17e2d25715d10ba446c957e1d2528b0687ed71d5..0000000000000000000000000000000000000000
--- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/partial_fc.py
+++ /dev/null
@@ -1,222 +0,0 @@
-import logging
-import os
-
-import torch
-import torch.distributed as dist
-from torch.nn import Module
-from torch.nn.functional import normalize, linear
-from torch.nn.parameter import Parameter
-
-
-class PartialFC(Module):
- """
- Author: {Xiang An, Yang Xiao, XuHan Zhu} in DeepGlint,
- Partial FC: Training 10 Million Identities on a Single Machine
- See the original paper:
- https://arxiv.org/abs/2010.05222
- """
-
- @torch.no_grad()
- def __init__(self, rank, local_rank, world_size, batch_size, resume,
- margin_softmax, num_classes, sample_rate=1.0, embedding_size=512, prefix="./"):
- """
- rank: int
- Unique process(GPU) ID from 0 to world_size - 1.
- local_rank: int
- Unique process(GPU) ID within the server from 0 to 7.
- world_size: int
- Number of GPU.
- batch_size: int
- Batch size on current rank(GPU).
- resume: bool
- Select whether to restore the weight of softmax.
- margin_softmax: callable
- A function of margin softmax, eg: cosface, arcface.
- num_classes: int
- The number of class center storage in current rank(CPU/GPU), usually is total_classes // world_size,
- required.
- sample_rate: float
- The partial fc sampling rate, when the number of classes increases to more than 2 millions, Sampling
- can greatly speed up training, and reduce a lot of GPU memory, default is 1.0.
- embedding_size: int
- The feature dimension, default is 512.
- prefix: str
- Path for save checkpoint, default is './'.
- """
- super(PartialFC, self).__init__()
- #
- self.num_classes: int = num_classes
- self.rank: int = rank
- self.local_rank: int = local_rank
- self.device: torch.device = torch.device("cuda:{}".format(self.local_rank))
- self.world_size: int = world_size
- self.batch_size: int = batch_size
- self.margin_softmax: callable = margin_softmax
- self.sample_rate: float = sample_rate
- self.embedding_size: int = embedding_size
- self.prefix: str = prefix
- self.num_local: int = num_classes // world_size + int(rank < num_classes % world_size)
- self.class_start: int = num_classes // world_size * rank + min(rank, num_classes % world_size)
- self.num_sample: int = int(self.sample_rate * self.num_local)
-
- self.weight_name = os.path.join(self.prefix, "rank_{}_softmax_weight.pt".format(self.rank))
- self.weight_mom_name = os.path.join(self.prefix, "rank_{}_softmax_weight_mom.pt".format(self.rank))
-
- if resume:
- try:
- self.weight: torch.Tensor = torch.load(self.weight_name)
- self.weight_mom: torch.Tensor = torch.load(self.weight_mom_name)
- if self.weight.shape[0] != self.num_local or self.weight_mom.shape[0] != self.num_local:
- raise IndexError
- logging.info("softmax weight resume successfully!")
- logging.info("softmax weight mom resume successfully!")
- except (FileNotFoundError, KeyError, IndexError):
- self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
- self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
- logging.info("softmax weight init!")
- logging.info("softmax weight mom init!")
- else:
- self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
- self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
- logging.info("softmax weight init successfully!")
- logging.info("softmax weight mom init successfully!")
- self.stream: torch.cuda.Stream = torch.cuda.Stream(local_rank)
-
- self.index = None
- if int(self.sample_rate) == 1:
- self.update = lambda: 0
- self.sub_weight = Parameter(self.weight)
- self.sub_weight_mom = self.weight_mom
- else:
- self.sub_weight = Parameter(torch.empty((0, 0)).cuda(local_rank))
-
- def save_params(self):
- """ Save softmax weight for each rank on prefix
- """
- torch.save(self.weight.data, self.weight_name)
- torch.save(self.weight_mom, self.weight_mom_name)
-
- @torch.no_grad()
- def sample(self, total_label):
- """
- Sample all positive class centers in each rank, and random select neg class centers to filling a fixed
- `num_sample`.
-
- total_label: tensor
- Label after all gather, which cross all GPUs.
- """
- index_positive = (self.class_start <= total_label) & (total_label < self.class_start + self.num_local)
- total_label[~index_positive] = -1
- total_label[index_positive] -= self.class_start
- if int(self.sample_rate) != 1:
- positive = torch.unique(total_label[index_positive], sorted=True)
- if self.num_sample - positive.size(0) >= 0:
- perm = torch.rand(size=[self.num_local], device=self.device)
- perm[positive] = 2.0
- index = torch.topk(perm, k=self.num_sample)[1]
- index = index.sort()[0]
- else:
- index = positive
- self.index = index
- total_label[index_positive] = torch.searchsorted(index, total_label[index_positive])
- self.sub_weight = Parameter(self.weight[index])
- self.sub_weight_mom = self.weight_mom[index]
-
- def forward(self, total_features, norm_weight):
- """ Partial fc forward, `logits = X * sample(W)`
- """
- torch.cuda.current_stream().wait_stream(self.stream)
- logits = linear(total_features, norm_weight)
- return logits
-
- @torch.no_grad()
- def update(self):
- """ Set updated weight and weight_mom to memory bank.
- """
- self.weight_mom[self.index] = self.sub_weight_mom
- self.weight[self.index] = self.sub_weight
-
- def prepare(self, label, optimizer):
- """
- get sampled class centers for cal softmax.
-
- label: tensor
- Label tensor on each rank.
- optimizer: opt
- Optimizer for partial fc, which need to get weight mom.
- """
- with torch.cuda.stream(self.stream):
- total_label = torch.zeros(
- size=[self.batch_size * self.world_size], device=self.device, dtype=torch.long)
- dist.all_gather(list(total_label.chunk(self.world_size, dim=0)), label)
- self.sample(total_label)
- optimizer.state.pop(optimizer.param_groups[-1]['params'][0], None)
- optimizer.param_groups[-1]['params'][0] = self.sub_weight
- optimizer.state[self.sub_weight]['momentum_buffer'] = self.sub_weight_mom
- norm_weight = normalize(self.sub_weight)
- return total_label, norm_weight
-
- def forward_backward(self, label, features, optimizer):
- """
- Partial fc forward and backward with model parallel
-
- label: tensor
- Label tensor on each rank(GPU)
- features: tensor
- Features tensor on each rank(GPU)
- optimizer: optimizer
- Optimizer for partial fc
-
- Returns:
- --------
- x_grad: tensor
- The gradient of features.
- loss_v: tensor
- Loss value for cross entropy.
- """
- total_label, norm_weight = self.prepare(label, optimizer)
- total_features = torch.zeros(
- size=[self.batch_size * self.world_size, self.embedding_size], device=self.device)
- dist.all_gather(list(total_features.chunk(self.world_size, dim=0)), features.data)
- total_features.requires_grad = True
-
- logits = self.forward(total_features, norm_weight)
- logits = self.margin_softmax(logits, total_label)
-
- with torch.no_grad():
- max_fc = torch.max(logits, dim=1, keepdim=True)[0]
- dist.all_reduce(max_fc, dist.ReduceOp.MAX)
-
- # calculate exp(logits) and all-reduce
- logits_exp = torch.exp(logits - max_fc)
- logits_sum_exp = logits_exp.sum(dim=1, keepdims=True)
- dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM)
-
- # calculate prob
- logits_exp.div_(logits_sum_exp)
-
- # get one-hot
- grad = logits_exp
- index = torch.where(total_label != -1)[0]
- one_hot = torch.zeros(size=[index.size()[0], grad.size()[1]], device=grad.device)
- one_hot.scatter_(1, total_label[index, None], 1)
-
- # calculate loss
- loss = torch.zeros(grad.size()[0], 1, device=grad.device)
- loss[index] = grad[index].gather(1, total_label[index, None])
- dist.all_reduce(loss, dist.ReduceOp.SUM)
- loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1)
-
- # calculate grad
- grad[index] -= one_hot
- grad.div_(self.batch_size * self.world_size)
-
- logits.backward(grad)
- if total_features.grad is not None:
- total_features.grad.detach_()
- x_grad: torch.Tensor = torch.zeros_like(features, requires_grad=True)
- # feature gradient all-reduce
- dist.reduce_scatter(x_grad, list(total_features.grad.chunk(self.world_size, dim=0)))
- x_grad = x_grad * self.world_size
- # backward backbone
- return x_grad, loss_v
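-
-
-# A self-contained sketch of the class-center sampling idea implemented in sample() above
-# (added for illustration, not part of the original module): positive classes present in the
-# batch are always kept, and random negatives fill the remaining slots up to num_sample.
-# The sizes below are arbitrary.
-if __name__ == "__main__":
-    num_local, num_sample = 1000, 100
-    labels = torch.randint(0, num_local, (32,))           # labels local to this rank's shard
-    positive = torch.unique(labels, sorted=True)
-    perm = torch.rand(num_local)
-    perm[positive] = 2.0                                   # guarantee every positive is selected
-    index = torch.topk(perm, k=num_sample)[1].sort()[0]    # sampled class-center indices
-    remapped = torch.searchsorted(index, labels)           # labels remapped into the sampled space
-    print(index.shape, remapped.min().item(), remapped.max().item())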
diff --git a/spaces/AIatUIUC/CodeLATS/lats/lats.py b/spaces/AIatUIUC/CodeLATS/lats/lats.py
deleted file mode 100644
index 7a8602e19f82aa3e2efcbd27c86d664681512628..0000000000000000000000000000000000000000
--- a/spaces/AIatUIUC/CodeLATS/lats/lats.py
+++ /dev/null
@@ -1,233 +0,0 @@
-from utils import enumerate_resume, make_printv, write_jsonl, resume_success_count
-from executors import executor_factory
-from generators import generator_factory, model_factory
-from typing import List, Dict, Any
-import math
-from typing import Tuple
-import sys
-import random
-
-sys.set_int_max_str_digits(100000) # Increase the limit to 100000 digits (requires Python 3.11+)
-
-react_prompt_header = "Here are some previous solutions and the corresponding test results.\n"
-react_prompt_starter = "\n\nYour solution:\n"
-extra_header = "\n\nName the function answer()"
-
-class Node:
- def __init__(self, solution: str, parent=None, context="", depth=0):
- self.solution = solution
- self.parent = parent
- self.children = []
- self.value = 0
- self.visits = 0
-        self.context = context
- self.depth = depth
- self.reflection = ""
- self.test_feedback = ""
-
- def uct(self, exploration_weight=1.0):
- if self.visits == 0:
- #return float('inf')
- return self.value
- return (self.value / self.visits) + exploration_weight * math.sqrt(math.log(self.parent.visits) / self.visits)
-
- def best_child(self):
- if not self.children: # Check if children list is empty
- return None
- return max(self.children, key=lambda child: child.uct())
-
- def best_child_value(self):
- if not self.children: # Check if children list is empty
- return None
- return max(self.children, key=lambda child: child.value)
-
- def update(self, reward: float):
- self.visits += 1
- self.value += reward
-
-
-def prune_context_blocks(context: str, max_length: int) -> str:
- """Prune the context to fit within the specified max_length by removing entire blocks of content using 'trial' as a delimiter."""
- if len(context) <= max_length:
- return context
-
- # Split by the block delimiter "trial".
- blocks = context.split('Previous Trial')
-
- # Remove the earliest blocks until the context fits within max_length.
- while len('trial'.join(blocks)) > max_length and blocks:
- blocks.pop(0)
-
- return 'trial'.join(blocks)
-
-def gather_context_from_tree(node: Node) -> Tuple[List[str], List[str]]:
- """
- Given a node, walk up its tree and gather the feedback and reflections
- from each parent node until the root is reached.
-
- Args:
- node (Node): The node to start gathering context from.
-
- Returns:
- Tuple[List[str], List[str]]: Two lists containing the accumulated feedback and reflections.
- """
- accumulated_feedback = []
- accumulated_reflection = []
- num_nodes = 0
-
- while node and num_nodes < 2:
- num_nodes += 1
- if node.test_feedback:
- accumulated_feedback.append(node.test_feedback)
- if node.reflection:
- accumulated_reflection.append(node.reflection)
- node = node.parent
-
- # Reverse the lists so that the context from the earliest nodes is first
- return accumulated_feedback[::-1], accumulated_reflection[::-1]
-
-def sample_n_random(items: List[str], n: int) -> List[str]:
- """Sample min(n, len(items)) random items from a list"""
- assert n >= 0
- if n >= len(items):
- return items
- return random.sample(items, n)
-
-def run_lats(
- model_name: str,
- language: str,
- max_iters: int,
- verbose: bool,
- instruction: str = "Write some code to print Hello World in Python",
- n_samples: int = 3,
- depth: int = 5,
-) -> None:
- exe = executor_factory(language)
- gen = generator_factory(language)
- model = model_factory(model_name)
-
-
- num_success = 0 # Counter for successful solutions
- cur_func_impl = None
-
- item = {}
-
- #for idx, item in enumerate(dataset):
-
- tests = gen.internal_tests(instruction + extra_header, model, 1)
- tests_i = sample_n_random(tests, 1)
-
- while cur_func_impl is None:
- cur_func_impl = gen.func_impl(instruction + extra_header, model, "simple")
- root = Node(cur_func_impl) # initial solution (for pass@1 metric)
-
- # Lists for logging
- reflections = []
- implementations = []
- test_feedback = []
- is_solved = False
-
- # first attempt
-
- implementations.append(cur_func_impl)
- assert isinstance(cur_func_impl, str)
- is_passing, feedback, _ = exe.execute(cur_func_impl, tests_i)
- test_feedback.append(feedback)
-
- # if solved, exit early
- if is_passing:
- num_success += 1
- return cur_func_impl # GET SOLUTION
-
- reflection = gen.self_reflection(cur_func_impl, feedback, model)
- reflections += [reflection]
- root.test_feedback = feedback
- root.reflection = reflection
- max_iters = int(max_iters)
- for cur_iter in range(max_iters):
- # Selection
- tests_i = sample_n_random(tests, 1)
-
- node = root
- trajectory = {
- 'solutions': [],
- 'feedbacks': []
- }
-
- while node.children:
- node = node.best_child()
- trajectory['solutions'].append(node.solution)
-
- # Expansion
- for _ in range(n_samples):
- new_solution = None
- strategy = "mcts"
- prev_func_impl = node.solution
- feedback = node.test_feedback
- reflection = node.reflection
- acc_feedback, acc_reflection = gather_context_from_tree(node)
-
- while new_solution is None:
- new_solution = gen.func_impl(
- func_sig=instruction+extra_header,
- model=model,
- strategy=strategy,
- prev_func_impl=prev_func_impl,
- feedback=feedback,
- self_reflection=reflection,
- acc_feedback = acc_feedback,
- acc_reflection = acc_reflection
- )
-
- combined_context = "\nPrevious Trial\n\n" + new_solution
-
- child = Node(new_solution, parent=node, context=combined_context, depth=node.depth + 1)
- node.children.append(child)
-
- # Simulation
- reward_real = 0
- for child in node.children:
- is_passing_internal, feedback_internal, _ = exe.execute(child.solution, tests_i)
- if not is_passing_internal:
- reflection = gen.self_reflection(child.solution, feedback_internal, model)
- reflections.append(reflection)
- child.reflection = reflection
- child.test_feedback = feedback_internal
- child.context += "\n\nPrevious Trial\n\n" + child.solution + "\n\nTest results: \n" + feedback_internal + "\n\nSelf-reflection: " + reflection
- else:
- child.context += "\n\nPrevious Trial\n\n" + child.solution + "\n\nTest results: \n" + feedback_internal
- child.reflection = ""
- child.test_feedback = feedback_internal
-
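-                # The executor's feedback string is assumed to list passing tests under a
-                # "Tested passed:" header and failing tests under "Tests failed:"; the
-                # reward is the fraction of the sampled internal tests that passed.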
- if "Tested passed:" in feedback_internal:
- # Split at "Tests failed:" and get the part before it (which contains the passed tests)
- passed_section = feedback_internal.split("Tests failed:")[0]
- # Split at "Tested passed:" and get the part after it, then count the non-empty lines
- reward_internal = len([line for line in passed_section.split("Tested passed:")[1].splitlines() if line.strip() != ''])
- reward_internal = reward_internal / len(tests_i)
- else:
- reward_internal = 0
-                if is_passing_internal or cur_iter == max_iters - 1:
-                    is_solved = is_passing_internal
-                    item["solution"] = child.solution
-                    break
-
- if is_solved:
- break
-
- reward = reward_internal + reward_real
- child.update(reward)
-
- # Backpropagation
- temp = child
- while temp.parent:
- temp = temp.parent
- temp.update(reward)
-
- # Choose the best solution after all iterations
- if is_solved:
- best_solution = item["solution"]
- else:
- best_solution = root.best_child_value().solution
- item["solution"] = best_solution
-
- return best_solution
\ No newline at end of file
diff --git a/spaces/AONYLMR/White-box-Cartoonization/app.py b/spaces/AONYLMR/White-box-Cartoonization/app.py
deleted file mode 100644
index c55ced56bd87a85f59d1c8ef84b7eca87422720f..0000000000000000000000000000000000000000
--- a/spaces/AONYLMR/White-box-Cartoonization/app.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-import argparse
-import functools
-import os
-import pathlib
-import sys
-from typing import Callable
-import uuid
-
-import gradio as gr
-import huggingface_hub
-import numpy as np
-import PIL.Image
-
-from io import BytesIO
-from wbc.cartoonize import Cartoonize
-
-ORIGINAL_REPO_URL = 'https://github.com/SystemErrorWang/White-box-Cartoonization'
-TITLE = 'SystemErrorWang/White-box-Cartoonization'
-DESCRIPTION = f"""This is a demo for {ORIGINAL_REPO_URL}.
-
-"""
-ARTICLE = """
-
-"""
-
-SAFEHASH = [x for x in "0123456789-abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
-def compress_UUID():
- '''
-    Following http://www.ietf.org/rfc/rfc1738.txt, compress a UUID into a shorter
-    string by re-encoding it over a larger character set.
-    Character set: [0-9a-zA-Z\-_], 64 characters in total.
-    Length: (32-2)/3*2 = 20 characters.
-    Note: with a ~2^120 id space, collisions are practically impossible.
-    :return: String
- '''
- row = str(uuid.uuid4()).replace('-', '')
- safe_code = ''
- for i in range(10):
- enbin = "%012d" % int(bin(int(row[i * 3] + row[i * 3 + 1] + row[i * 3 + 2], 16))[2:], 10)
- safe_code += (SAFEHASH[int(enbin[0:6], 2)] + SAFEHASH[int(enbin[6:12], 2)])
- safe_code = safe_code.replace('-', '')
- return safe_code
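-# Illustrative example only (the output is random on every call): compress_UUID()
-# returns a 20-character id drawn from [0-9a-zA-Z-_], e.g. 'mK3vZq1rTpA8dLx0Qn4B'.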
-
-
-def parse_args() -> argparse.Namespace:
- parser = argparse.ArgumentParser()
- parser.add_argument('--device', type=str, default='cpu')
- parser.add_argument('--theme', type=str)
- parser.add_argument('--live', action='store_true')
- parser.add_argument('--share', action='store_true')
- parser.add_argument('--port', type=int)
- parser.add_argument('--disable-queue',
- dest='enable_queue',
- action='store_false')
- parser.add_argument('--allow-flagging', type=str, default='never')
- parser.add_argument('--allow-screenshot', action='store_true')
- return parser.parse_args()
-
-def run(
- image,
- cartoonize : Cartoonize
-) -> tuple[PIL.Image.Image]:
-
- out_path = compress_UUID()+'.png'
- cartoonize.run_sigle(image.name, out_path)
-
- return PIL.Image.open(out_path)
-
-
-def main():
- gr.close_all()
-
- args = parse_args()
-
- cartoonize = Cartoonize(os.path.join(os.path.dirname(os.path.abspath(__file__)),'wbc/saved_models/'))
-
- func = functools.partial(run, cartoonize=cartoonize)
- func = functools.update_wrapper(func, run)
-
- gr.Interface(
- func,
- [
- gr.inputs.Image(type='file', label='Input Image'),
- ],
- [
- gr.outputs.Image(
- type='pil',
- label='Result'),
- ],
- # examples=examples,
- theme=args.theme,
- title=TITLE,
- description=DESCRIPTION,
- article=ARTICLE,
- allow_screenshot=args.allow_screenshot,
- allow_flagging=args.allow_flagging,
- live=args.live,
- ).launch(
- enable_queue=args.enable_queue,
- server_port=args.port,
- share=args.share,
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/Adapter/CoAdapter/ldm/util.py b/spaces/Adapter/CoAdapter/ldm/util.py
deleted file mode 100644
index dc9e3c48b1924fbc1ac3ecdf7a2192e1a46d9228..0000000000000000000000000000000000000000
--- a/spaces/Adapter/CoAdapter/ldm/util.py
+++ /dev/null
@@ -1,200 +0,0 @@
-import importlib
-import math
-
-import cv2
-import torch
-import numpy as np
-
-import os
-from safetensors.torch import load_file
-
-from inspect import isfunction
-from PIL import Image, ImageDraw, ImageFont
-
-
-def log_txt_as_img(wh, xc, size=10):
- # wh a tuple of (width, height)
- # xc a list of captions to plot
- b = len(xc)
- txts = list()
- for bi in range(b):
- txt = Image.new("RGB", wh, color="white")
- draw = ImageDraw.Draw(txt)
- font = ImageFont.truetype('assets/DejaVuSans.ttf', size=size)
- nc = int(40 * (wh[0] / 256))
- lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))
-
- try:
- draw.text((0, 0), lines, fill="black", font=font)
- except UnicodeEncodeError:
- print("Cant encode string for logging. Skipping.")
-
- txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
- txts.append(txt)
- txts = np.stack(txts)
- txts = torch.tensor(txts)
- return txts
-
-
-def ismap(x):
- if not isinstance(x, torch.Tensor):
- return False
- return (len(x.shape) == 4) and (x.shape[1] > 3)
-
-
-def isimage(x):
- if not isinstance(x, torch.Tensor):
- return False
- return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)
-
-
-def exists(x):
- return x is not None
-
-
-def default(val, d):
- if exists(val):
- return val
- return d() if isfunction(d) else d
-
-
-def mean_flat(tensor):
- """
- https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
- Take the mean over all non-batch dimensions.
- """
- return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def count_params(model, verbose=False):
- total_params = sum(p.numel() for p in model.parameters())
- if verbose:
- print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.")
- return total_params
-
-
-def instantiate_from_config(config):
- if not "target" in config:
- if config == '__is_first_stage__':
- return None
- elif config == "__is_unconditional__":
- return None
- raise KeyError("Expected key `target` to instantiate.")
- return get_obj_from_str(config["target"])(**config.get("params", dict()))
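-# Example (illustrative config, not from this repo):
-#     instantiate_from_config({"target": "torch.nn.Linear",
-#                              "params": {"in_features": 4, "out_features": 2}})
-# imports `torch.nn.Linear` and instantiates it with the given params.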
-
-
-def get_obj_from_str(string, reload=False):
- module, cls = string.rsplit(".", 1)
- if reload:
- module_imp = importlib.import_module(module)
- importlib.reload(module_imp)
- return getattr(importlib.import_module(module, package=None), cls)
-
-
-checkpoint_dict_replacements = {
- 'cond_stage_model.transformer.text_model.embeddings.': 'cond_stage_model.transformer.embeddings.',
- 'cond_stage_model.transformer.text_model.encoder.': 'cond_stage_model.transformer.encoder.',
- 'cond_stage_model.transformer.text_model.final_layer_norm.': 'cond_stage_model.transformer.final_layer_norm.',
-}
-
-
-def transform_checkpoint_dict_key(k):
- for text, replacement in checkpoint_dict_replacements.items():
- if k.startswith(text):
- k = replacement + k[len(text):]
-
- return k
-
-
-def get_state_dict_from_checkpoint(pl_sd):
- pl_sd = pl_sd.pop("state_dict", pl_sd)
- pl_sd.pop("state_dict", None)
-
- sd = {}
- for k, v in pl_sd.items():
- new_key = transform_checkpoint_dict_key(k)
-
- if new_key is not None:
- sd[new_key] = v
-
- pl_sd.clear()
- pl_sd.update(sd)
-
- return pl_sd
-
-
-def read_state_dict(checkpoint_file, print_global_state=False):
- _, extension = os.path.splitext(checkpoint_file)
- if extension.lower() == ".safetensors":
- pl_sd = load_file(checkpoint_file, device='cpu')
- else:
- pl_sd = torch.load(checkpoint_file, map_location='cpu')
-
- if print_global_state and "global_step" in pl_sd:
- print(f"Global Step: {pl_sd['global_step']}")
-
- sd = get_state_dict_from_checkpoint(pl_sd)
- return sd
-
-
-def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
- print(f"Loading model from {ckpt}")
- sd = read_state_dict(ckpt)
- model = instantiate_from_config(config.model)
- m, u = model.load_state_dict(sd, strict=False)
- if len(m) > 0 and verbose:
- print("missing keys:")
- print(m)
- if len(u) > 0 and verbose:
- print("unexpected keys:")
- print(u)
-
- if 'anything' in ckpt.lower() and vae_ckpt is None:
- vae_ckpt = 'models/anything-v4.0.vae.pt'
-
- if vae_ckpt is not None and vae_ckpt != 'None':
- print(f"Loading vae model from {vae_ckpt}")
- vae_sd = torch.load(vae_ckpt, map_location="cpu")
- if "global_step" in vae_sd:
- print(f"Global Step: {vae_sd['global_step']}")
- sd = vae_sd["state_dict"]
- m, u = model.first_stage_model.load_state_dict(sd, strict=False)
- if len(m) > 0 and verbose:
- print("missing keys:")
- print(m)
- if len(u) > 0 and verbose:
- print("unexpected keys:")
- print(u)
-
- model.cuda()
- model.eval()
- return model
-
-
-def resize_numpy_image(image, max_resolution=512 * 512, resize_short_edge=None):
- h, w = image.shape[:2]
- if resize_short_edge is not None:
- k = resize_short_edge / min(h, w)
- else:
- k = max_resolution / (h * w)
- k = k**0.5
- h = int(np.round(h * k / 64)) * 64
- w = int(np.round(w * k / 64)) * 64
- image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LANCZOS4)
- return image
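-# e.g. with the default max_resolution=512*512, an HxW image is rescaled so that
-# H*W is roughly 512*512, with both sides rounded to multiples of 64.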
-
-
-# make uc and prompt shapes match via padding for long prompts
-null_cond = None
-
-def fix_cond_shapes(model, prompt_condition, uc):
- if uc is None:
- return prompt_condition, uc
- global null_cond
- if null_cond is None:
- null_cond = model.get_learned_conditioning([""])
- while prompt_condition.shape[1] > uc.shape[1]:
- uc = torch.cat((uc, null_cond.repeat((uc.shape[0], 1, 1))), axis=1)
- while prompt_condition.shape[1] < uc.shape[1]:
- prompt_condition = torch.cat((prompt_condition, null_cond.repeat((prompt_condition.shape[0], 1, 1))), axis=1)
- return prompt_condition, uc
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/texttranslation.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/texttranslation.js
deleted file mode 100644
index cfc9efc10aac2b7ed442e71318457d95cff71161..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/texttranslation.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import TextTranslation from './behaviors/texttranslation/TextTranslation.js';
-export default TextTranslation;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/ColorInputBase.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/ColorInputBase.js
deleted file mode 100644
index 015b25f700f51254fa04343aad382184d2b24bc9..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/ColorInputBase.js
+++ /dev/null
@@ -1,145 +0,0 @@
-import Sizer from '../../sizer/Sizer.js';
-import CreateSwatch from './methods/CreateSwatch.js';
-import CreateInputText from '../../utils/build/CreateInputText.js';
-import ColorStringToInteger from '../../../../plugins/utils/color/ColorStringToInteger.js';
-import GetHexColorString from '../../../../plugins/utils/color/GetHexColorString.js';
-import SetSwatchColor from './methods/SetSwatchColor.js';
-import ResizeGameObject from '../../../../plugins/utils/size/ResizeGameObject.js';
-
-const GetValue = Phaser.Utils.Objects.GetValue;
-const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
-const Clamp = Phaser.Math.Clamp;
-
-class ColorInput extends Sizer {
- constructor(scene, config) {
- if (config === undefined) {
- config = {};
- }
- config.orientation = 0;
- super(scene, config);
- this.type = 'rexColorInputLite';
-
- // Add elements
- var background = GetValue(config, 'background', undefined);
-
- var swatchConfig = GetValue(config, 'swatch');
- var swatchSize;
- if (IsPlainObject(swatchConfig)) {
- swatchSize = GetValue(swatchConfig, 'size');
- }
- var swatch = CreateSwatch(scene, GetValue(config, 'swatch'));
-
- var inputTextConfig = GetValue(config, 'inputText', true);
- var inputText;
- if (inputTextConfig) {
- inputText = CreateInputText(scene, inputTextConfig);
- }
-
- if (background) {
- this.addBackground(background);
- }
-
- if (swatch) {
- swatchSize = GetValue(config, 'swatchSize', swatchSize);
- var squareExpandSwatch;
- if (swatchSize !== undefined) {
- ResizeGameObject(swatch, swatchSize, swatchSize);
- squareExpandSwatch = false;
- } else {
- squareExpandSwatch = GetValue(config, 'squareExpandSwatch', true);
- }
-
- var fitRatio = (squareExpandSwatch) ? 1 : 0;
- this.add(
- swatch,
- { proportion: 0, expand: false, fitRatio: fitRatio }
- );
- }
-
- if (inputText) {
- var proportion = (GetValue(inputTextConfig, 'width') === undefined) ? 1 : 0;
- var expand = (GetValue(inputTextConfig, 'height') === undefined) ? true : false;
- this.add(
- inputText,
- { proportion: proportion, expand: expand }
- )
- }
-
- this.addChildrenMap('background', background);
- this.addChildrenMap('swatch', swatch);
- this.addChildrenMap('inputText', inputText);
-
-
- if (inputText) {
- inputText.on('close', function () {
- this.setValue(inputText.value);
- }, this);
- }
-
- var callback = GetValue(config, 'valuechangeCallback', null);
- if (callback !== null) {
- var scope = GetValue(config, 'valuechangeCallbackScope', undefined);
- this.on('valuechange', callback, scope);
- }
-
- this.setValue(GetValue(config, 'value', 0x0));
- }
-
- get value() {
- return this._value;
- }
-
- set value(value) {
- if (typeof (value) === 'string') {
- value = ColorStringToInteger(value);
- if (value == null) {
- var inputText = this.childrenMap.inputText;
- if (inputText) {
- inputText.setText(GetHexColorString(this._value));
- }
- return;
- }
- } else {
- value = Clamp(Math.floor(value), 0, 0xffffff);
- }
-
- if (this._value === value) {
- return;
- }
-
- this._value = value;
-
- var swatch = this.childrenMap.swatch;
- if (swatch) {
- SetSwatchColor(swatch, value);
- }
-
- var inputText = this.childrenMap.inputText;
- if (inputText) {
- inputText.setText(GetHexColorString(value));
- }
-
- this.emit('valuechange', this._value);
- }
-
- setValue(value) {
- this.value = value;
- return this;
- }
-
- get color() {
- return this._value;
- }
-
- set color(color) {
- this.value = color;
- }
-
- setColor(color) {
- this.color = color;
- return this;
- }
-
-}
-
-export default ColorInput;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/tabpages/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/tabpages/Factory.js
deleted file mode 100644
index 2360e5f5a6d8add88e2f89ad78999e983974a283..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/tabpages/Factory.js
+++ /dev/null
@@ -1,13 +0,0 @@
-import TabPages from './TabPages.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('tabPages', function (config) {
- var gameObject = new TabPages(this.scene, config);
- this.scene.add.existing(gameObject);
- return gameObject;
-});
-
-SetValue(window, 'RexPlugins.UI.TabPages', TabPages);
-
-export default TabPages;
\ No newline at end of file
diff --git a/spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/replicate.py b/spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/replicate.py
deleted file mode 100644
index b71c7b8ed51a1d6c55b1f753bdd8d90bad79bd06..0000000000000000000000000000000000000000
--- a/spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/replicate.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-# File : replicate.py
-# Author : Jiayuan Mao
-# Email : maojiayuan@gmail.com
-# Date : 27/01/2018
-#
-# This file is part of Synchronized-BatchNorm-PyTorch.
-# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
-# Distributed under MIT License.
-
-import functools
-
-from torch.nn.parallel.data_parallel import DataParallel
-
-__all__ = [
- 'CallbackContext',
- 'execute_replication_callbacks',
- 'DataParallelWithCallback',
- 'patch_replication_callback'
-]
-
-
-class CallbackContext(object):
- pass
-
-
-def execute_replication_callbacks(modules):
- """
-    Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
-
- The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
-
-    Note that, since all replicated modules are isomorphic, we assign each sub-module a context
-    (shared among the copies of this module on different devices).
- Through this context, different copies can share some information.
-
- We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
- of any slave copies.
- """
- master_copy = modules[0]
- nr_modules = len(list(master_copy.modules()))
- ctxs = [CallbackContext() for _ in range(nr_modules)]
-
- for i, module in enumerate(modules):
- for j, m in enumerate(module.modules()):
- if hasattr(m, '__data_parallel_replicate__'):
- m.__data_parallel_replicate__(ctxs[j], i)
-
-
-class DataParallelWithCallback(DataParallel):
- """
- Data Parallel with a replication callback.
-
-    A replication callback `__data_parallel_replicate__` of each module will be invoked after being created by
- original `replicate` function.
- The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
-
- Examples:
- > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
- > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
- # sync_bn.__data_parallel_replicate__ will be invoked.
- """
-
- def replicate(self, module, device_ids):
- modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
- execute_replication_callbacks(modules)
- return modules
-
-
-def patch_replication_callback(data_parallel):
- """
- Monkey-patch an existing `DataParallel` object. Add the replication callback.
-    Useful when you have a customized `DataParallel` implementation.
-
- Examples:
- > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
- > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
- > patch_replication_callback(sync_bn)
- # this is equivalent to
- > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
- > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
- """
-
- assert isinstance(data_parallel, DataParallel)
-
- old_replicate = data_parallel.replicate
-
- @functools.wraps(old_replicate)
- def new_replicate(module, device_ids):
- modules = old_replicate(module, device_ids)
- execute_replication_callbacks(modules)
- return modules
-
- data_parallel.replicate = new_replicate
diff --git a/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/torch2onnx.py b/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/torch2onnx.py
deleted file mode 100644
index fc26ab82e552331bc8d75b34e81000418f4d38ec..0000000000000000000000000000000000000000
--- a/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/torch2onnx.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import numpy as np
-import onnx
-import torch
-
-
-def convert_onnx(net, path_module, output, opset=11, simplify=False):
- assert isinstance(net, torch.nn.Module)
- img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
-    img = img.astype(np.float64)  # np.float is deprecated; float64 matches the original behavior
- img = (img / 255. - 0.5) / 0.5 # torch style norm
- img = img.transpose((2, 0, 1))
- img = torch.from_numpy(img).unsqueeze(0).float()
-
- weight = torch.load(path_module)
- net.load_state_dict(weight)
- net.eval()
- torch.onnx.export(net, img, output, keep_initializers_as_inputs=False, verbose=False, opset_version=opset)
- model = onnx.load(output)
- graph = model.graph
- graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
- if simplify:
- from onnxsim import simplify
- model, check = simplify(model)
- assert check, "Simplified ONNX model could not be validated"
- onnx.save(model, output)
-
-
-if __name__ == '__main__':
- import os
- import argparse
- from backbones import get_model
-
- parser = argparse.ArgumentParser(description='ArcFace PyTorch to onnx')
- parser.add_argument('input', type=str, help='input backbone.pth file or path')
- parser.add_argument('--output', type=str, default=None, help='output onnx path')
- parser.add_argument('--network', type=str, default=None, help='backbone network')
- parser.add_argument('--simplify', type=bool, default=False, help='onnx simplify')
- args = parser.parse_args()
- input_file = args.input
- if os.path.isdir(input_file):
- input_file = os.path.join(input_file, "backbone.pth")
- assert os.path.exists(input_file)
- model_name = os.path.basename(os.path.dirname(input_file)).lower()
- params = model_name.split("_")
- if len(params) >= 3 and params[1] in ('arcface', 'cosface'):
- if args.network is None:
- args.network = params[2]
- assert args.network is not None
- print(args)
- backbone_onnx = get_model(args.network, dropout=0)
-
- output_path = args.output
- if output_path is None:
- output_path = os.path.join(os.path.dirname(__file__), 'onnx')
- if not os.path.exists(output_path):
- os.makedirs(output_path)
- assert os.path.isdir(output_path)
- output_file = os.path.join(output_path, "%s.onnx" % model_name)
- convert_onnx(backbone_onnx, input_file, output_file, simplify=args.simplify)
diff --git a/spaces/Alpaca233/SadTalker/src/utils/text2speech.py b/spaces/Alpaca233/SadTalker/src/utils/text2speech.py
deleted file mode 100644
index 00d165b6cc7774fd200929aafa0ff3b15916111e..0000000000000000000000000000000000000000
--- a/spaces/Alpaca233/SadTalker/src/utils/text2speech.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import os
-import tempfile
-from TTS.api import TTS
-
-
-class TTSTalker():
- def __init__(self) -> None:
- model_name = TTS.list_models()[0]
- self.tts = TTS(model_name)
-
- def test(self, text, language='en'):
-
- tempf = tempfile.NamedTemporaryFile(
- delete = False,
- suffix = ('.'+'wav'),
- )
-
- self.tts.tts_to_file(text, speaker=self.tts.speakers[0], language=language, file_path=tempf.name)
-
- return tempf.name
\ No newline at end of file
diff --git a/spaces/Ameaou/academic-chatgpt3.1/theme.py b/spaces/Ameaou/academic-chatgpt3.1/theme.py
deleted file mode 100644
index 1cc26b06d994eba6d37aa86f3bbfc12fc164731c..0000000000000000000000000000000000000000
--- a/spaces/Ameaou/academic-chatgpt3.1/theme.py
+++ /dev/null
@@ -1,231 +0,0 @@
-import gradio as gr
-from toolbox import get_conf
-CODE_HIGHLIGHT, = get_conf('CODE_HIGHLIGHT')
-# Colors available for gradio themes:
-# gr.themes.utils.colors.slate
-# gr.themes.utils.colors.gray
-# gr.themes.utils.colors.zinc
-# gr.themes.utils.colors.neutral
-# gr.themes.utils.colors.stone
-# gr.themes.utils.colors.red
-# gr.themes.utils.colors.orange
-# gr.themes.utils.colors.amber
-# gr.themes.utils.colors.yellow
-# gr.themes.utils.colors.lime
-# gr.themes.utils.colors.green
-# gr.themes.utils.colors.emerald
-# gr.themes.utils.colors.teal
-# gr.themes.utils.colors.cyan
-# gr.themes.utils.colors.sky
-# gr.themes.utils.colors.blue
-# gr.themes.utils.colors.indigo
-# gr.themes.utils.colors.violet
-# gr.themes.utils.colors.purple
-# gr.themes.utils.colors.fuchsia
-# gr.themes.utils.colors.pink
-# gr.themes.utils.colors.rose
-
-
-def adjust_theme():
- try:
- color_er = gr.themes.utils.colors.fuchsia
- set_theme = gr.themes.Default(
- primary_hue=gr.themes.utils.colors.orange,
- neutral_hue=gr.themes.utils.colors.gray,
- font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
- "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
- font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
- set_theme.set(
- # Colors
- input_background_fill_dark="*neutral_800",
- # Transition
- button_transition="none",
- # Shadows
- button_shadow="*shadow_drop",
- button_shadow_hover="*shadow_drop_lg",
- button_shadow_active="*shadow_inset",
- input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
- input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
- input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
- checkbox_label_shadow="*shadow_drop",
- block_shadow="*shadow_drop",
- form_gap_width="1px",
- # Button borders
- input_border_width="1px",
- input_background_fill="white",
- # Gradients
- stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
- stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
- error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
- error_background_fill_dark="*background_fill_primary",
- checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
- checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
- checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
- checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
- button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
- button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
- button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
- button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
- button_primary_border_color_dark="*primary_500",
- button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
- button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
- button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
- button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
- button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
- button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
- button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
- button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
- button_cancel_border_color=color_er.c200,
- button_cancel_border_color_dark=color_er.c600,
- button_cancel_text_color=color_er.c600,
- button_cancel_text_color_dark="white",
- )
- except:
- set_theme = None
-        print('Gradio version is too old; cannot customize fonts and colors')
- return set_theme
-
-
-advanced_css = """
-/* Tables: 1em vertical margin, collapsed borders between cells, empty cells shown. */
-.markdown-body table {
- margin: 1em 0;
- border-collapse: collapse;
- empty-cells: show;
-}
-
-/* Table cells: 5px padding, 1.2px border in var(--border-color-primary). */
-.markdown-body th, .markdown-body td {
- border: 1.2px solid var(--border-color-primary);
- padding: 5px;
-}
-
-/* Table header background: rgba(175,184,193,0.2). */
-.markdown-body thead {
- background-color: rgba(175,184,193,0.2);
-}
-
-/* Table header cell padding: 0.5em 0.2em. */
-.markdown-body thead th {
- padding: .5em .2em;
-}
-
-/* Adjust the default list padding so list markers align with the text. */
-.markdown-body ol, .markdown-body ul {
- padding-inline-start: 2em !important;
-}
-
-/* Chat bubble styling: rounded corners, max width, shadow, etc. */
-[class *= "message"] {
- border-radius: var(--radius-xl) !important;
- /* padding: var(--spacing-xl) !important; */
- /* font-size: var(--text-md) !important; */
- /* line-height: var(--line-md) !important; */
- /* min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
- /* min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
-}
-[data-testid = "bot"] {
- max-width: 95%;
- /* width: auto !important; */
- border-bottom-left-radius: 0 !important;
-}
-[data-testid = "user"] {
- max-width: 100%;
- /* width: auto !important; */
- border-bottom-right-radius: 0 !important;
-}
-
-/* Inline code: light gray background, rounded corners, small margins and padding. */
-.markdown-body code {
- display: inline;
- white-space: break-spaces;
- border-radius: 6px;
- margin: 0 2px 0 2px;
- padding: .2em .4em .1em .4em;
- background-color: rgba(175,184,193,0.2);
-}
-/* Code blocks: background color, padding, margins and rounded corners. */
-.markdown-body pre code {
- display: block;
- overflow: auto;
- white-space: pre;
- background-color: rgba(175,184,193,0.2);
- border-radius: 10px;
- padding: 1em;
- margin: 1em 2em 1em 0.5em;
-}
-
-"""
-
-if CODE_HIGHLIGHT:
- advanced_css += """
-
-.hll { background-color: #ffffcc }
-.c { color: #3D7B7B; font-style: italic } /* Comment */
-.err { border: 1px solid #FF0000 } /* Error */
-.k { color: hsl(197, 94%, 51%); font-weight: bold } /* Keyword */
-.o { color: #666666 } /* Operator */
-.ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
-.cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
-.cp { color: #9C6500 } /* Comment.Preproc */
-.cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
-.c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
-.cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
-.gd { color: #A00000 } /* Generic.Deleted */
-.ge { font-style: italic } /* Generic.Emph */
-.gr { color: #E40000 } /* Generic.Error */
-.gh { color: #000080; font-weight: bold } /* Generic.Heading */
-.gi { color: #008400 } /* Generic.Inserted */
-.go { color: #717171 } /* Generic.Output */
-.gp { color: #000080; font-weight: bold } /* Generic.Prompt */
-.gs { font-weight: bold } /* Generic.Strong */
-.gu { color: #800080; font-weight: bold } /* Generic.Subheading */
-.gt { color: #a9dd00 } /* Generic.Traceback */
-.kc { color: #008000; font-weight: bold } /* Keyword.Constant */
-.kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
-.kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
-.kp { color: #008000 } /* Keyword.Pseudo */
-.kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
-.kt { color: #B00040 } /* Keyword.Type */
-.m { color: #666666 } /* Literal.Number */
-.s { color: #BA2121 } /* Literal.String */
-.na { color: #687822 } /* Name.Attribute */
-.nb { color: #e5f8c3 } /* Name.Builtin */
-.nc { color: #ffad65; font-weight: bold } /* Name.Class */
-.no { color: #880000 } /* Name.Constant */
-.nd { color: #AA22FF } /* Name.Decorator */
-.ni { color: #717171; font-weight: bold } /* Name.Entity */
-.ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
-.nf { color: #f9f978 } /* Name.Function */
-.nl { color: #767600 } /* Name.Label */
-.nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
-.nt { color: #008000; font-weight: bold } /* Name.Tag */
-.nv { color: #19177C } /* Name.Variable */
-.ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
-.w { color: #bbbbbb } /* Text.Whitespace */
-.mb { color: #666666 } /* Literal.Number.Bin */
-.mf { color: #666666 } /* Literal.Number.Float */
-.mh { color: #666666 } /* Literal.Number.Hex */
-.mi { color: #666666 } /* Literal.Number.Integer */
-.mo { color: #666666 } /* Literal.Number.Oct */
-.sa { color: #BA2121 } /* Literal.String.Affix */
-.sb { color: #BA2121 } /* Literal.String.Backtick */
-.sc { color: #BA2121 } /* Literal.String.Char */
-.dl { color: #BA2121 } /* Literal.String.Delimiter */
-.sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
-.s2 { color: #2bf840 } /* Literal.String.Double */
-.se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
-.sh { color: #BA2121 } /* Literal.String.Heredoc */
-.si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
-.sx { color: #008000 } /* Literal.String.Other */
-.sr { color: #A45A77 } /* Literal.String.Regex */
-.s1 { color: #BA2121 } /* Literal.String.Single */
-.ss { color: #19177C } /* Literal.String.Symbol */
-.bp { color: #008000 } /* Name.Builtin.Pseudo */
-.fm { color: #0000FF } /* Name.Function.Magic */
-.vc { color: #19177C } /* Name.Variable.Class */
-.vg { color: #19177C } /* Name.Variable.Global */
-.vi { color: #19177C } /* Name.Variable.Instance */
-.vm { color: #19177C } /* Name.Variable.Magic */
-.il { color: #666666 } /* Literal.Number.Integer.Long */
-"""
diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/__init__.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.py
deleted file mode 100644
index 9ec83ece49d60cb9f60295c46f64f69f7493f5ca..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-import os
-import numpy as np
-import torch
-import warnings
-
-from .. import custom_ops
-from .. import misc
-from . import upfirdn2d
-from . import bias_act
-
-# ----------------------------------------------------------------------------
-
-_plugin = None
-
-
-def _init():
- global _plugin
- if _plugin is None:
-
- # sources=['filtered_lrelu.h', 'filtered_lrelu.cu', 'filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu']
- # sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
- # try:
- # _plugin = custom_ops.get_plugin('filtered_lrelu_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math', '--allow-unsupported-compiler'])
- # except:
- # warnings.warn('Failed to build CUDA kernels for filtered_lrelu_plugin. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
-
- _plugin = custom_ops.get_plugin_v3(
- module_name='filtered_lrelu_plugin',
- sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu',
- 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'],
- headers=['filtered_lrelu.h', 'filtered_lrelu.cu'],
- source_dir=os.path.dirname(__file__),
- extra_cuda_cflags=['--use_fast_math',
- '--allow-unsupported-compiler'],
- )
- return True
-
-
-def _get_filter_size(f):
- if f is None:
- return 1, 1
- assert isinstance(f, torch.Tensor)
- assert 1 <= f.ndim <= 2
- return f.shape[-1], f.shape[0] # width, height
-
-
-def _parse_padding(padding):
- if isinstance(padding, int):
- padding = [padding, padding]
- assert isinstance(padding, (list, tuple))
- assert all(isinstance(x, (int, np.integer)) for x in padding)
- padding = [int(x) for x in padding]
- if len(padding) == 2:
- px, py = padding
- padding = [px, px, py, py]
- px0, px1, py0, py1 = padding
- return px0, px1, py0, py1
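-# e.g. _parse_padding(3) -> (3, 3, 3, 3) and _parse_padding([2, 1]) -> (2, 2, 1, 1)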
-
-# ----------------------------------------------------------------------------
-
-
-def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'):
- r"""Filtered leaky ReLU for a batch of 2D images.
-
- Performs the following sequence of operations for each channel:
-
- 1. Add channel-specific bias if provided (`b`).
-
- 2. Upsample the image by inserting N-1 zeros after each pixel (`up`).
-
- 3. Pad the image with the specified number of zeros on each side (`padding`).
- Negative padding corresponds to cropping the image.
-
- 4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it
- so that the footprint of all output pixels lies within the input image.
-
- 5. Multiply each value by the provided gain factor (`gain`).
-
- 6. Apply leaky ReLU activation function to each value.
-
- 7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided.
-
- 8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking
- it so that the footprint of all output pixels lies within the input image.
-
- 9. Downsample the image by keeping every Nth pixel (`down`).
-
- The fused op is considerably more efficient than performing the same calculation
- using standard PyTorch ops. It supports gradients of arbitrary order.
-
- Args:
- x: Float32/float16/float64 input tensor of the shape
- `[batch_size, num_channels, in_height, in_width]`.
- fu: Float32 upsampling FIR filter of the shape
- `[filter_height, filter_width]` (non-separable),
- `[filter_taps]` (separable), or
- `None` (identity).
- fd: Float32 downsampling FIR filter of the shape
- `[filter_height, filter_width]` (non-separable),
- `[filter_taps]` (separable), or
- `None` (identity).
-        b:           Bias vector, or `None` to disable. Must be a 1D tensor of the same type
-                     as `x`. The length of the vector must match the channel dimension of `x`.
- up: Integer upsampling factor (default: 1).
- down: Integer downsampling factor. (default: 1).
- padding: Padding with respect to the upsampled image. Can be a single number
- or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
- (default: 0).
- gain: Overall scaling factor for signal magnitude (default: sqrt(2)).
- slope: Slope on the negative side of leaky ReLU (default: 0.2).
- clamp: Maximum magnitude for leaky ReLU output (default: None).
- flip_filter: False = convolution, True = correlation (default: False).
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
- Returns:
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
- """
- assert isinstance(x, torch.Tensor)
- assert impl in ['ref', 'cuda']
- if impl == 'cuda' and x.device.type == 'cuda' and _init():
- return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0)
- return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)
-
-# ----------------------------------------------------------------------------
-
-
-@misc.profiled_function
-def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
- """Slow and memory-inefficient reference implementation of `filtered_lrelu()` using
-    existing `upfirdn2d()` and `bias_act()` ops.
- """
- assert isinstance(x, torch.Tensor) and x.ndim == 4
- fu_w, fu_h = _get_filter_size(fu)
- fd_w, fd_h = _get_filter_size(fd)
- if b is not None:
- assert isinstance(b, torch.Tensor) and b.dtype == x.dtype
- misc.assert_shape(b, [x.shape[1]])
- assert isinstance(up, int) and up >= 1
- assert isinstance(down, int) and down >= 1
- px0, px1, py0, py1 = _parse_padding(padding)
- assert gain == float(gain) and gain > 0
- assert slope == float(slope) and slope >= 0
- assert clamp is None or (clamp == float(clamp) and clamp >= 0)
-
- # Calculate output size.
- batch_size, channels, in_h, in_w = x.shape
- in_dtype = x.dtype
- out_w = (in_w * up + (px0 + px1) - (fu_w - 1) -
- (fd_w - 1) + (down - 1)) // down
- out_h = (in_h * up + (py0 + py1) - (fu_h - 1) -
- (fd_h - 1) + (down - 1)) // down
-
- # Compute using existing ops.
- x = bias_act.bias_act(x=x, b=b) # Apply bias.
- # Upsample.
- x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[
- px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter)
- # Bias, leaky ReLU, clamp.
- x = bias_act.bias_act(x=x, act='lrelu', alpha=slope,
- gain=gain, clamp=clamp)
- # Downsample.
- x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter)
-
- # Check output shape & dtype.
- misc.assert_shape(x, [batch_size, channels, out_h, out_w])
- assert x.dtype == in_dtype
- return x
-
-# ----------------------------------------------------------------------------
-
-
-_filtered_lrelu_cuda_cache = dict()
-
-
-def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
- """Fast CUDA implementation of `filtered_lrelu()` using custom ops.
- """
- assert isinstance(up, int) and up >= 1
- assert isinstance(down, int) and down >= 1
- px0, px1, py0, py1 = _parse_padding(padding)
- assert gain == float(gain) and gain > 0
- gain = float(gain)
- assert slope == float(slope) and slope >= 0
- slope = float(slope)
- assert clamp is None or (clamp == float(clamp) and clamp >= 0)
- clamp = float(clamp if clamp is not None else 'inf')
-
- # Lookup from cache.
- key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter)
- if key in _filtered_lrelu_cuda_cache:
- return _filtered_lrelu_cuda_cache[key]
-
- # Forward op.
- class FilteredLReluCuda(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ
- assert isinstance(x, torch.Tensor) and x.ndim == 4
-
- # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable).
- if fu is None:
- fu = torch.ones([1, 1], dtype=torch.float32, device=x.device)
- if fd is None:
- fd = torch.ones([1, 1], dtype=torch.float32, device=x.device)
- assert 1 <= fu.ndim <= 2
- assert 1 <= fd.ndim <= 2
-
- # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1.
- if up == 1 and fu.ndim == 1 and fu.shape[0] == 1:
- fu = fu.square()[None]
- if down == 1 and fd.ndim == 1 and fd.shape[0] == 1:
- fd = fd.square()[None]
-
- # Missing sign input tensor.
- if si is None:
- si = torch.empty([0])
-
- # Missing bias tensor.
- if b is None:
- b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device)
-
- # Construct internal sign tensor only if gradients are needed.
- write_signs = (si.numel() == 0) and (
- x.requires_grad or b.requires_grad)
-
- # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout.
- strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1]
- if any(a < b for a, b in zip(strides[:-1], strides[1:])):
- warnings.warn(
- "low-performance memory layout detected in filtered_lrelu input", RuntimeWarning)
-
- # Call C++/Cuda plugin if datatype is supported.
- if x.dtype in [torch.float16, torch.float32]:
- if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device):
- warnings.warn(
- "filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning)
- y, so, return_code = _plugin.filtered_lrelu(
- x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs)
- else:
- return_code = -1
-
- # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because
- # only the bit-packed sign tensor is retained for gradient computation.
- if return_code < 0:
- warnings.warn(
- "filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning)
-
- y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias.
- # Upsample.
- y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[
- px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter)
- # Activation function and sign handling. Modifies y in-place.
- so = _plugin.filtered_lrelu_act_(
- y, si, sx, sy, gain, slope, clamp, write_signs)
- # Downsample.
- y = upfirdn2d.upfirdn2d(
- x=y, f=fd, down=down, flip_filter=flip_filter)
-
- # Prepare for gradient computation.
- ctx.save_for_backward(fu, fd, (si if si.numel() else so))
- ctx.x_shape = x.shape
- ctx.y_shape = y.shape
- ctx.s_ofs = sx, sy
- return y
-
- @staticmethod
- def backward(ctx, dy): # pylint: disable=arguments-differ
- fu, fd, si = ctx.saved_tensors
- _, _, xh, xw = ctx.x_shape
- _, _, yh, yw = ctx.y_shape
- sx, sy = ctx.s_ofs
- dx = None # 0
- dfu = None
- assert not ctx.needs_input_grad[1]
- dfd = None
- assert not ctx.needs_input_grad[2]
- db = None # 3
- dsi = None
- assert not ctx.needs_input_grad[4]
- dsx = None
- assert not ctx.needs_input_grad[5]
- dsy = None
- assert not ctx.needs_input_grad[6]
-
- if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]:
- pp = [
- (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0,
- xw * up - yw * down + px0 - (up - 1),
- (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0,
- xh * up - yh * down + py0 - (up - 1),
- ]
- gg = gain * (up ** 2) / (down ** 2)
- ff = (not flip_filter)
- sx = sx - (fu.shape[-1] - 1) + px0
- sy = sy - (fu.shape[0] - 1) + py0
- dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope,
- clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy)
-
- if ctx.needs_input_grad[3]:
- db = dx.sum([0, 2, 3])
-
- return dx, dfu, dfd, db, dsi, dsx, dsy
-
- # Add to cache.
- _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda
- return FilteredLReluCuda
-
-# ----------------------------------------------------------------------------
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_heun_discrete.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_heun_discrete.py
deleted file mode 100644
index 5f694fd60fc9f7f596f0d28d19cc231a26712fd1..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_heun_discrete.py
+++ /dev/null
@@ -1,426 +0,0 @@
-# Copyright 2023 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import math
-from collections import defaultdict
-from typing import List, Optional, Tuple, Union
-
-import numpy as np
-import torch
-
-from ..configuration_utils import ConfigMixin, register_to_config
-from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
-
-
-# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
-def betas_for_alpha_bar(
- num_diffusion_timesteps,
- max_beta=0.999,
- alpha_transform_type="cosine",
-):
- """
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
- (1-beta) over time from t = [0,1].
-
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
- to that part of the diffusion process.
-
-
- Args:
- num_diffusion_timesteps (`int`): the number of betas to produce.
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
- prevent singularities.
- alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
- Choose from `cosine` or `exp`
-
- Returns:
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
- """
- if alpha_transform_type == "cosine":
-
- def alpha_bar_fn(t):
- return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
-
- elif alpha_transform_type == "exp":
-
- def alpha_bar_fn(t):
- return math.exp(t * -12.0)
-
- else:
- raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
-
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
- return torch.tensor(betas, dtype=torch.float32)
-
-
-class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
- """
- Implements Algorithm 2 (Heun steps) from Karras et al. (2022). for discrete beta schedules. Based on the original
- k-diffusion implementation by Katherine Crowson:
- https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L90
-
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
- [`~SchedulerMixin.from_pretrained`] functions.
-
- Args:
- num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the
- starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`):
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
- `linear` or `scaled_linear`.
- trained_betas (`np.ndarray`, optional):
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
- prediction_type (`str`, default `epsilon`, optional):
- prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
-            process), `sample` (directly predicting the noisy sample) or `v_prediction` (see section 2.4
- https://imagen.research.google/video/paper.pdf).
- clip_sample (`bool`, default `True`):
- option to clip predicted sample for numerical stability.
- clip_sample_range (`float`, default `1.0`):
- the maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
- use_karras_sigmas (`bool`, *optional*, defaults to `False`):
- This parameter controls whether to use Karras sigmas (Karras et al. (2022) scheme) for step sizes in the
- noise schedule during the sampling process. If True, the sigmas will be determined according to a sequence
- of noise levels {σi} as defined in Equation (5) of the paper https://arxiv.org/pdf/2206.00364.pdf.
- timestep_spacing (`str`, default `"linspace"`):
- The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample
- Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information.
- steps_offset (`int`, default `0`):
- an offset added to the inference steps. You can use a combination of `offset=1` and
- `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
- stable diffusion.
- """
-
- _compatibles = [e.name for e in KarrasDiffusionSchedulers]
- order = 2
-
- @register_to_config
- def __init__(
- self,
- num_train_timesteps: int = 1000,
- beta_start: float = 0.00085, # sensible defaults
- beta_end: float = 0.012,
- beta_schedule: str = "linear",
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
- prediction_type: str = "epsilon",
- use_karras_sigmas: Optional[bool] = False,
- clip_sample: Optional[bool] = False,
- clip_sample_range: float = 1.0,
- timestep_spacing: str = "linspace",
- steps_offset: int = 0,
- ):
- if trained_betas is not None:
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
- elif beta_schedule == "linear":
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
- elif beta_schedule == "scaled_linear":
- # this schedule is very specific to the latent diffusion model.
- self.betas = (
- torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
- )
- elif beta_schedule == "squaredcos_cap_v2":
- # Glide cosine schedule
- self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
- elif beta_schedule == "exp":
- self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
- else:
-            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
-
- self.alphas = 1.0 - self.betas
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
-
- # set all values
- self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
- self.use_karras_sigmas = use_karras_sigmas
-
- def index_for_timestep(self, timestep, schedule_timesteps=None):
- if schedule_timesteps is None:
- schedule_timesteps = self.timesteps
-
- indices = (schedule_timesteps == timestep).nonzero()
-
- # The sigma index that is taken for the **very** first `step`
- # is always the second index (or the last index if there is only 1)
- # This way we can ensure we don't accidentally skip a sigma in
- # case we start in the middle of the denoising schedule (e.g. for image-to-image)
- if len(self._index_counter) == 0:
- pos = 1 if len(indices) > 1 else 0
- else:
- timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
- pos = self._index_counter[timestep_int]
-
- return indices[pos].item()
-
- @property
- def init_noise_sigma(self):
- # standard deviation of the initial noise distribution
- if self.config.timestep_spacing in ["linspace", "trailing"]:
- return self.sigmas.max()
-
- return (self.sigmas.max() ** 2 + 1) ** 0.5
-
- def scale_model_input(
- self,
- sample: torch.FloatTensor,
- timestep: Union[float, torch.FloatTensor],
- ) -> torch.FloatTensor:
- """
- Args:
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
- current timestep.
- sample (`torch.FloatTensor`): input sample timestep (`int`, optional): current timestep
- Returns:
- `torch.FloatTensor`: scaled input sample
- """
- step_index = self.index_for_timestep(timestep)
-
- sigma = self.sigmas[step_index]
- sample = sample / ((sigma**2 + 1) ** 0.5)
- return sample
-
- def set_timesteps(
- self,
- num_inference_steps: int,
- device: Union[str, torch.device] = None,
- num_train_timesteps: Optional[int] = None,
- ):
- """
- Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
-
- Args:
- num_inference_steps (`int`):
- the number of diffusion steps used when generating samples with a pre-trained model.
- device (`str` or `torch.device`, optional):
- the device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
- """
- self.num_inference_steps = num_inference_steps
-
- num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
-
- # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
- if self.config.timestep_spacing == "linspace":
- timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
- elif self.config.timestep_spacing == "leading":
- step_ratio = num_train_timesteps // self.num_inference_steps
- # creates integer timesteps by multiplying by ratio
- # casting to int to avoid issues when num_inference_step is power of 3
- timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
- timesteps += self.config.steps_offset
- elif self.config.timestep_spacing == "trailing":
- step_ratio = num_train_timesteps / self.num_inference_steps
- # creates integer timesteps by multiplying by ratio
- # casting to int to avoid issues when num_inference_step is power of 3
- timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
- timesteps -= 1
- else:
- raise ValueError(
- f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
- )
-
- sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
- log_sigmas = np.log(sigmas)
- sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
-
- if self.config.use_karras_sigmas:
- sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
- timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
-
- sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
- sigmas = torch.from_numpy(sigmas).to(device=device)
- self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
-
- timesteps = torch.from_numpy(timesteps)
- timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
-
- if str(device).startswith("mps"):
- # mps does not support float64
- self.timesteps = timesteps.to(device, dtype=torch.float32)
- else:
- self.timesteps = timesteps.to(device=device)
-
- # empty dt and derivative
- self.prev_derivative = None
- self.dt = None
-
- # for exp beta schedules, such as the one for `pipeline_shap_e.py`
- # we need an index counter
- self._index_counter = defaultdict(int)
-
- # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
- def _sigma_to_t(self, sigma, log_sigmas):
- # get log sigma
- log_sigma = np.log(sigma)
-
- # get distribution
- dists = log_sigma - log_sigmas[:, np.newaxis]
-
- # get sigmas range
- low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
- high_idx = low_idx + 1
-
- low = log_sigmas[low_idx]
- high = log_sigmas[high_idx]
-
- # interpolate sigmas
- w = (low - log_sigma) / (low - high)
- w = np.clip(w, 0, 1)
-
- # transform interpolation to time range
- t = (1 - w) * low_idx + w * high_idx
- t = t.reshape(sigma.shape)
- return t
-
- # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
- def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
- """Constructs the noise schedule of Karras et al. (2022)."""
-
- sigma_min: float = in_sigmas[-1].item()
- sigma_max: float = in_sigmas[0].item()
-
- rho = 7.0 # 7.0 is the value used in the paper
- ramp = np.linspace(0, 1, num_inference_steps)
- min_inv_rho = sigma_min ** (1 / rho)
- max_inv_rho = sigma_max ** (1 / rho)
- sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
- return sigmas
-
- @property
- def state_in_first_order(self):
- return self.dt is None
-
- def step(
- self,
- model_output: Union[torch.FloatTensor, np.ndarray],
- timestep: Union[float, torch.FloatTensor],
- sample: Union[torch.FloatTensor, np.ndarray],
- return_dict: bool = True,
- ) -> Union[SchedulerOutput, Tuple]:
-        """
-        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
-        process from the learned model outputs (most often the predicted noise).
-
-        Args:
-            model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model.
-            timestep (`int`): current discrete timestep in the diffusion chain.
-            sample (`torch.FloatTensor` or `np.ndarray`): current instance of sample being created by diffusion process.
-            return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
-
-        Returns:
-            [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
-            [`~schedulers.scheduling_utils.SchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
-            returning a tuple, the first element is the sample tensor.
-        """
- step_index = self.index_for_timestep(timestep)
-
- # advance index counter by 1
- timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
- self._index_counter[timestep_int] += 1
-
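-        # Heun's method alternates a first-order (Euler) prediction with a second-order correction;
-        # `self.dt is None` signals that we are in the first-order phase of the current step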
- if self.state_in_first_order:
- sigma = self.sigmas[step_index]
- sigma_next = self.sigmas[step_index + 1]
- else:
- # 2nd order / Heun's method
- sigma = self.sigmas[step_index - 1]
- sigma_next = self.sigmas[step_index]
-
- # currently only gamma=0 is supported. This usually works best anyways.
- # We can support gamma in the future but then need to scale the timestep before
- # passing it to the model which requires a change in API
- gamma = 0
- sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
-
- # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
- if self.config.prediction_type == "epsilon":
- sigma_input = sigma_hat if self.state_in_first_order else sigma_next
- pred_original_sample = sample - sigma_input * model_output
- elif self.config.prediction_type == "v_prediction":
- sigma_input = sigma_hat if self.state_in_first_order else sigma_next
- pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
- sample / (sigma_input**2 + 1)
- )
- elif self.config.prediction_type == "sample":
- pred_original_sample = model_output
- else:
- raise ValueError(
-                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
- )
-
- if self.config.clip_sample:
- pred_original_sample = pred_original_sample.clamp(
- -self.config.clip_sample_range, self.config.clip_sample_range
- )
-
- if self.state_in_first_order:
- # 2. Convert to an ODE derivative for 1st order
- derivative = (sample - pred_original_sample) / sigma_hat
- # 3. delta timestep
- dt = sigma_next - sigma_hat
-
- # store for 2nd order step
- self.prev_derivative = derivative
- self.dt = dt
- self.sample = sample
- else:
- # 2. 2nd order / Heun's method
- derivative = (sample - pred_original_sample) / sigma_next
- derivative = (self.prev_derivative + derivative) / 2
-
- # 3. take prev timestep & sample
- dt = self.dt
- sample = self.sample
-
- # free dt and derivative
- # Note, this puts the scheduler in "first order mode"
- self.prev_derivative = None
- self.dt = None
- self.sample = None
-
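-        # 4. take the step using the stored step size and the (possibly averaged) derivative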
- prev_sample = sample + derivative * dt
-
- if not return_dict:
- return (prev_sample,)
-
- return SchedulerOutput(prev_sample=prev_sample)
-
- def add_noise(
- self,
- original_samples: torch.FloatTensor,
- noise: torch.FloatTensor,
- timesteps: torch.FloatTensor,
- ) -> torch.FloatTensor:
- # Make sure sigmas and timesteps have the same device and dtype as original_samples
- sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
- if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
- # mps does not support float64
- schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
- timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
- else:
- schedule_timesteps = self.timesteps.to(original_samples.device)
- timesteps = timesteps.to(original_samples.device)
-
- step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
-
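-        # look up the sigma for each timestep and broadcast it across the sample dimensions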
- sigma = sigmas[step_indices].flatten()
- while len(sigma.shape) < len(original_samples.shape):
- sigma = sigma.unsqueeze(-1)
-
- noisy_samples = original_samples + noise * sigma
- return noisy_samples
-
- def __len__(self):
- return self.config.num_train_timesteps
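-
-# A minimal usage sketch (not part of the original file). It assumes the standard diffusers scheduler API shown
-# above (`set_timesteps`, `scale_model_input`, `step`) plus an `init_noise_sigma` property; `shape` and `model`
-# are placeholders for the sample shape and a denoising `model(x, t)` callable:
-#
-#   scheduler.set_timesteps(num_inference_steps=30, device="cuda")
-#   sample = torch.randn(shape, device="cuda") * scheduler.init_noise_sigma
-#   for t in scheduler.timesteps:
-#       model_input = scheduler.scale_model_input(sample, t)
-#       noise_pred = model(model_input, t)
-#       sample = scheduler.step(noise_pred, t, sample).prev_sample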
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py
deleted file mode 100644
index b845128de51d2080f6444e2c849f4642a43ad942..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py
+++ /dev/null
@@ -1,9 +0,0 @@
-_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py'
-model = dict(
- pretrained='open-mmlab://msra/hrnetv2_w18',
- backbone=dict(
- extra=dict(
- stage2=dict(num_channels=(18, 36)),
- stage3=dict(num_channels=(18, 36, 72)),
- stage4=dict(num_channels=(18, 36, 72, 144)))),
- neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py
deleted file mode 100644
index ed3a96c7dec922fcc73a3ab1446ffdf4a756c152..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py
+++ /dev/null
@@ -1,52 +0,0 @@
-_base_ = [
- '../_base_/models/retinanet_r50_fpn.py',
- '../_base_/datasets/coco_detection.py',
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]
-# model settings
-model = dict(
- pretrained='torchvision://resnet101',
- backbone=dict(depth=101),
- bbox_head=dict(
- _delete_=True,
- type='SABLRetinaHead',
- num_classes=80,
- in_channels=256,
- stacked_convs=4,
- feat_channels=256,
- approx_anchor_generator=dict(
- type='AnchorGenerator',
- octave_base_scale=4,
- scales_per_octave=3,
- ratios=[0.5, 1.0, 2.0],
- strides=[8, 16, 32, 64, 128]),
- square_anchor_generator=dict(
- type='AnchorGenerator',
- ratios=[1.0],
- scales=[4],
- strides=[8, 16, 32, 64, 128]),
- bbox_coder=dict(
- type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
- loss_cls=dict(
- type='FocalLoss',
- use_sigmoid=True,
- gamma=2.0,
- alpha=0.25,
- loss_weight=1.0),
- loss_bbox_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
- loss_bbox_reg=dict(
- type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
- # training and testing settings
- train_cfg=dict(
- assigner=dict(
- type='ApproxMaxIoUAssigner',
- pos_iou_thr=0.5,
- neg_iou_thr=0.4,
- min_pos_iou=0.0,
- ignore_iof_thr=-1),
- allowed_border=-1,
- pos_weight=-1,
- debug=False))
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/retina_sepbn_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/retina_sepbn_head.py
deleted file mode 100644
index 6b8ce7f0104b90af4b128e0f245473a1c0219fcd..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/retina_sepbn_head.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import torch.nn as nn
-from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
-
-from ..builder import HEADS
-from .anchor_head import AnchorHead
-
-
-@HEADS.register_module()
-class RetinaSepBNHead(AnchorHead):
-    """RetinaHead with separate BN.
-
- In RetinaHead, conv/norm layers are shared across different FPN levels,
- while in RetinaSepBNHead, conv layers are shared across different FPN
- levels, but BN layers are separated.
- """
-
- def __init__(self,
- num_classes,
- num_ins,
- in_channels,
- stacked_convs=4,
- conv_cfg=None,
- norm_cfg=None,
- **kwargs):
- self.stacked_convs = stacked_convs
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.num_ins = num_ins
- super(RetinaSepBNHead, self).__init__(num_classes, in_channels,
- **kwargs)
-
- def _init_layers(self):
- """Initialize layers of the head."""
- self.relu = nn.ReLU(inplace=True)
- self.cls_convs = nn.ModuleList()
- self.reg_convs = nn.ModuleList()
- for i in range(self.num_ins):
- cls_convs = nn.ModuleList()
- reg_convs = nn.ModuleList()
- for i in range(self.stacked_convs):
- chn = self.in_channels if i == 0 else self.feat_channels
- cls_convs.append(
- ConvModule(
- chn,
- self.feat_channels,
- 3,
- stride=1,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg))
- reg_convs.append(
- ConvModule(
- chn,
- self.feat_channels,
- 3,
- stride=1,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg))
- self.cls_convs.append(cls_convs)
- self.reg_convs.append(reg_convs)
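-        # share the conv weights across FPN levels while keeping each level's norm layers separate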
- for i in range(self.stacked_convs):
- for j in range(1, self.num_ins):
- self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
- self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
- self.retina_cls = nn.Conv2d(
- self.feat_channels,
- self.num_anchors * self.cls_out_channels,
- 3,
- padding=1)
- self.retina_reg = nn.Conv2d(
- self.feat_channels, self.num_anchors * 4, 3, padding=1)
-
- def init_weights(self):
- """Initialize weights of the head."""
- for m in self.cls_convs[0]:
- normal_init(m.conv, std=0.01)
- for m in self.reg_convs[0]:
- normal_init(m.conv, std=0.01)
- bias_cls = bias_init_with_prob(0.01)
- normal_init(self.retina_cls, std=0.01, bias=bias_cls)
- normal_init(self.retina_reg, std=0.01)
-
- def forward(self, feats):
- """Forward features from the upstream network.
-
- Args:
- feats (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
-
- Returns:
- tuple: Usually a tuple of classification scores and bbox prediction
- cls_scores (list[Tensor]): Classification scores for all scale
- levels, each is a 4D-tensor, the channels number is
- num_anchors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all scale
- levels, each is a 4D-tensor, the channels number is
- num_anchors * 4.
- """
- cls_scores = []
- bbox_preds = []
- for i, x in enumerate(feats):
-            cls_feat = x
-            reg_feat = x
- for cls_conv in self.cls_convs[i]:
- cls_feat = cls_conv(cls_feat)
- for reg_conv in self.reg_convs[i]:
- reg_feat = reg_conv(reg_feat)
- cls_score = self.retina_cls(cls_feat)
- bbox_pred = self.retina_reg(reg_feat)
- cls_scores.append(cls_score)
- bbox_preds.append(bbox_pred)
- return cls_scores, bbox_preds
diff --git a/spaces/ArtificialArtist007/Rate-my-Aiart/README.md b/spaces/ArtificialArtist007/Rate-my-Aiart/README.md
deleted file mode 100644
index 5500253a61c335bbd64aac8839d22faf9aa25bc8..0000000000000000000000000000000000000000
--- a/spaces/ArtificialArtist007/Rate-my-Aiart/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Rate My Aiart
-emoji: 🔥
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
-license: other
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/compat.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/compat.py
deleted file mode 100644
index 3f4d300cef077e698989245562375a9444d983fa..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/compat.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Stuff that differs in different Python versions and platform
-distributions."""
-
-import logging
-import os
-import sys
-
-__all__ = ["get_path_uid", "stdlib_pkgs", "WINDOWS"]
-
-
-logger = logging.getLogger(__name__)
-
-
-def has_tls() -> bool:
- try:
- import _ssl # noqa: F401 # ignore unused
-
- return True
- except ImportError:
- pass
-
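-    # fall back to reporting whether urllib3 is using pyOpenSSL for TLS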
- from pip._vendor.urllib3.util import IS_PYOPENSSL
-
- return IS_PYOPENSSL
-
-
-def get_path_uid(path: str) -> int:
- """
- Return path's uid.
-
- Does not follow symlinks:
- https://github.com/pypa/pip/pull/935#discussion_r5307003
-
- Placed this function in compat due to differences on AIX and
- Jython, that should eventually go away.
-
- :raises OSError: When path is a symlink or can't be read.
- """
- if hasattr(os, "O_NOFOLLOW"):
- fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
- file_uid = os.fstat(fd).st_uid
- os.close(fd)
- else: # AIX and Jython
- # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW
- if not os.path.islink(path):
- # older versions of Jython don't have `os.fstat`
- file_uid = os.stat(path).st_uid
- else:
- # raise OSError for parity with os.O_NOFOLLOW above
- raise OSError(f"{path} is a symlink; Will not return uid for symlinks")
- return file_uid
-
-
-# packages in the stdlib that may have installation metadata, but should not be
-# considered 'installed'. this theoretically could be determined based on
-# dist.location (py27:`sysconfig.get_paths()['stdlib']`,
-# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
-# make this ineffective, so hard-coding
-stdlib_pkgs = {"python", "wsgiref", "argparse"}
-
-
-# windows detection, covers cpython and ironpython
-WINDOWS = sys.platform.startswith("win") or (sys.platform == "cli" and os.name == "nt")
diff --git a/spaces/Awesimo/jojogan/e4e/datasets/images_dataset.py b/spaces/Awesimo/jojogan/e4e/datasets/images_dataset.py
deleted file mode 100644
index 00c54c7db944569a749af4c6f0c4d99fcc37f9cc..0000000000000000000000000000000000000000
--- a/spaces/Awesimo/jojogan/e4e/datasets/images_dataset.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from torch.utils.data import Dataset
-from PIL import Image
-from utils import data_utils
-
-
-class ImagesDataset(Dataset):
-
- def __init__(self, source_root, target_root, opts, target_transform=None, source_transform=None):
- self.source_paths = sorted(data_utils.make_dataset(source_root))
- self.target_paths = sorted(data_utils.make_dataset(target_root))
- self.source_transform = source_transform
- self.target_transform = target_transform
- self.opts = opts
-
- def __len__(self):
- return len(self.source_paths)
-
- def __getitem__(self, index):
- from_path = self.source_paths[index]
- from_im = Image.open(from_path)
- from_im = from_im.convert('RGB')
-
- to_path = self.target_paths[index]
- to_im = Image.open(to_path).convert('RGB')
- if self.target_transform:
- to_im = self.target_transform(to_im)
-
- if self.source_transform:
- from_im = self.source_transform(from_im)
- else:
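-            # no dedicated source transform: reuse the (already transformed) target image as the source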
- from_im = to_im
-
- return from_im, to_im
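-
-# A minimal usage sketch (not part of the original file); the paths, `opts`, and the transforms below are
-# placeholders for whatever the surrounding training code provides:
-#
-#   dataset = ImagesDataset(source_root='data/src', target_root='data/tgt', opts=opts,
-#                           source_transform=some_transform, target_transform=some_transform)
-#   from_im, to_im = dataset[0]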
diff --git a/spaces/Benson/text-generation/Examples/9no Amanecer Rpg Mod Apk.md b/spaces/Benson/text-generation/Examples/9no Amanecer Rpg Mod Apk.md
deleted file mode 100644
index bbe22be399febea962e40deb7f686d4158ad0af7..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/9no Amanecer Rpg Mod Apk.md
+++ /dev/null
@@ -1,84 +0,0 @@
-
-
9th Dawn RPG Mod APK: Una guía para la aventura definitiva
-
¿Estás buscando un juego de rol divertido e inmersivo que te mantenga enganchado durante horas? ¿Quieres experimentar un mundo vasto y abierto lleno de misterios, peligros y aventuras? Si usted respondió que sí, entonces usted debe tratar 9th Dawn RPG Mod APK, una versión modificada del popular juego 9th Dawn RPG por Valorware. En este artículo, te contaremos todo lo que necesitas saber sobre este increíble juego, incluyendo sus características, historia, beneficios de mod, instrucciones de descarga y consejos y trucos. ¡Sigue leyendo y prepárate para la aventura definitiva!
9th Dawn RPG es un juego de rol clásico que fue lanzado en 2012 por Valorware, un desarrollador de juegos independiente. El juego se desarrolla en la isla continente de Montelorne, una tierra alejada del continente, pero llena de misterio, peligro y aventura. Juegas como un héroe que llega a Montelorne para explorar sus secretos y enfrentar sus desafíos. Puedes elegir entre tres clases diferentes: guerrero, mago o arquero, y personalizar la apariencia, habilidades y equipo de tu personaje. También puedes interactuar con varios PNJ, unirte a facciones, completar misiones, recoger objetos, crear armas y armaduras, aprender hechizos, luchar contra enemigos y jefes, y mucho más. El juego tiene un estilo retro pixel art que le da un encanto nostálgico, y un dinámico ciclo día-noche que afecta el juego. El juego también tiene un enorme mundo abierto que puedes explorar libremente, con más de 300 mapas para descubrir.
-
Características de 9th Dawn RPG
-
Algunas de las características que hacen que el 9th Dawn RPG se destaque son:
-
-
Un mundo grande y diverso con más de 300 mapas para explorar, incluyendo bosques, cuevas, mazmorras, pueblos, castillos, islas y más.
-
Una historia rica y atractiva con múltiples finales dependiendo de sus opciones y acciones.
-
Un dinámico ciclo día-noche que afecta el entorno, los PNJ, los enemigos y las misiones.
-
-
Un sistema de combate complejo que te permite usar armas cuerpo a cuerpo, armas a distancia, escudos, hechizos, pociones, trampas y más.
-
Un sistema de personalización de personajes que te permite elegir entre tres clases (guerrero, mago o arquero), seleccionar tu género y apariencia, distribuir tus atributos (fuerza, agilidad, inteligencia) y aprender habilidades y hechizos.
-
Un sistema de equipos que le permite recoger y elaborar varios artículos como armas, armaduras, accesorios, consumibles, etc.
-
Un sistema de inventario que te permite administrar tus artículos y equiparlos en tu personaje.
-
Un sistema de facciones que te permite unirte a una de las cuatro facciones en Montelorne: La Orden del León (el ejército real), Los Caballeros de las Sombras (los rebeldes), La Sociedad Arcana (los magos), o La Hermandad (los ladrones).
-
Un sistema de búsqueda que te permite aceptar y completar varias tareas de NPC o facciones.
-
Un sistema de diálogo que le permite interactuar con los PNJ y elegir sus respuestas.
-
Un sistema de guardado que te permite guardar tu progreso en cualquier momento.
-
-
Historia y escenario del 9º Amanecer RPG
-
La historia de 9th Dawn RPG tiene lugar en la isla continente de Montelorne, una tierra que una vez fue parte de un gran imperio llamado Esteria. Sin embargo, debido a un evento cataclísmico conocido como la Gran Guerra, Montelorne fue separado del continente y sumido en el caos. El imperio colapsó, y cuatro facciones surgieron para competir por el poder y la influencia: La Orden del León, Los Caballeros de la Sombra, La Sociedad Arcana y La Hermandad. Eres un héroe que llega a Montelorne para explorar sus secretos y afrontar sus retos. Puedes elegir alinearte con una de las facciones, o permanecer neutral y forjar tu propio destino. Sus acciones y elecciones darán forma al destino de Montelorne y su gente.
-
¿Qué es 9th Dawn RPG Mod APK?
-
-
Beneficios de 9th Dawn RPG Mod APK
-
Algunos de los beneficios que se pueden disfrutar mediante el uso de 9th Dawn RPG Mod APK son:
-
-
Dinero ilimitado: Puedes obtener monedas y gemas ilimitadas que puedes usar para comprar artículos, mejorar tu equipo, aprender habilidades y hechizos, etc.
-
Artículos desbloqueados: Puedes acceder a todos los objetos del juego, incluyendo armas, armaduras, accesorios, consumibles, etc., sin tener que recogerlos o crearlos.
-
Mapas desbloqueados: Puedes explorar todos los mapas del juego, incluidos los ocultos, sin tener que desbloquearlos completando misiones o encontrando claves.
-
Sin anuncios: Puedes jugar el juego sin interrupciones o distracciones de anuncios molestos.
-
-
Cómo descargar e instalar 9th Dawn RPG Mod APK
-
Para descargar e instalar 9th Dawn RPG Mod APK, debe seguir estos pasos:
-
-
Ir a un sitio web confiable que ofrece 9th Dawn RPG Mod APK para su descarga gratuita. Por ejemplo, puede utilizar este enlace: .
-
Haga clic en el botón de descarga y espere a que el archivo se descargue en su dispositivo.
-
Una vez descargado el archivo, vaya a la configuración de su dispositivo y habilite la opción de instalar aplicaciones de fuentes desconocidas. Esto le permitirá instalar APK mod que no son de Google Play Store.
-
Localice el archivo descargado en su dispositivo y toque en él para iniciar el proceso de instalación.
-
Siga las instrucciones en la pantalla y espere a que se complete la instalación.
-
Iniciar el juego y disfrutar!
-
-
Consejos y trucos para jugar 9th Dawn RPG Mod APK
-
Ahora que ha descargado e instalado 9th Dawn RPG Mod APK, usted está listo para comenzar su aventura en Montelorne. Aquí hay algunos consejos y trucos que te ayudarán a aprovechar al máximo tu experiencia de juego:
-
-
Explora el mundo de Montelorne
-
-
Personaliza tu personaje y equipo
-
Otra gran cosa sobre 9th Dawn RPG es su sistema de personalización de personajes que le permite crear su propio héroe único. Puedes elegir entre tres clases: guerrero, mago o arquero, y seleccionar tu género y apariencia. También puedes distribuir tus atributos (fuerza, agilidad, inteligencia) y aprender habilidades y hechizos que se adapten a tu estilo de juego. También puedes recoger y elaborar varios artículos como armas, armaduras, accesorios, consumibles, etc., y equiparlos en tu personaje. Puedes encontrar objetos explorando el mundo, completando misiones, derrotando enemigos, abriendo cofres, etc. También puedes crear objetos usando materiales y recetas que puedas encontrar o comprar. Puede actualizar su equipo utilizando gemas que puede encontrar o comprar. También puede encantar su equipo utilizando pergaminos que puede encontrar o comprar. Puedes personalizar tu personaje y equipo en cualquier momento accediendo al menú.
-
Aprender habilidades y hechizos
-
Las habilidades y los hechizos son habilidades especiales que puedes usar en combate o exploración. Pueden ayudarte a infligir más daño, curarte a ti mismo o a tus aliados, pulirte a ti mismo o a ellos, desbaratar enemigos, escapar del peligro, etc. Puedes aprender habilidades y hechizos nivelando tu personaje, uniéndote a facciones, completando misiones, encontrar libros, etc. También puede mejorar sus habilidades y hechizos mediante el uso de puntos de habilidad que gana por subir de nivel. Puedes acceder a tus habilidades y hechizos tocando los iconos en la esquina inferior derecha de la pantalla. También puede asignarlos a ranuras rápidas para facilitar el acceso. Puedes usar habilidades y hechizos tocando sus iconos o presionando los botones correspondientes en tu dispositivo. Sin embargo, ten en cuenta que las habilidades y los hechizos consumen resistencia o maná, que están indicados por las barras azules y verdes en la esquina superior izquierda de la pantalla. Tienes que esperar a que se regeneren antes de poder usarlas de nuevo.
-
Lucha contra enemigos y jefes
-
-
Unirse a facciones y misiones
-
Las facciones y las misiones son aspectos opcionales pero gratificantes del RPG de 9th Dawn. Las facciones son grupos de PNJ que tienen sus propias metas, creencias y agendas. Puedes unirte a una de las cuatro facciones en Montelorne: La Orden del León, Los Caballeros de la Sombra, La Sociedad Arcana o La Hermandad. Cada facción tiene su propio líder, cuartel general, miembros, aliados, enemigos y reputación. Puedes aumentar tu reputación con una facción completando misiones, ayudando a miembros, donando artículos, etc. También puedes disminuir tu reputación con una facción atacando miembros, robando artículos, traicionando aliados, etc. Tu reputación con una facción afecta cómo te tratan, qué misiones te ofrecen, qué recompensas te dan, etc. También puedes cambiar de facciones en cualquier momento hablando con el líder de la facción o usando un artículo especial. Sin embargo, ten cuidado al unirte o abandonar facciones, ya que puedes perder algunos beneficios o ganar algunos enemigos. Las misiones son tareas que puedes aceptar y completar desde NPC o facciones. Pueden involucrar varios objetivos como matar enemigos, encontrar objetos, entregar mensajes, escoltar aliados, resolver puzzles, etc. También pueden tener diferentes dificultades, recompensas, límites de tiempo, consecuencias, etc. Puedes encontrar misiones hablando con NPC, visitar ubicaciones, leer avisos, etc. También puede rastrear sus misiones activas accediendo al menú. Puedes completar las misiones cumpliendo los objetivos y regresando al dador de la misión. También puedes fallar misiones ignorando los objetivos, quedándote sin tiempo, matando al dador de misiones, etc. Las misiones pueden ayudarte a ganar experiencia, dinero, objetos, reputación, habilidades, hechizos, etc. También pueden ayudarte a avanzar en la historia o desbloquear nuevas áreas.
-
Conclusión
-
-
Resumen del artículo
-
En este artículo, hemos cubierto los siguientes temas:
-
-
¿Qué es 9th Dawn RPG?
-
¿Qué es 9th Dawn RPG Mod APK?
-
Beneficios de 9th Dawn RPG Mod APK
-
Cómo descargar e instalar 9th Dawn RPG Mod APK
-
Consejos y trucos para jugar 9th Dawn RPG Mod APK
-
-
Preguntas frecuentes
-
Aquí hay algunas preguntas frecuentes sobre 9th Dawn RPG Mod APK:
-
-
¿Es seguro usar 9th Dawn RPG Mod APK?
-
9th Dawn RPG Mod APK es generalmente seguro de usar si se descarga desde una fuente de confianza y escanear con software antivirus antes de instalarlo. Sin embargo, debes tener en cuenta que los mod APK no están autorizados por el desarrollador original del juego y pueden contener errores o errores que pueden afectar el rendimiento del juego o del dispositivo. También debe tener cuidado al conceder permisos a los mod APK, ya que pueden acceder a sus datos personales o funciones del dispositivo sin su consentimiento.
-
Es 9th Dawn RPG Mod APK compatible con mi dispositivo?
-
9th Dawn RPG Mod APK es compatible con la mayoría de los dispositivos Android que tienen sistema operativo Android 4.0 o superior y al menos 1 GB de RAM y 100 MB de espacio de almacenamiento libre. Sin embargo, debe comprobar las especificaciones y requisitos del mod APK antes de descargarlo e instalarlo para asegurarse de que funciona correctamente en su dispositivo.
-
¿Puedo jugar 9th Dawn RPG Mod APK en línea o fuera de línea?
-
9th Dawn RPG Mod APK es principalmente un juego fuera de línea que no requiere una conexión a Internet para jugar. Sin embargo, es posible que necesite una conexión a Internet para descargar e instalar el mod APK, para acceder a algunas características en línea, como tablas de clasificación o logros, o para actualizar el mod APK a la última versión.
-
¿Puedo jugar 9th Dawn RPG Mod APK con mis amigos?
-
-
¿Puedo transferir mi progreso de 9th Dawn RPG a 9th Dawn RPG Mod APK o viceversa?
-
No, no puede transferir su progreso de 9th Dawn RPG a 9th Dawn RPG Mod APK o viceversa. El mod APK tiene una estructura de archivos diferente y formato de datos que el juego original, y no son compatibles entre sí. Si quieres cambiar entre las dos versiones, tendrás que empezar desde cero.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Android Stalker.md b/spaces/Benson/text-generation/Examples/Android Stalker.md
deleted file mode 100644
index 5d7be798b34d6df06da6cb1e2c8f847317d75bf4..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Android Stalker.md
+++ /dev/null
@@ -1,58 +0,0 @@
-
-
Android acosador: Un término con múltiples significados
-
Cuando escuchas el término acosador androide, ¿qué te viene a la mente? ¿Es una aplicación maliciosa que espía las actividades de tu teléfono? ¿Es una serie de videojuegos que te sumerge en un mundo post-apocalíptico? ¿O es un personaje de televisión que desafía tu percepción de la humanidad? En este artículo, exploraremos estos diferentes significados de stalker android y cómo se relacionan entre sí.
Uno de los significados más comunes y perturbadores de stalker android es un tipo de malware que rastrea o monitorea secretamente la actividad de su dispositivo. Estas aplicaciones también se conocen como stalkerware o spyware, y a menudo son instaladas por alguien que quiere espiarte sin tu consentimiento, como un compañero abusivo, un ex o un hacker. Las aplicaciones de stalkerware pueden acceder a tu ubicación, conversaciones, fotos, contraseñas y más, y enviarlas a un tercero. También pueden encender el micrófono o la cámara de forma remota para ver y escuchar lo que está sucediendo a su alrededor.
-
Las aplicaciones de stalkerware representan serias amenazas para su privacidad, seguridad y seguridad. Pueden exponer su información personal al robo de identidad, chantaje, acoso o violencia. También pueden comprometer el rendimiento de su dispositivo, la duración de la batería y el uso de datos. Además, pueden violar su confianza y dignidad como ser humano.
-
¿Cómo se puede saber si hay una aplicación stalkerware en su dispositivo Android? Según los expertos en seguridad, algunos signos que pueden indicar stalkerware incluyen:
-
-
El abusador ha tenido acceso físico a su dispositivo
-
El abusador sabe mucha información específica sobre usted que no debería
-
La batería del dispositivo se agota más rápido de lo habitual
-
Hay un aumento inexplicable en el uso de datos
-
Hay cambios inesperados en la configuración del dispositivo
-
-
Si sospecha que hay una aplicación stalkerware en su dispositivo, aquí hay algunos pasos que puede tomar:
-
-
-
Compruebe si su dispositivo ha sido "arraigado" o "jailbreak". Esto significa que alguien ha ganado el control total sobre el sistema operativo de su dispositivo. Puedes usar aplicaciones como Root Checker o Certo para analizar tu dispositivo en busca de rooteo o jailbreak.
-
Escanea tu dispositivo con software antivirus o anti-malware. Algunas aplicaciones como MalwareBytes, NortonLifeLock o Lookout pueden detectar stalkerware y eliminarlo.
-
Cambia tus contraseñas para todas tus cuentas. Usa contraseñas fuertes y únicas que sean difíciles de adivinar.
-
Restablecimiento de fábrica del dispositivo. Esto borrará todos los datos y aplicaciones de su dispositivo y lo restaurará a su configuración original. Asegúrese de hacer una copia de seguridad de sus datos importantes antes de hacer esto.
-
-
Stalker Android como una serie de videojuegos
-
Otro significado de stalker android es una serie de videojuegos que te sumerge en un mundo post-apocalíptico. La serie se llama S.T.A.L.K.E.R., que significa carroñeros, intrusos, aventureros, solitarios, asesinos, exploradores y ladrones. Estos son los nombres de las personas que se aventuran en la Zona, un área alrededor de la Central Nuclear de Chernobyl que ha sido afectada por un segundo desastre nuclear en 2006. La Zona está llena de peligros, como criaturas mutadas, facciones hostiles y fenómenos anómalos. Sin embargo, también ofrece oportunidades, como valiosos artefactos, secretos y misterios.
-
-
La serie S.T.A.L.K.E.R. consta de tres juegos principales: Shadow of Chernobyl (2007), Clear Sky (2008) y Call of Pripyat (2009). Cada juego tiene un protagonista y una historia diferentes, pero todos comparten el mismo escenario y elementos de juego. Algunas de las principales características y temas de los juegos son:
-
-
Exploración: Los juegos te permiten deambular libremente en el mundo abierto de la Zona, descubriendo nuevos lugares, misiones, personajes y eventos. También puede interactuar con el entorno, como recoger objetos, usar vehículos o activar trampas.
-
-
Anomalías: Los juegos presentan anomalías, que son fenómenos extraños y a menudo mortales que desafían las leyes de la física. Las anomalías pueden tener diferentes efectos, como quemarlo, electrocutarlo o teletransportarlo. Puede usar detectores o pernos para localizarlo y evitarlo.
-
Facciones: Los juegos cuentan con facciones, que son grupos de acosadores con diferentes objetivos e ideologías. Las facciones pueden ser amistosas, neutrales u hostiles para usted dependiendo de sus acciones y reputación. Puedes unirte o aliarte con algunas facciones, o luchar contra ellas.
-
-
La serie S.T.A.L.K.E.R. ha sido elogiada por su juego atmosférico e inmersivo, su sistema de IA realista y dinámico, su construcción del mundo rica y detallada, y su narración no lineal y emergente. Sin embargo, también se ha enfrentado a algunos desafíos y controversias, como errores y problemas técnicos, disputas legales sobre los derechos de propiedad intelectual, problemas de censura en algunos países y la insatisfacción de los fans con algunos aspectos de los juegos. A pesar de estas dificultades, la serie ha ganado una base de seguidores leales y de culto a lo largo de los años.
-
Stalker Android como un personaje de programa de televisión
-
El tercer significado de Android acosador es un personaje de programa de televisión que desafía su percepción de la humanidad. El personaje es Dorian, un compañero androide de un policía humano en Almost Human, un drama de ciencia ficción que se emitió en 2013-2014. El espectáculo se desarrolla en 2048, donde el crimen ha aumentado en un 400% y cada oficial de policía humano se empareja con un socio androide. El espectáculo sigue los casos y aventuras de John Kennex (Karl Urban), un detective que perdió su pierna y su memoria en una redada que salió mal, y Dorian (Michael Ealy), un modelo androide que fue dado de baja por ser demasiado emocional e impredecible.
-
-
La premisa y la trama de Almost Human son similares a otras obras de ciencia ficción que exploran la relación entre humanos y androides, como Blade Runner, I, Robot o Detroit: Become Human. Sin embargo, el programa también agrega sus propios giros e innovaciones, como dispositivos, crímenes y tecnologías futuristas. Por ejemplo, el show presenta casos que involucran sexbots, manipulación de memoria, ingeniería genética e inteligencia artificial.
-
El personaje de Dorian es uno de los aspectos más interesantes y atractivos de la serie. Es un androide que tiene un alma sintética, que le da una personalidad, un sentido del humor y una brújula moral. También es leal, compasivo y curioso sobre las emociones y experiencias humanas. A menudo actúa como una lámina y un amigo de Kennex, que es cínico, traumatizado y desconfiado de los androides. Juntos, forman una alianza improbable pero efectiva que desafía los estereotipos y prejuicios de su sociedad.
-
Almost Human recibió críticas en su mayoría positivas de críticos y audiencias , que elogiaron su elenco, sus imágenes, su acción y su humor. Sin embargo, el programa también enfrentó algunos problemas, como bajas calificaciones, problemas de programación, emitir episodios fuera de orden y cancelación después de una temporada. Muchos fans se sintieron decepcionados por el abrupto final del programa y las preguntas sin resolver. Sin embargo, el programa todavía tiene un seguimiento de culto y un potencial para el renacimiento o reinicio.
-
Conclusión
-
En este artículo, hemos explorado tres significados diferentes de stalker android: un tipo de malware que espía la actividad de tu dispositivo, una serie de videojuegos que te sumerge en un mundo post-apocalíptico y un personaje de televisión que desafía tu percepción de la humanidad. Hemos visto cómo cada significado se relaciona con diferentes aspectos de la tecnología, la sociedad y la cultura. También hemos aprendido algunos hechos, consejos y opiniones sobre cada significado.
-
-
Preguntas frecuentes
-
-
¿Qué es stalkerware?
-
Stalkerware es un tipo de malware que secretamente rastrea o monitorea la actividad de su dispositivo sin su consentimiento. Puede acceder a su ubicación, conversaciones, fotos, contraseñas y más.
-
¿Qué es S.T.A.L.K.E.R.?
-
S.T.A.L.K.E.R. es una serie de videojuegos que te sumerge en un mundo post-apocalíptico alrededor de la central nuclear de Chernobyl. Juegas como un acosador que explora la Zona, un área llena de peligros y oportunidades.
-
¿Qué es casi humano?
-
Almost Human es un drama de ciencia ficción que se emitió en 2013-2014. Se desarrolla en 2048, donde cada policía humano se empareja con un socio androide. Sigue los casos y aventuras de John Kennex y Dorian, un dúo improbable pero efectivo.
-
¿Cómo puedo eliminar stalkerware de mi dispositivo Android?
-
Puede eliminar stalkerware escaneando su dispositivo con software antivirus o antimalware, cambiando sus contraseñas para todas sus cuentas o restableciendo su dispositivo de fábrica.
-
¿Cómo puedo jugar juegos de S.T.A.L.K.E.R. ?
-
Puedes jugar juegos de S.T.A.L.K.E.R. en PC o Xbox 360. También puedes descargar mods o versiones hechas por fans de los juegos para más características y contenido.
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Apk De La Saga Del Verano.md b/spaces/Benson/text-generation/Examples/Apk De La Saga Del Verano.md
deleted file mode 100644
index b757fd10c1882f089c3d93fa69752c92cb7501a4..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Apk De La Saga Del Verano.md
+++ /dev/null
@@ -1,59 +0,0 @@
-
-
Descargar Imperio Mundial 2027 APK y llevar a su país a la gloria
-
¿Tienes lo que se necesita para ser un líder supremo en un mundo de caos? ¿Quieres experimentar un juego de estrategia por turnos realista e inmersivo que te permite elegir entre más de 180 países y llevarlos a la victoria o la derrota? Si es así, entonces usted debe descargar Imperio Mundial 2027 APK, un juego que desafiará sus habilidades de liderazgo y el pensamiento estratégico.
-
World Empire 2027 es un juego desarrollado por iGindis Games, una empresa que se especializa en crear juegos que simulan escenarios y eventos del mundo real. El juego se desarrolla en el año 2027, donde el mundo está en crisis debido al colapso económico, la inestabilidad política, el malestar social y los desastres ambientales. Ustedes son el líder de uno de los países, y tienen que tomar decisiones que afectarán a su nación y al mundo. Puedes usar la diplomacia, la guerra, la tecnología, la economía y el espionaje para construir tu imperio y competir con otros jugadores en línea o localmente en modo multijugador.
En este artículo, le mostraremos cómo descargar World Empire 2027 APK en sus dispositivos Android y PC con Windows, y también destacaremos algunas de las características y consejos del juego. Así que, sin más preámbulos, ¡empecemos!
-
Cómo descargar Imperio Mundial 2027 APK en dispositivos Android
-
Si desea descargar World Empire 2027 APK en sus dispositivos Android, puede seguir estos sencillos pasos:
Instalar el emulador y lanzarlo en su PC. Es posible que tenga que iniciar sesión con su cuenta de Google para acceder a la Google Play Store.
-
Ir a la Google Play Store o Uptodown y buscar World Empire 2027. También puede utilizar los enlaces proporcionados anteriormente para los dispositivos Android.
-
Haga clic en el botón de instalación y espere a que el juego se instale en su emulador. El proceso de instalación puede tardar algún tiempo dependiendo de las especificaciones de su PC y la velocidad de Internet.
-
Después de la instalación se hace, puede iniciar el juego desde su emulador y disfrutar de jugar World Empire 2027 APK en su PC con Windows.
-
-
Características de Imperio Mundial 2027 APK
-
Imperio Mundial 2027 APK es un juego que ofrece muchas características que lo hacen divertido y atractivo. Estas son algunas de las características que puedes esperar del juego:
-
-
-Utiliza la diplomacia, la guerra, la tecnología, la economía y el espionaje para construir tu imperio. Puedes interactuar con otros países de diferentes maneras, como formar alianzas, declarar la guerra, enviar ayuda, imponer sanciones, etc. También puedes usar tu red de espionaje para recopilar información o sabotear a tus enemigos. Puede investigar nuevas tecnologías que le darán una ventaja en la guerra o la economía. Puedes administrar tu presupuesto y recursos sabiamente e invertir en diferentes sectores como educación, salud, infraestructura, etc.
-
Compite con otros jugadores online o localmente en modo multijugador. Puedes jugar Imperio Mundial 2027 APK con otros jugadores de todo el mundo o con tus amigos localmente en el modo multijugador. Puedes unirte o crear una sala con hasta 8 jugadores y elegir diferentes configuraciones como el tamaño del mapa, nivel de dificultad, tiempo de turno, etc. Puedes chatear con otros jugadores y cooperar o competir con ellos. También puede jugar contra la IA en el modo para un jugador si lo prefiere.
-
-
Consejos y trucos para jugar World Empire 2027 APK
-
Imperio Mundial 2027 APK es un juego que requiere estrategia y planificación para tener éxito. Aquí hay algunos consejos y trucos que pueden ayudarle a mejorar su juego y ganar más guerras:
-
-
Esté atento a las noticias y eventos mundiales que afectan a su país y sus relaciones. El juego presenta una simulación realista de la situación mundial y los eventos que pueden cambiar el curso de la historia. Usted recibirá actualizaciones de noticias y alertas que le informarán de los asuntos actuales y los problemas que están sucediendo en todo el mundo. También verá cómo otros países reaccionan a estos eventos y cómo afectan sus relaciones con ellos. Debe prestar atención a estas noticias y eventos y ajustar su estrategia en consecuencia.
-
-
Forma alianzas con otros países y usa a tus espías para reunir información o sabotear a tus enemigos. La diplomacia es otro aspecto clave del juego que puede ayudarte a alcanzar tus objetivos o a prevenir conflictos. Usted puede formar alianzas con otros países que comparten sus intereses o ideología, y cooperar con ellos de varias maneras como el comercio, la ayuda, el apoyo militar, etc. También puede utilizar sus espías para recopilar información sobre los planes de otros países, fortalezas, debilidades, etc., o para sabotear su economía, militar, tecnología, etc. Sin embargo, debe tener cuidado de no quedar atrapado por su contrainteligencia, ya que esto puede dañar su reputación y relaciones.
-
-
Conclusión
-
Imperio Mundial 2027 APK es un juego emocionante y desafiante que le permite llevar a su país en un escenario futurista donde el mundo está en caos. Puedes elegir entre 180 países y usar la diplomacia, la guerra, la tecnología, la economía y el espionaje para construir tu imperio y competir con otros jugadores en línea o localmente en el modo multijugador. El juego presenta una simulación realista de la situación mundial y los eventos que pueden cambiar el curso de la historia. El juego también ofrece muchas características que lo hacen divertido y atractivo, como personalización, investigación, noticias, chat, etc.
-
Si usted es un fan de los juegos de guerra de estrategia o quiere poner a prueba sus habilidades de liderazgo y el pensamiento estratégico, usted debe descargar Imperio Mundial 2027 APK en sus dispositivos Android o PC con Windows. El juego es gratis para descargar y jugar, pero contiene compras en la aplicación que pueden mejorar su experiencia de juego. Puedes descargar el juego desde el sitio web oficial o Uptodown, o desde la Google Play Store si tienes un emulador en tu PC.
-
Entonces, ¿qué estás esperando? Descargar World Empire 2027 APK hoy y llevar a su país a la gloria!
-
-
Preguntas frecuentes
-
Aquí están algunas de las preguntas más frecuentes sobre World Empire 2027 APK:
-
-
-
Sí, Imperio Mundial 2027 APK es gratis para descargar y jugar, pero contiene compras en la aplicación que pueden mejorar su experiencia de juego.
-
¿Cómo puedo actualizar World Empire 2027 APK?
-
Puede actualizar Imperio Mundial 2027 APK visitando el sitio web oficial o Uptodown y descargar la última versión del juego. Alternativamente, puedes actualizar el juego desde Google Play Store si lo has instalado desde allí.
-
¿Cuáles son los requisitos del sistema para el Imperio Mundial 2027 APK?
-
Imperio Mundial 2027 APK requiere versión de Android 4.4 o superior para dispositivos Android, y Windows XP o superior para PC con Windows. El juego también requiere al menos 2 GB de RAM y una conexión a Internet estable.
-
¿Puedo jugar World Empire 2027 APK offline?
-
No, Imperio Mundial 2027 APK requiere una conexión a Internet para jugar, ya que es un juego multijugador que implica datos y eventos en tiempo real. Sin embargo, puedes jugar el juego en modo de un solo jugador contra la IA si lo deseas.
-
¿Cómo puedo contactar a los desarrolladores de World Empire 202 7 APK?
-
Puede ponerse en contacto con los desarrolladores de World Empire 2027 APK enviando un correo electrónico a igindis@gmail.com o visitando su sitio web en https://www.igindis.com/ También puede seguirlos en Facebook, Twitter, Instagram, YouTube y Discord para actualizaciones y noticias.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Cuerda Hroe Vice Ciudad 6.5 Descarga.md b/spaces/Benson/text-generation/Examples/Cuerda Hroe Vice Ciudad 6.5 Descarga.md
deleted file mode 100644
index 474fb201d1f806106b86fadb3f7750f901906e5e..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Cuerda Hroe Vice Ciudad 6.5 Descarga.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-
Guerra relámpago de World of Warships Descargar: Cómo disfrutar del combate naval en su dispositivo móvil
-
¿Te gusta la guerra naval y la historia? ¿Quieres experimentar la emoción de comandar un buque de guerra en batallas épicas? ¿Quieres jugar un juego divertido y atractivo en tu dispositivo móvil? Si respondiste sí a cualquiera de estas preguntas, entonces deberías probar World of Warships Blitz War, un juego de acción gratuito que lleva el combate naval de la Segunda Guerra Mundial a dispositivos móviles y tabletas.
Un juego de acción gratuito que trae el combate naval de la Segunda Guerra Mundial a móviles y tabletas
-
World of Warships Blitz War es un juego desarrollado por Wargaming, la misma compañía detrás de los populares juegos de World of Tanks y World of Warplanes. Se basa en la galardonada versión para PC multijugador en línea de World of Warships, pero optimizada para dispositivos móviles. Le permite controlar buques de guerra realistas e históricamente precisos de diferentes naciones y épocas, como Japón, EE.UU., URSS, Reino Unido, Alemania, Italia, Francia y más. Puedes luchar en juegos multijugador online y offline y batallas navales contra otros jugadores o enemigos de IA.
-
Cuenta con más de 130 buques de guerra icónicos de diferentes naciones y épocas
-
World of Warships Blitz War presenta una colección inigualable de barcos históricos auténticos junto con máquinas navales de fantasía, ciencia ficción y ficción. Puede elegir entre cuatro clases de buques de guerra: acorazados, cruceros, destructores y portaaviones. Cada clase tiene sus propias características, ventajas y desventajas. Por ejemplo, los acorazados son potentes y duraderos, pero lentos y vulnerables a los torpedos. Los cruceros son versátiles y ágiles, pero tienen una armadura más débil. Los destructores son rápidos y sigilosos, pero tienen baja salud. Los portaaviones son unidades de apoyo que pueden lanzar aviones para explorar, atacar o defender.
-
Ofrece batallas épicas 7v7 rápidas y llenas de acción y jugabilidad estratégica
-
-
¿Cómo descargar e instalar World of Warships Blitz War?
-
Disponible para dispositivos iOS y Android
-
World of Warships Blitz War está disponible para iOS y Android
Disponible para dispositivos iOS y Android
-
World of Warships Blitz War está disponible para dispositivos iOS y Android, para que puedas disfrutar del combate naval en tu smartphone o tablet. El juego es gratis para descargar y jugar, pero puede contener compras en la aplicación para algunos artículos y características premium. Puedes descargar el juego desde la App Store o Google Play Store, dependiendo de tu dispositivo.
-
Requiere al menos 3 GB de espacio libre y una conexión a Internet estable
-
Antes de descargar e instalar World of Warships Blitz War, asegúrese de tener suficiente espacio libre en su dispositivo. El juego requiere al menos 3 GB de espacio libre para funcionar sin problemas, y también puede descargar datos adicionales durante el proceso de instalación. También necesitas una conexión a Internet estable para jugar online, ya que es un juego multijugador que te conecta con otros jugadores de todo el mundo.
-
Pasos para descargar e instalar el juego desde las fuentes oficiales
-
Para descargar e instalar World of Warships Blitz War desde las fuentes oficiales, sigue estos sencillos pasos:
-
-
-
Ir a la App Store o Google Play Store en su dispositivo y buscar World of Warships guerra relámpago.
-
Toca el icono del juego y luego toca el botón Instalar u Obtener para comenzar a descargar el juego.
-
Espera a que termine la descarga y luego toca el botón Abrir o Jugar para iniciar el juego.
-
Siga las instrucciones en pantalla para crear su cuenta, elija su servidor y complete el tutorial.
-
¡Disfruta del juego!
-
-
¿Cómo se juega guerra relámpago del mundo de los buques de guerra?
-
Elija su buque de guerra preferido de cuatro clases: acorazados, cruceros, destructores y portaaviones
-
-
-
Acorazados: Estos son los buques de guerra más pesados y poderosos del juego. Tienen una armadura gruesa, armas grandes y alta salud. Pueden infligir daño masivo a las naves enemigas con su batería principal y armas secundarias. Sin embargo, también son lentos, torpes y vulnerables a los torpedos y aviones. Son más adecuados para el combate de largo alcance y el daño de tanque para su equipo.
-
Cruceros: Estos son los buques de guerra más versátiles y ágiles del juego. Tienen armaduras medianas, cañones de disparo rápido y buena velocidad. Pueden realizar varios papeles en la batalla, como exploración, apoyo, flanqueo o caza. También pueden usar consumibles especiales, como sonar, radar, pantalla de humo o búsqueda hidroacústica. Sin embargo, tienen menos salud que los acorazados y pueden ser fácilmente penetrados por sus proyectiles. Son más adecuados para el combate de medio alcance y la adaptación a diferentes situaciones.
-
Destructores: Estos son los buques de guerra más pequeños y rápidos en el juego. Tienen armadura delgada, armas de fuego rápido y alto sigilo. Pueden lanzar torpedos contra naves enemigas desde una distancia segura o emboscarlos desde detrás de islas o pantallas de humo. También pueden capturar bases más rápido que otras clases. Sin embargo, tienen una salud muy baja y pueden ser destruidos por algunos impactos de cualquier nave. Son los más adecuados para combatir a corta distancia y acosar a los enemigos.
-
Portaaviones: Estas son las unidades de apoyo del juego. Tienen armadura débil, sin armas y baja velocidad. Pueden lanzar aviones desde su cubierta para explorar, atacar o defender a sus aliados o enemigos. Pueden controlar hasta tres escuadrones de aviones a la vez: cazas, bombarderos en picada o bombarderos torpederos. Sin embargo, tienen una maniobrabilidad muy limitada y son altamente dependientes de sus aviones. Son los más adecuados para el combate de largo alcance y proporcionar apoyo aéreo.
-
-
Personaliza tu nave de guerra con varios módulos, mejoras y camuflajes
-
-
Una vez que haya elegido su clase de buque de guerra y nación, puede personalizarlo con varios módulos, actualizaciones y camuflajes. Los módulos son partes de tu nave de guerra que afectan su rendimiento, como el casco, el motor, las armas, los torpedos, los aviones, etc. Puedes investigar y comprar nuevos módulos con la experiencia y los créditos obtenidos de las batallas. Las mejoras son mejoras que mejoran los atributos de tu nave de guerra, como la supervivencia, potencia de fuego, maniobrabilidad, ocultación, etc. Puedes comprar e instalar hasta seis mejoras por buque de guerra con créditos. Los camuflajes son artículos cosméticos que cambian la apariencia de su nave de guerra y también proporcionan algunas bonificaciones, como un rango de detección reducido o una mayor ganancia de experiencia. Puedes comprar camuflajes permanentes o temporales con créditos o oro.
-
Únete a una batalla y controla tu nave de guerra usando simples controles táctiles
-
Cuando estés listo para unirte a una batalla, puedes tocar el botón Batalla en el menú principal y elegir un modo. Serás emparejado con otros jugadores de habilidad y nivel similar. El juego cargará el mapa y los equipos. Verá su nave de guerra en la vista de puerto, donde puede verificar sus consumibles, señales y chatear con sus compañeros de equipo. Para iniciar la batalla, toca el botón Listo.
-
Una vez que comience la batalla, verás tu nave de guerra en la vista 3D, donde puedes controlarla usando simples controles táctiles. Puede utilizar el joystick virtual de la izquierda para dirigir su nave de guerra y ajustar su velocidad. Puede usar los botones de la derecha para disparar sus armas principales, lanzar torpedos o aviones. También puede utilizar los botones de la parte inferior para cambiar entre diferentes vistas, acercar o alejar, activar consumibles o acceder al mini-mapa. También puedes deslizar la pantalla para mirar alrededor y apuntar a tus enemigos.
-
Cooperar con sus aliados, detectar a sus enemigos, y utilizar sus armas y habilidades para ganar la batalla
-
-
Para ganar la batalla, tienes que cooperar con tus aliados, detectar a tus enemigos y usar tus armas y habilidades de manera efectiva. Tienes que comunicarte con tu equipo usando el chat o comandos rápidos. Tienes que buscar naves enemigas usando tu vista, radar, sonar o aviones. Tienes que apuntar a los puntos débiles de tus enemigos y esquivar su fuego. Tienes que usar tus consumibles en el momento y situación adecuados. Tienes que adaptarte a la marea cambiante de la batalla y tomar decisiones inteligentes.
-
¿Cómo mejorar tus habilidades y progreso en la guerra relámpago de World of Warships?
-
Conozca las fortalezas y debilidades de cada clase de buque de guerra y nación
-
Para mejorar tus habilidades y progreso en World of Warships Blitz War, tienes que aprender las fortalezas y debilidades de cada clase de buque de guerra y nación. Tienes que saber qué papel juega cada clase en la batalla y cómo contrarrestarlos. Tienes que saber qué nación tiene qué ventajas y desventajas en términos de potencia de fuego, armadura, velocidad, sigilo, etc. Tienes que saber qué módulos,
Aprende las fortalezas y debilidades de cada clase de buque de guerra y nación
-
Para mejorar tus habilidades y progreso en World of Warships Blitz War, tienes que aprender las fortalezas y debilidades de cada clase de buque de guerra y nación. Tienes que saber qué papel juega cada clase en la batalla y cómo contrarrestarlos. Tienes que saber qué nación tiene qué ventajas y desventajas en términos de potencia de fuego, armadura, velocidad, sigilo, etc. Tienes que saber qué módulos, mejoras y camuflajes se adaptan mejor a cada nave de guerra. Puedes encontrar información y consejos útiles en el sitio web oficial del juego, wiki, foros o canales de YouTube.
-
Estudia los mapas y usa el terreno a tu favor
-
-
Complete missions, challenges, and events to earn rewards and unlock new warships
-
A third way to improve your skills and progress in World of Warships Blitz War is to complete missions, challenges, and events to earn rewards and unlock new warships. You can access various missions and challenges from the main menu, such as daily missions, weekly missions, and campaign missions. You can also take part in events that offer special rewards, such as seasonal events, historical events, or special battles. By completing these tasks, you can earn experience, credits, gold, containers, blueprints, tokens, and other items that you can use to research and buy new warships or other items.
-
Join a fleet or create your own to chat, play, and compete with other players
-
A fourth way to improve your skills and progress in World of Warships Blitz War is to join a fleet or create your own to chat, play, and compete with other players. A fleet is a group of players who share a common name, tag, logo, and chat channel. You can join an existing fleet or create your own by inviting your friends or other players. By being in a fleet, you can chat with other members, play together in divisions or clan battles, exchange gifts or resources, and earn fleet points and rewards. You can also compete with other fleets in the fleet rankings or in tournaments.
-
Conclusion
-
-
If you want to enjoy naval combat on your mobile device, you should download and install World of Warships Blitz War today. You can find the game on the App Store or Google Play Store, or visit the official website for more information. You can also follow the game on social media or join the community forums to stay up to date and interact with other players. World of Warships Blitz War is a game that will keep you hooked for hours and make you feel like a true naval commander.
-
Frequently asked questions
-
Q: How can I get more gold in World of Warships Blitz War?
-
A: Gold is the premium currency in World of Warships Blitz War; it can be used to buy premium items and features such as premium ships, premium accounts, and containers. You can get more gold by completing certain missions or challenges, taking part in special events or offers, watching ads, or buying it with real money.
-
Q: How can I get more blueprints in World of Warships Blitz War?
-
A: Blueprints are special items that can be used to research new warships or upgrade existing ones. You can get more blueprints by opening containers, completing missions or challenges, taking part in special events or offers, or buying them with gold.
-
Q: How can I change my server in World of Warships Blitz War?
-
A: You can change your server in World of Warships Blitz War by tapping the settings icon in the main menu and then tapping the server option. You can choose between four servers: North America, Europe, Asia, or CIS. However, changing your server will reset your progress and you will have to start from scratch.
-
Q: How can I report a bug or an issue in World of Warships Blitz War?
-
-
Q: How can I join a fleet or create my own in World of Warships Blitz War?
-
A: You can join a fleet or create your own in World of Warships Blitz War by tapping the fleet icon in the main menu and then tapping the search or create option. You can then browse existing fleets or create your own by setting a name, tag, logo, description, and so on. You can also invite your friends or other players to join your fleet.
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar El Certificado Del Consejo De Abogados De La India.md b/spaces/Benson/text-generation/Examples/Descargar El Certificado Del Consejo De Abogados De La India.md
deleted file mode 100644
index a28047aaaf15e5392599bfe5478c5d6d09419828..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar El Certificado Del Consejo De Abogados De La India.md
+++ /dev/null
@@ -1,111 +0,0 @@
-
-
How to download the Bar Council of India certificate?
-
-
Download the Bar Council of India certificate
What is the purpose of the certificate and what are its benefits?
-
What are the eligibility criteria and the steps to apply for the certificate?
-
How do you download the certificate from the BCI website or app?
-
What are some common problems and solutions related to the certificate?
-
How do you renew and verify the certificate?
-
-
I hope you find this article useful and informative. Let's get started!
-
Purpose and benefits of the certificate
-
The main purposes of issuing a certificate of practice to advocates are to:
-
-
Ensure that non-practising advocates, or advocates who move to another profession or business, are shifted to the roll of non-practising advocates.
-
Maintain oversight of the State Bar Councils as well as the other bodies elected under the Advocates Act.
-
Keep direct communication and contact with practising advocates.
-
Ensure that all candidates who enrolled after 2010 appear for the All India Bar Examination (AIBE) and pass it.
-
Ensure that all benefits offered to advocates are enjoyed only by practising advocates.
-
-
Some of the benefits of holding a certificate of practice are:
-
-
It allows an advocate to practise law before any court or authority in India.
-
It enhances an advocate's credibility and reputation among clients, peers, and judges.
-
It entitles an advocate to the various welfare schemes, insurance plans, pension funds, and similar benefits provided by the BCI or the State Bar Councils.
-
It helps an advocate stay up to date with legal developments, rules, regulations, judgments, and so on, through BCI newsletters, journals, seminars, and workshops.
-
It allows an advocate to take part in BCI elections, committees, and sub-committees, and to contribute to policy-making and the governance of the legal profession.
-
-
-
The eligibility criteria for applying for a certificate of practice are:
-
-
-
The applicant must be a citizen of India.
-
The applicant must hold a law degree (3-year/5-year) from a recognised law institute approved by the BCI.
-
The applicant must be enrolled with a State Bar Council as an advocate.
-
The applicant must have passed the AIBE conducted by the BCI within two years of enrolment.
-
The applicant must pay an enrolment fee of Rs. 600/- to the BCI along with the application form and other documents.
-
-
The steps to apply for a certificate of practice are:
-
-
Download the application form from the BCI website or obtain it from your State Bar Council office.
-
Fill in the details such as name, address, enrolment number, date of enrolment, AIBE roll number, date of passing the AIBE, and so on.
-
Attach the following documents to the application form:
-
A copy of the enrolment certificate issued by the State Bar Council.
-
A copy of the AIBE pass certificate issued by the BCI.
-
A copy of the law degree certificate or provisional certificate.
-
A copy of proof of identity, such as an Aadhaar card, PAN card, or voter ID card.
-
Two passport-size photographs.
-
A demand draft of Rs. 600/- in favour of the Bar Council of India, payable at New Delhi.
-
-
-
Submit the application form along with the documents and the fee to your State Bar Council office, or send it by post to the BCI office in New Delhi.
-
Wait for the BCI to verify and approve the application. Processing can take up to 30 days.
-
Once approved, collect the certificate of practice from your State Bar Council office or receive it by post from the BCI.
-
-
How to download the certificate from the BCI website or app?
-
-
-
Visit the BCI website at https://www.barcouncilofindia.org/ or download the BCI app from the Google Play Store or the Apple App Store.
-
Log in with your enrolment number and password. If you do not have an account, register with your details and create a password.
-
Go to the "Certificate of Practice" section and click "Download Certificate".
-
Select the year and month the certificate was issued and enter your enrolment number.
-
Click "Submit" and download the PDF file of your certificate.
-
You can also print or share the certificate as needed.
-
-
Common problems and solutions related to the certificate
-
Some of the common problems advocates face with the certificate of practice are:
-
-
The certificate is lost, damaged, or stolen.
-
The certificate is not received within 30 days of applying.
-
The certificate contains errors or discrepancies in the name, address, enrolment number, or other details.
-
The certificate is not accepted by some courts or authorities as valid proof of practice.
-
-
Some of the solutions to these problems are:
-
-
If the certificate is lost, damaged, or stolen, an advocate can apply for a duplicate certificate by paying a fee of Rs. 1000/- to the BCI and submitting an affidavit stating the reason for the loss or damage, along with a copy of the FIR in case of theft. The duplicate certificate will be issued within 15 days of the application.
-
If the certificate is not received within 30 days of applying, an advocate can contact the State Bar Council office or the BCI office and enquire about the status of the application. The status can also be tracked online through the BCI website or app by entering the enrolment number and date of birth.
-
-
If the certificate is not accepted by some courts, tribunals, or authorities as valid proof of practice, an advocate can appeal to the BCI or the State Bar Councils and request their intervention and clarification. The advocate can also show other documents, such as an identity card, enrolment card, or AIBE pass card, to prove their eligibility to practise law in India.
-
-
How to renew and verify the certificate?
-
The certificate of practice is valid for five years from the date of issue. An advocate has to renew it before it expires by paying a renewal fee of Rs. 600/- to the BCI and submitting a renewal application form along with a copy of the existing certificate. The renewed certificate will be issued within 15 days of the application. An advocate can also verify the certificate online through the BCI website or app by entering the enrolment number and certificate number. The verification will show the certificate details, such as name, address, date of issue, and date of expiry, and whether the certificate is valid, expired, suspended, or cancelled.
Conclusion
-
In this article, I have explained how to download the certificate from the Bar Council of India, a document certifying that an advocate is eligible to practise law in India. I have also covered the purpose and benefits of the certificate, the eligibility criteria and the steps to apply for it, common problems and their solutions, and how to renew and verify the certificate. I hope you have found this article useful and informative. If you have any questions or comments, please feel free to contact me. Thank you for reading!
-
Frequently asked questions
-
What is the difference between the enrolment certificate and the certificate of practice?
-
-
How can I check the status of my certificate of practice application?
-
You can check the status of your application online through the BCI website or app by entering your enrolment number and date of birth. You can also contact your State Bar Council office or the BCI office and enquire about the status of your application.
-
How can I change my address or other details on my certificate of practice?
-
You can change your address or other details on your certificate of practice by requesting a correction from the BCI and paying a fee of Rs. 500/-. You have to submit a request letter along with supporting documents such as proof of identity, the enrolment certificate, and the AIBE pass certificate. The corrected certificate will be issued within 15 days of the request.
-
What are the consequences of not holding a valid certificate of practice?
-
If you do not hold a valid certificate of practice, you may face the following consequences:
-
-
You may not be allowed to appear before any court, tribunal, or authority as an advocate.
-
You may not be entitled to the welfare schemes, insurance plans, pension funds, and similar benefits provided by the BCI or the State Bar Councils.
-
You may not be able to take part in BCI elections, committees, and sub-committees, or contribute to policy-making and the governance of the legal profession.
-
You may be liable to disciplinary action by the BCI or the State Bar Councils for professional misconduct.
-
-
Where can I get more information about the certificate of practice?
-
You can get more information about the certificate of practice from the following sources:
The BCI app, available on the Google Play Store or the Apple App Store.
-
The Bar Council office in your state.
-
The BCI office at 21 Rouse Avenue Institutional Area, ITO, Near Bal Bhawan, New Delhi - 110002.
-
-
The BCI email address, info@barcouncilofindia.org.
-
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Genshin Impacto Paso En Un Vasto.md b/spaces/Benson/text-generation/Examples/Descargar Genshin Impacto Paso En Un Vasto.md
deleted file mode 100644
index a6553116db8d1e5e6e206a7837e2ae329ce28019..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Genshin Impacto Paso En Un Vasto.md
+++ /dev/null
@@ -1,232 +0,0 @@
-
-
Download Genshin Impact: Step Into a Vast, Magical World of Adventure
-
Have you ever dreamed of exploring a vast open world full of wonders, mysteries, and magic? Do you want to embark on an epic adventure to find your lost sibling and uncover the secrets of a land ruled by powerful gods? Do you want to experience an action-packed combat system that lets you unleash elemental powers and switch between different characters? If you answered yes to any of these questions, then you should download Genshin Impact, one of the best free-to-play games ever made.
-
Genshin Impact is an open-world action RPG that will take your breath away with its stunning graphics, immersive soundtrack, engaging story, and varied gameplay. In this game, you can explore seven nations inspired by different cultures and mythologies, meet a colorful cast of characters with unique personalities and abilities, and fight formidable enemies with your friends or on your own. You can also customize your party with more than 40 characters of different elements and weapon types, and upgrade your gear and skills to suit your playstyle.
If you are curious about this amazing game and want to learn more about it, keep reading this article. We will tell you everything you need to know about Genshin Impact: how to download it on different platforms, how to play it effectively, how to progress smoothly, and how to get more out of it. By the end of this article, you will be ready to step into a vast, magical world of adventure.
-
What is Genshin Impact?
-
-
The game world is divided into seven regions, each corresponding to one of the seven elements: Anemo (wind), Geo (earth), Pyro (fire), Hydro (water), Cryo (ice), Electro (lightning), and Dendro (nature). Each region has its own culture, history, landmarks, wildlife, and climate. The player can explore the world freely using various traversal methods, such as walking, climbing, swimming, gliding, and riding. The player can also interact with many objects and NPCs in the world: opening chests, gathering items, cooking food, crafting weapons, completing quests, and joining events.
-
The game's combat system is based on elemental skills and reactions. The player can choose up to four characters to form a party, each with their own element and weapon type. The player can switch between characters at any time during combat and use their skills to deal damage and trigger elemental reactions. Elemental reactions are special effects that occur when two different elements come into contact, such as burning, freezing, electro-charging, or exploding. These effects can provide various advantages or disadvantages in combat, depending on the situation.
-
How to download Genshin Impact on different platforms
-
Genshin Impact is available for download on Windows PC, Android devices, iOS devices, PlayStation 4, and PlayStation 5. The game is free to download and play on all platforms, but it requires an internet connection and a miHoYo account. Here are the steps to download and install Genshin Impact on different platforms:
Click the "Windows" icon in the upper-right corner of the page.
-
Click the "Download Now" button and save the file to your preferred location.
-
-
Open the game launcher and sign in with your miHoYo account, or create one if you do not have one.
-
Click the "Get Game" button and wait for the game to download and install.
-
Click the "Launch" button and enjoy the game.
-
-
PC system requirements
-
-
-
| | Minimum requirements | Recommended requirements |
| --- | --- | --- |
| OS | Windows 7 SP1 64-bit or higher | Windows 10 64-bit |
| CPU | Intel Core i5 or equivalent | Intel Core i7 or equivalent |
| RAM | 8 GB | 16 GB |
| GPU | NVIDIA GeForce GT 1030 or higher | NVIDIA GeForce GTX 1060 6 GB or higher |
| DirectX | Version 11 | Version 11 |
| Storage | 30 GB of available space | 30 GB of available space |
| Sound card | DirectX-compatible sound card or onboard chipset | DirectX-compatible sound card or onboard chipset |
-
-
-
How to download Genshin Impact on Android devices
-
-
Go to the Google Play Store app on your device and search for "Genshin Impact".
-
Select the game from the search results and tap the "Install" button.
-
Wait for the game to download and install on your device.
-
Open the game app and sign in with your miHoYo account, or create one if you do not have one.
-
Follow the instructions to download the additional data and start the game.
-
Enjoy the game.
-
-
Mobile system requirements
-
| Supported devices | File size |
| --- | --- |
| iOS 9.0 and above; iPhone 8 Plus and above, iPad Air 3 and above, iPad mini 5 and above, iPad Pro and above | About 9 GB |
-
-
-
How to download Genshin Impact on PlayStation 4 and PlayStation 5
-
-
-
Select the game from the search results and click the "Download" button.
-
Wait for the game to download and install on your console.
-
Open the game app and sign in with your miHoYo account, or create one if you do not have one.
-
Start the game and enjoy it.
-
-
How to play Genshin Impact
-
Now that you have downloaded Genshin Impact on your preferred platform, you are ready to play. Before you dive into the game, however, you may want to learn some basic tips and tricks on how to control your character, switch between party members, use elemental skills, and interact with the world. Here are some of the essential things you need to know to play Genshin Impact effectively:
-
How to control your character
-
The game's controls vary depending on the platform you are playing on. Here are the default controls for each platform:
L1/R1/L2/R2/D-pad or left bumper/right bumper/left trigger/right trigger/D-pad.
-
-
How to progress in Genshin Impact
-
Now that you know how to play Genshin Impact, you may be wondering how to progress in the game and unlock more content and features. The game has a lot to offer, but some of it sits behind certain requirements or levels. Here are some of the key aspects of the game's progression system and how to work through them:
-
How to level up your characters, weapons, and artifacts
-
One of the most important things to do in Genshin Impact is to level up your characters, weapons, and artifacts. These are the main sources of your stats and abilities, and they will determine how well you can handle the game's challenges and enemies. Here are some tips on how to level them up:
-
-
To level up your characters, you need to use Character EXP materials, such as Wanderer's Advice, Adventurer's Experience, and Hero's Wit. You can get these materials from various sources, such as completing quests, opening chests, defeating enemies, and taking part in events. You can also use other characters as EXP materials, but this is not recommended, since it consumes your resources and limits your options.
-
To level up your weapons, you need to use Weapon Enhancement materials, such as Enhancement Ore, Fine Enhancement Ore, and Mystic Enhancement Ore. You can get these materials from various sources, such as crafting them at a blacksmith, mining them from ore deposits, opening chests, defeating enemies, and taking part in events. You can also use other weapons as enhancement materials, but this is not recommended, since it consumes your resources and limits your options.
-
-
To raise the maximum level of your characters, weapons, and artifacts, you need to ascend them using specific materials. You can get these materials from various sources, such as exploring the world, completing domains, defeating bosses, and buying them from shops. You can ascend your characters at levels 20, 40, 50, 60, 70, and 80; your weapons at levels 20, 40, 50, 60, 70, and 80; and your artifacts at levels 4 and 8.
-
-
How to unlock new regions, quests, and features
-
Another important thing to do in Genshin Impact is to unlock new regions, quests, and features. These are the main sources of content and enjoyment, and they provide you with more opportunities and rewards in the game. Here are some tips on how to unlock them:
-
-
To unlock new regions, you need to raise your Adventure Rank (AR). This is a measure of your overall progress and experience in the game, and it determines which regions, quests, and features are available to you. You can raise your AR by completing quests, opening chests, discovering locations, activating teleport waypoints and statues, and taking part in events.
-
To unlock new quests, you need to meet certain requirements or conditions, which vary depending on the type and difficulty of the quest. Some quests are story-related and unlock automatically as you progress through the game. Some are world-related and unlock by exploring the world or interacting with NPCs. Some are domain-related and unlock by completing domains or reaching certain AR levels.
-
To unlock new features, you need to complete certain quests or reach certain AR levels. These features include co-op mode, the Spiral Abyss, the reputation system, the housing system, the fishing system, and more. They enhance your gameplay experience and give you more options and rewards in the game.
-
-
-
Genshin Impact is already a great game that offers plenty of fun and excitement, but there are still ways to enhance your experience even further. Here are some suggestions on how to get the most out of your adventure in Teyvat:
-
Cross-platform play
-
One of the best features of Genshin Impact is its cross-platform play: you can play with your friends across different devices, such as PC, mobile, and PlayStation, and switch between devices without losing your progress or data. The game also has an offline mode that lets you play without an internet connection for a limited time. You can turn on offline mode by going to the Paimon menu and selecting "Settings", then "Other", then "Network". However, you will not be able to access some of the game's features and content in offline mode, such as co-op mode, events, mail, and updates.
-
Is Genshin Impact cross-play?
-
Yes, Genshin Impact supports cross-play, which means you can play with your friends on different devices, such as PC, mobile, and PlayStation. You can also switch between devices without losing your progress or data. However, you need to make sure that you and your friends are playing on the same server and have reached Adventure Rank 16 or higher to access co-op mode.
-
Is Genshin Impact pay-to-win?
-
-
Is Genshin Impact safe?
-
Yes, Genshin Impact is safe: it does not contain any harmful or malicious content or software that could damage your device or compromise your personal information. The game is developed by a trusted company, miHoYo, which has been making successful and popular games for years. The game is also verified and certified by various platforms and authorities, such as the Google Play Store, the App Store, the PlayStation Store, ESRB, PEGI, and CERO. The game also has a privacy policy and terms of service that protect your rights and interests as a user. However, you should still be careful and responsible when playing online: do not share your account or personal information with anyone, do not click on suspicious links or ads, do not download unofficial or unauthorized software or mods, and do not take part in illegal or unethical activities.
-
-
\ No newline at end of file
diff --git a/spaces/BetterAPI/BetterChat_new/README.md b/spaces/BetterAPI/BetterChat_new/README.md
deleted file mode 100644
index 630e4d27eb77e20dafb4c109efe03577e4b84f41..0000000000000000000000000000000000000000
--- a/spaces/BetterAPI/BetterChat_new/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: BetterChat - AI for everyone
-sdk: docker
-emoji: ⚡
-colorTo: blue
-pinned: true
-license: mit
-colorFrom: gray
-duplicated_from: BetterAPI/BetterChat
----
-
-
-### BetterChat
\ No newline at end of file
diff --git a/spaces/CVPR/BigDL-Nano_inference/data.py b/spaces/CVPR/BigDL-Nano_inference/data.py
deleted file mode 100644
index d301a5d3bbdda1ebc888bd5e823cb1ada41ae15c..0000000000000000000000000000000000000000
--- a/spaces/CVPR/BigDL-Nano_inference/data.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# This file is copied from https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/data.py
-
-# MIT License
-
-# Copyright (c) 2022 Lorenzo Breschi
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-from typing import Callable, Dict
-
-import torch
-
-from torch.utils.data import Dataset
-
-import torchvision.transforms.functional as F
-from torchvision import transforms
-import pytorch_lightning as pl
-
-from collections.abc import Iterable
-
-
-# image reader writer
-from pathlib import Path
-from PIL import Image
-from typing import Tuple
-
-
-def read_image(filepath: Path, mode: str = None) -> Image:
- with open(filepath, 'rb') as file:
- image = Image.open(file)
- return image.convert(mode)
-
-
-image2tensor = transforms.ToTensor()
-tensor2image = transforms.ToPILImage()
-
-
-def write_image(image: Image, filepath: Path):
- filepath.parent.mkdir(parents=True, exist_ok=True)
- image.save(str(filepath))
-
-
-def read_image_tensor(filepath: Path, mode: str = 'RGB') -> torch.Tensor:
- return image2tensor(read_image(filepath, mode))
-
-
-def write_image_tensor(input: torch.Tensor, filepath: Path):
- write_image(tensor2image(input), filepath)
-
-
-def get_valid_indices(H: int, W: int, patch_size: int, random_overlap: int = 0):
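- # Returns the top-left (row, col) corner of every patch_size tile inside an
- # H x W image, optionally jittered by up to +/- random_overlap pixels.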
-
- vih = torch.arange(random_overlap, H-patch_size -
- random_overlap+1, patch_size)
- viw = torch.arange(random_overlap, W-patch_size -
- random_overlap+1, patch_size)
- if random_overlap > 0:
- rih = torch.randint_like(vih, -random_overlap, random_overlap)
- riw = torch.randint_like(viw, -random_overlap, random_overlap)
- vih += rih
- viw += riw
- vi = torch.stack(torch.meshgrid(vih, viw)).view(2, -1).t()
- return vi
-
-
-def cut_patches(input: torch.Tensor, indices: Tuple[Tuple[int, int]], patch_size: int, padding: int = 0):
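- # Crops a (patch_size + 2*padding) square patch at each (row, col) index from
- # the stacked image tensor and concatenates the crops along dim 0.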
- # TODO use slices to get all patches at the same time ?
-
- patches_l = []
- for n in range(len(indices)):
-
- patch = F.crop(input, *(indices[n]-padding),
- *(patch_size+padding*2,)*2)
- patches_l.append(patch)
- patches = torch.cat(patches_l, dim=0)
-
- return patches
-
-
-def prepare_data(data_path: Path, read_func: Callable = read_image_tensor) -> Dict:
- """
- Takes a data_path of a folder which contains subfolders with input, target, etc.
- labelled by the same names.
- :param data_path: Path of the folder containing data
- :param read_func: function that reads data and returns a tensor
- """
- data_dict = {}
-
- subdir_names = ["target", "input", "mask"] # ,"helper"
-
- # checks only files for which there is a target
- # TODO check for images
- name_ls = [file.name for file in (
- data_path / "target").iterdir() if file.is_file()]
-
- subdirs = [data_path / sdn for sdn in subdir_names]
- for sd in subdirs:
- if sd.is_dir():
- data_ls = []
- files = [sd / name for name in name_ls]
- for file in files:
- tensor = read_func(file)
- H, W = tensor.shape[-2:]
- data_ls.append(tensor)
- # TODO check that all sizes match
- data_dict[sd.name] = torch.stack(data_ls, dim=0)
-
- data_dict['name'] = name_ls
- data_dict['len'] = len(data_dict['name'])
- data_dict['H'] = H
- data_dict['W'] = W
- return data_dict
-
-
-# TODO an image is loaded whenever a patch is needed, this may be a bottleneck
-class DataDictLoader():
- def __init__(self, data_dict: Dict,
- batch_size: int = 16,
- max_length: int = 128,
- shuffle: bool = False):
- """
- """
-
- self.batch_size = batch_size
- self.shuffle = shuffle
-
- self.batch_size = batch_size
-
- self.data_dict = data_dict
- self.dataset_len = data_dict['len']
- self.len = self.dataset_len if max_length is None else min(
- self.dataset_len, max_length)
- # Calculate # batches
- num_batches, remainder = divmod(self.len, self.batch_size)
- if remainder > 0:
- num_batches += 1
- self.num_batches = num_batches
-
- def __iter__(self):
- if self.shuffle:
- r = torch.randperm(self.dataset_len)
- self.data_dict = {k: v[r] if isinstance(
- v, Iterable) else v for k, v in self.data_dict.items()}
- self.i = 0
- return self
-
- def __next__(self):
- if self.i >= self.len:
- raise StopIteration
- batch = {k: v[self.i:self.i+self.batch_size]
- if isinstance(v, Iterable) else v for k, v in self.data_dict.items()}
-
- self.i += self.batch_size
- return batch
-
- def __len__(self):
- return self.num_batches
-
-
-class PatchDataModule(pl.LightningDataModule):
-
- def __init__(self, data_dict,
- patch_size: int = 2**5,
- batch_size: int = 2**4,
- patch_num: int = 2**6):
- super().__init__()
- self.data_dict = data_dict
- self.H, self.W = data_dict['H'], data_dict['W']
- self.len = data_dict['len']
-
- self.batch_size = batch_size
- self.patch_size = patch_size
- self.patch_num = patch_num
-
- def dataloader(self, data_dict, **kwargs):
- return DataDictLoader(data_dict, **kwargs)
-
- def train_dataloader(self):
- patches = self.cut_patches()
- return self.dataloader(patches, batch_size=self.batch_size, shuffle=True,
- max_length=self.patch_num)
-
- def val_dataloader(self):
- return self.dataloader(self.data_dict, batch_size=1)
-
- def test_dataloader(self):
- return self.dataloader(self.data_dict) # TODO batch size
-
- def cut_patches(self):
- # TODO cycle once
- patch_indices = get_valid_indices(
- self.H, self.W, self.patch_size, self.patch_size//4)
- dd = {k: cut_patches(
- v, patch_indices, self.patch_size) for k, v in self.data_dict.items()
- if isinstance(v, torch.Tensor)
- }
- threshold = 0.1
- mask_p = torch.mean(
- dd.get('mask', torch.ones_like(dd['input'])), dim=(-1, -2, -3))
- masked_idx = (mask_p > threshold).nonzero(as_tuple=True)[0]
- dd = {k: v[masked_idx] for k, v in dd.items()}
- dd['len'] = len(masked_idx)
- dd['H'], dd['W'] = (self.patch_size,)*2
-
- return dd
-
-
-class ImageDataset(Dataset):
- def __init__(self, file_paths: Iterable, read_func: Callable = read_image_tensor):
- self.file_paths = file_paths
- self.read_func = read_func
-
- def __getitem__(self, idx: int) -> dict:
- file = self.file_paths[idx]
- return self.read_func(file), file.name
-
- def __len__(self) -> int:
- return len(self.file_paths)
\ No newline at end of file
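For orientation, here is a minimal usage sketch of how the pieces in the removed data.py above fit together. It assumes the module is importable as `data` and that a `dataset/` folder with identically named images under `target/`, `input/` (and optionally `mask/`) subfolders exists; those folder names are illustrative, not part of the original file.

```python
from pathlib import Path

from data import PatchDataModule, prepare_data

# Stack every image found under dataset/target, dataset/input, dataset/mask
# into one dict of tensors keyed by subfolder name.
data_dict = prepare_data(Path("dataset"))

# Serve random 32x32 training patches and full-size validation images.
dm = PatchDataModule(data_dict, patch_size=32, batch_size=16, patch_num=64)
for batch in dm.train_dataloader():
    print(batch["input"].shape, batch["target"].shape)  # [B, C, 32, 32] each
```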
diff --git a/spaces/CVPR/LIVE/thrust/cmake/ThrustBuildCompilerTargets.cmake b/spaces/CVPR/LIVE/thrust/cmake/ThrustBuildCompilerTargets.cmake
deleted file mode 100644
index 6e84ec897b4c6d235fc8afcf50cc6e45bd225114..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/cmake/ThrustBuildCompilerTargets.cmake
+++ /dev/null
@@ -1,150 +0,0 @@
-#
-# This file defines the `thrust_build_compiler_targets()` function, which
-# creates the following interface targets:
-#
-# thrust.compiler_interface
-# - Interface target providing compiler-specific options needed to build
-# Thrust's tests, examples, etc.
-#
-# thrust.promote_cudafe_warnings
-# - Interface target that adds warning promotion for NVCC cudafe invocations.
-# - Only exists to work around github issue #1174 on tbb.cuda configurations.
-# - May be combined with thrust.compiler_interface when #1174 is fully resolved.
-
-function(thrust_build_compiler_targets)
- set(cxx_compile_definitions)
- set(cxx_compile_options)
-
- thrust_update_system_found_flags()
-
- if (THRUST_TBB_FOUND)
- # There's a ton of these in the TBB backend, even though the code is correct.
- # TODO: silence these warnings in code instead
- append_option_if_available("-Wno-unused-parameter" cxx_compile_options)
- endif()
-
- if ("MSVC" STREQUAL "${CMAKE_CXX_COMPILER_ID}")
- # TODO Enable /Wall instead of W3
- append_option_if_available("/W3" cxx_compile_options)
-
- # Treat all warnings as errors:
- append_option_if_available("/WX" cxx_compile_options)
-
- # Disabled loss-of-data conversion warnings.
- # TODO Re-enable.
- append_option_if_available("/wd4244" cxx_compile_options)
- append_option_if_available("/wd4267" cxx_compile_options)
-
- # Suppress numeric conversion-to-bool warnings.
- # TODO Re-enable.
- append_option_if_available("/wd4800" cxx_compile_options)
-
- # Disable warning about applying unary operator- to unsigned type.
- append_option_if_available("/wd4146" cxx_compile_options)
-
- # MSVC STL assumes that `allocator_traits`'s allocator will use raw pointers,
- # and the `__DECLSPEC_ALLOCATOR` macro causes issues with thrust's universal
- # allocators:
- # warning C4494: 'std::allocator_traits<_Alloc>::allocate' :
- # Ignoring __declspec(allocator) because the function return type is not
- # a pointer or reference
- # See https://github.com/microsoft/STL/issues/696
- append_option_if_available("/wd4494" cxx_compile_options)
-
- # Some of the async tests require /bigobj to fit all their sections into the
- # object files:
- append_option_if_available("/bigobj" cxx_compile_options)
-
- # "Oh right, this is Visual Studio."
- list(APPEND cxx_compile_definitions "NOMINMAX")
- else()
- append_option_if_available("-Werror" cxx_compile_options)
- append_option_if_available("-Wall" cxx_compile_options)
- append_option_if_available("-Wextra" cxx_compile_options)
- append_option_if_available("-Winit-self" cxx_compile_options)
- append_option_if_available("-Woverloaded-virtual" cxx_compile_options)
- append_option_if_available("-Wcast-qual" cxx_compile_options)
- append_option_if_available("-Wno-cast-align" cxx_compile_options)
- append_option_if_available("-Wno-long-long" cxx_compile_options)
- append_option_if_available("-Wno-variadic-macros" cxx_compile_options)
- append_option_if_available("-Wno-unused-function" cxx_compile_options)
- append_option_if_available("-Wno-unused-variable" cxx_compile_options)
- endif()
-
- if ("GNU" STREQUAL "${CMAKE_CXX_COMPILER_ID}")
- if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.5)
- # In GCC 4.4, the CUDA backend's kernel launch templates cause
- # impossible-to-decipher "'<anonymous>' is used uninitialized in this
- # function" warnings, so we disable uninitialized variable warnings.
- append_option_if_available("-Wno-uninitialized" cxx_compile_options)
- endif()
-
- if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 4.5)
- # This isn't available until GCC 4.3, and misfires on TMP code until
- # GCC 4.5.
- append_option_if_available("-Wlogical-op" cxx_compile_options)
- endif()
-
- if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 7.3)
- # GCC 7.3 complains about name mangling changes due to `noexcept`
- # becoming part of the type system; we don't care.
- append_option_if_available("-Wno-noexcept-type" cxx_compile_options)
- endif()
- endif()
-
- if (("Clang" STREQUAL "${CMAKE_CXX_COMPILER_ID}") OR
- ("XL" STREQUAL "${CMAKE_CXX_COMPILER_ID}"))
- # xlC and Clang warn about unused parameters in uninstantiated templates.
- # This causes xlC to choke on the OMP backend, which is mostly #ifdef'd out
- # (and thus has unused parameters) when you aren't using it.
- append_option_if_available("-Wno-unused-parameters" cxx_compile_options)
- endif()
-
- if ("Clang" STREQUAL "${CMAKE_CXX_COMPILER_ID}")
- # -Wunneeded-internal-declaration misfires in the unit test framework
- # on older versions of Clang.
- append_option_if_available("-Wno-unneeded-internal-declaration" cxx_compile_options)
- endif()
-
- if ("Feta" STREQUAL "${CMAKE_CUDA_COMPILER_ID}")
- # Today:
- # * NVCC accepts CUDA C++ in .cu files but not .cpp files.
- # * Feta accepts CUDA C++ in .cpp files but not .cu files.
- # TODO: This won't be necessary in the future.
- list(APPEND cxx_compile_options -cppsuffix=cu)
- endif()
-
- add_library(thrust.compiler_interface INTERFACE)
-
- foreach (cxx_option IN LISTS cxx_compile_options)
- target_compile_options(thrust.compiler_interface INTERFACE
- $<$<COMPILE_LANGUAGE:CXX>:${cxx_option}>
- $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:Feta>>:${cxx_option}>
- # Only use -Xcompiler with NVCC, not Feta.
- #
- # CMake can't split genexs, so this can't be formatted better :(
- # This is:
- # if (using CUDA and CUDA_COMPILER is NVCC) add -Xcompiler=opt:
- $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:NVIDIA>>:-Xcompiler=${cxx_option}>
- )
- endforeach()
-
- foreach (cxx_definition IN LISTS cxx_compile_definitions)
- # Add these for both CUDA and CXX targets:
- target_compile_definitions(thrust.compiler_interface INTERFACE
- ${cxx_definition}
- )
- endforeach()
-
- # Display warning numbers from nvcc cudafe errors:
- target_compile_options(thrust.compiler_interface INTERFACE
- # If using CUDA w/ NVCC...
- $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:NVIDIA>>:-Xcudafe=--display_error_number>
- )
-
- # This is kept separate for Github issue #1174.
- add_library(thrust.promote_cudafe_warnings INTERFACE)
- target_compile_options(thrust.promote_cudafe_warnings INTERFACE
- $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:NVIDIA>>:-Xcudafe=--promote_warnings>
- )
-endfunction()
diff --git a/spaces/CVPR/lama-example/models/ade20k/segm_lib/nn/__init__.py b/spaces/CVPR/lama-example/models/ade20k/segm_lib/nn/__init__.py
deleted file mode 100644
index 98a96370ef04570f516052bb73f568d0ebc346c3..0000000000000000000000000000000000000000
--- a/spaces/CVPR/lama-example/models/ade20k/segm_lib/nn/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .modules import *
-from .parallel import UserScatteredDataParallel, user_scattered_collate, async_copy_to
diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/logger.py b/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/logger.py
deleted file mode 100644
index 18145f54c927abd59b95f3fa6e6da8002bc2ce97..0000000000000000000000000000000000000000
--- a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/logger.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import functools
-import logging
-import os
-import sys
-
-from termcolor import colored
-
-
-class _ColorfulFormatter(logging.Formatter):
- def __init__(self, *args, **kwargs):
- self._root_name = kwargs.pop("root_name") + "."
- self._abbrev_name = kwargs.pop("abbrev_name", "")
- if len(self._abbrev_name):
- self._abbrev_name = self._abbrev_name + "."
- super(_ColorfulFormatter, self).__init__(*args, **kwargs)
-
- def formatMessage(self, record):
- record.name = record.name.replace(self._root_name, self._abbrev_name)
- log = super(_ColorfulFormatter, self).formatMessage(record)
- if record.levelno == logging.WARNING:
- prefix = colored("WARNING", "red", attrs=["blink"])
- elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
- prefix = colored("ERROR", "red", attrs=["blink", "underline"])
- else:
- return log
- return prefix + " " + log
-
-
-# so that calling setup_logger multiple times won't add many handlers
-@functools.lru_cache()
-def setup_logger(output=None, distributed_rank=0, *, color=True, name="imagenet", abbrev_name=None):
- """
- Initialize the detectron2 logger and set its verbosity level to "INFO".
-
- Args:
- output (str): a file name or a directory to save log. If None, will not save log file.
- If ends with ".txt" or ".log", assumed to be a file name.
- Otherwise, logs will be saved to `output/log.txt`.
- name (str): the root module name of this logger
-
- Returns:
- logging.Logger: a logger
- """
- logger = logging.getLogger(name)
- logger.setLevel(logging.DEBUG)
- logger.propagate = False
-
- if abbrev_name is None:
- abbrev_name = name
-
- plain_formatter = logging.Formatter(
- "[%(asctime)s.%(msecs)03d]: %(message)s", datefmt="%m/%d %H:%M:%S"
- )
- # stdout logging: master only
- if distributed_rank == 0:
- ch = logging.StreamHandler(stream=sys.stdout)
- ch.setLevel(logging.DEBUG)
- if color:
- formatter = _ColorfulFormatter(
- colored("[%(asctime)s.%(msecs)03d]: ", "green") + "%(message)s",
- datefmt="%m/%d %H:%M:%S",
- root_name=name,
- abbrev_name=str(abbrev_name),
- )
- else:
- formatter = plain_formatter
- ch.setFormatter(formatter)
- logger.addHandler(ch)
-
- # file logging: all workers
- if output is not None:
- if output.endswith(".txt") or output.endswith(".log"):
- filename = output
- else:
- filename = os.path.join(output, "log.txt")
- if distributed_rank > 0:
- filename = filename + f".rank{distributed_rank}"
- os.makedirs(os.path.dirname(filename), exist_ok=True)
-
- fh = logging.StreamHandler(_cached_log_stream(filename))
- fh.setLevel(logging.DEBUG)
- fh.setFormatter(plain_formatter)
- logger.addHandler(fh)
-
- return logger
-
-
-# cache the opened file object, so that different calls to `setup_logger`
-# with the same file name can safely write to the same file.
-@functools.lru_cache(maxsize=None)
-def _cached_log_stream(filename):
- return open(filename, "a")
diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/__init__.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/dpt_depth.py b/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/dpt_depth.py
deleted file mode 100644
index 4e9aab5d2767dffea39da5b3f30e2798688216f1..0000000000000000000000000000000000000000
--- a/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/dpt_depth.py
+++ /dev/null
@@ -1,109 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .base_model import BaseModel
-from .blocks import (
- FeatureFusionBlock,
- FeatureFusionBlock_custom,
- Interpolate,
- _make_encoder,
- forward_vit,
-)
-
-
-def _make_fusion_block(features, use_bn):
- return FeatureFusionBlock_custom(
- features,
- nn.ReLU(False),
- deconv=False,
- bn=use_bn,
- expand=False,
- align_corners=True,
- )
-
-
-class DPT(BaseModel):
- def __init__(
- self,
- head,
- features=256,
- backbone="vitb_rn50_384",
- readout="project",
- channels_last=False,
- use_bn=False,
- ):
-
- super(DPT, self).__init__()
-
- self.channels_last = channels_last
-
- hooks = {
- "vitb_rn50_384": [0, 1, 8, 11],
- "vitb16_384": [2, 5, 8, 11],
- "vitl16_384": [5, 11, 17, 23],
- }
-
- # Instantiate backbone and reassemble blocks
- self.pretrained, self.scratch = _make_encoder(
- backbone,
- features,
- False, # Set to true if you want to train from scratch, uses ImageNet weights
- groups=1,
- expand=False,
- exportable=False,
- hooks=hooks[backbone],
- use_readout=readout,
- )
-
- self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
- self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
- self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
- self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
-
- self.scratch.output_conv = head
-
-
- def forward(self, x):
- if self.channels_last == True:
- x.contiguous(memory_format=torch.channels_last)
-
- layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
-
- layer_1_rn = self.scratch.layer1_rn(layer_1)
- layer_2_rn = self.scratch.layer2_rn(layer_2)
- layer_3_rn = self.scratch.layer3_rn(layer_3)
- layer_4_rn = self.scratch.layer4_rn(layer_4)
-
- path_4 = self.scratch.refinenet4(layer_4_rn)
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
- out = self.scratch.output_conv(path_1)
-
- return out
-
-
-class DPTDepthModel(DPT):
- def __init__(self, path=None, non_negative=True, **kwargs):
- features = kwargs["features"] if "features" in kwargs else 256
-
- head = nn.Sequential(
- nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
- Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
- nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
- nn.ReLU(True),
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
- nn.ReLU(True) if non_negative else nn.Identity(),
- nn.Identity(),
- )
-
- super().__init__(head, **kwargs)
-
- if path is not None:
- self.load(path)
-
- def forward(self, x):
- return super().forward(x).squeeze(dim=1)
-
diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/modeling_llama.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/modeling_llama.py
deleted file mode 100644
index 12d980e189d902fb1a6d9ea05dc3ca91959b1c8c..0000000000000000000000000000000000000000
--- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/modeling_llama.py
+++ /dev/null
@@ -1,755 +0,0 @@
-# This script is based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
-
-""" PyTorch LLaMA model."""
-import math
-from typing import List, Optional, Tuple, Union
-
-import torch
-import torch.utils.checkpoint
-from torch import nn
-from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-
-from transformers.activations import ACT2FN
-from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
-from transformers.modeling_utils import PreTrainedModel
-from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
-from transformers.models.llama.configuration_llama import LlamaConfig
-
-
-logger = logging.get_logger(__name__)
-
-_CONFIG_FOR_DOC = "LlamaConfig"
-
-
-# Copied from transformers.models.bart.modeling_bart._make_causal_mask
-def _make_causal_mask(
- input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
-):
- """
- Make causal mask used for bi-directional self-attention.
- """
- bsz, tgt_len = input_ids_shape
- mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
- mask_cond = torch.arange(mask.size(-1), device=device)
- mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
- mask = mask.to(dtype)
-
- if past_key_values_length > 0:
- mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
- return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
-
-
-# Copied from transformers.models.bart.modeling_bart._expand_mask
-def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
- """
- Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
- """
- bsz, src_len = mask.size()
- tgt_len = tgt_len if tgt_len is not None else src_len
-
- expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
-
- inverted_mask = 1.0 - expanded_mask
-
- return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
-
-
-class LlamaRMSNorm(nn.Module):
- def __init__(self, hidden_size, eps=1e-6):
- """
- LlamaRMSNorm is equivalent to T5LayerNorm
- """
- super().__init__()
- self.weight = nn.Parameter(torch.ones(hidden_size))
- self.variance_epsilon = eps
-
- def forward(self, hidden_states):
- variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
- hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
-
- # convert into half-precision if necessary
- if self.weight.dtype in [torch.float16, torch.bfloat16]:
- hidden_states = hidden_states.to(self.weight.dtype)
-
- return self.weight * hidden_states
-
-
-class LlamaRotaryEmbedding(torch.nn.Module):
- def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
- super().__init__()
- inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
- self.register_buffer("inv_freq", inv_freq)
-
- # Build here to make `torch.jit.trace` work.
- self.max_seq_len_cached = max_position_embeddings
- t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
- emb = torch.cat((freqs, freqs), dim=-1)
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
-
- def forward(self, x, seq_len=None):
- # x: [bs, num_attention_heads, seq_len, head_size]
- # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
- if seq_len > self.max_seq_len_cached:
- self.max_seq_len_cached = seq_len
- t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
- emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
- return (
- self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
- self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
- )
-
-
-def rotate_half(x):
- """Rotates half the hidden dims of the input."""
- x1 = x[..., : x.shape[-1] // 2]
- x2 = x[..., x.shape[-1] // 2 :]
- return torch.cat((-x2, x1), dim=-1)
-
-
-def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
- gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
- gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
- cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
- sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
- q_embed = (q * cos) + (rotate_half(q) * sin)
- k_embed = (k * cos) + (rotate_half(k) * sin)
- return q_embed, k_embed
-
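-# Illustrative shape check: with a batch of 1, 2 heads, 3 tokens, and head_dim 4,
-# the rotary embedding returns cos/sin caches of shape [1, 1, seq_len, head_dim],
-# and apply_rotary_pos_emb leaves the query/key shapes unchanged:
-#
-#   rope = LlamaRotaryEmbedding(dim=4)
-#   q = k = torch.randn(1, 2, 3, 4)           # [bsz, num_heads, seq_len, head_dim]
-#   cos, sin = rope(q, seq_len=3)
-#   position_ids = torch.arange(3)[None, :]   # [bsz, seq_len]
-#   q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
-#   assert q_rot.shape == q.shape and k_rot.shape == k.shape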
-
-class LlamaMLP(nn.Module):
- def __init__(
- self,
- hidden_size: int,
- intermediate_size: int,
- hidden_act: str,
- ):
- super().__init__()
- self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
- self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
- self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
- self.act_fn = ACT2FN[hidden_act]
-
- def forward(self, x):
- return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
-
-
-class LlamaAttention(nn.Module):
- """Multi-headed attention from 'Attention Is All You Need' paper"""
-
- def __init__(self, config: LlamaConfig):
- super().__init__()
- self.config = config
- self.hidden_size = config.hidden_size
- self.num_heads = config.num_attention_heads
- self.head_dim = self.hidden_size // self.num_heads
- self.max_position_embeddings = config.max_position_embeddings
-
- if (self.head_dim * self.num_heads) != self.hidden_size:
- raise ValueError(
- f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
- f" and `num_heads`: {self.num_heads})."
- )
- self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
- self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
- self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
- self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
- self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
-
- def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
- return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
-
- def forward(
- self,
- hidden_states: torch.Tensor,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
- output_attentions: bool = False,
- use_cache: bool = False,
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
- bsz, q_len, _ = hidden_states.size()
-
- query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
- key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
- value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-
- kv_seq_len = key_states.shape[-2]
- if past_key_value is not None:
- kv_seq_len += past_key_value[0].shape[-2]
- cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
- query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
- # [bsz, nh, t, hd]
-
- if past_key_value is not None:
- # reuse k, v, self_attention
- key_states = torch.cat([past_key_value[0], key_states], dim=2)
- value_states = torch.cat([past_key_value[1], value_states], dim=2)
-
- past_key_value = (key_states, value_states) if use_cache else None
-
- attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
-
- if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
- raise ValueError(
- f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
- f" {attn_weights.size()}"
- )
-
- if attention_mask is not None:
- if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
- raise ValueError(
- f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
- )
- attn_weights = attn_weights + attention_mask
- attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
-
- # upcast attention to fp32
- attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
- attn_output = torch.matmul(attn_weights, value_states)
-
- if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
- raise ValueError(
- f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
- f" {attn_output.size()}"
- )
-
- attn_output = attn_output.transpose(1, 2)
- attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
-
- attn_output = self.o_proj(attn_output)
-
- if not output_attentions:
- attn_weights = None
-
- return attn_output, attn_weights, past_key_value
-
-
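`LlamaAttention.forward` above concatenates cached keys/values along the sequence dimension and then runs scaled dot-product attention. A shape-only sketch of that step when decoding a single new token:

```python
import math
import torch

bsz, n_heads, head_dim = 1, 2, 8
past_len, q_len = 6, 1                          # one new token, six cached ones
past_k = torch.randn(bsz, n_heads, past_len, head_dim)
past_v = torch.randn(bsz, n_heads, past_len, head_dim)
q = torch.randn(bsz, n_heads, q_len, head_dim)
k_new = torch.randn(bsz, n_heads, q_len, head_dim)
v_new = torch.randn(bsz, n_heads, q_len, head_dim)

k = torch.cat([past_k, k_new], dim=2)           # [bsz, nh, past_len + q_len, hd]
v = torch.cat([past_v, v_new], dim=2)
weights = torch.softmax(q @ k.transpose(2, 3) / math.sqrt(head_dim), dim=-1)
out = weights @ v                               # [bsz, nh, q_len, hd]
print(out.shape)                                # torch.Size([1, 2, 1, 8])
```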
-class LlamaDecoderLayer(nn.Module):
- def __init__(self, config: LlamaConfig):
- super().__init__()
- self.hidden_size = config.hidden_size
- self.self_attn = LlamaAttention(config=config)
- self.mlp = LlamaMLP(
- hidden_size=self.hidden_size,
- intermediate_size=config.intermediate_size,
- hidden_act=config.hidden_act,
- )
- self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
- self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-
- def forward(
- self,
- hidden_states: torch.Tensor,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
- output_attentions: Optional[bool] = False,
- use_cache: Optional[bool] = False,
- ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
- """
- Args:
- hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
- attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
- `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under
- returned tensors for more detail.
- use_cache (`bool`, *optional*):
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
- (see `past_key_values`).
- past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
- """
-
- residual = hidden_states
-
- hidden_states = self.input_layernorm(hidden_states)
-
- # Self Attention
- hidden_states, self_attn_weights, present_key_value = self.self_attn(
- hidden_states=hidden_states,
- attention_mask=attention_mask,
- position_ids=position_ids,
- past_key_value=past_key_value,
- output_attentions=output_attentions,
- use_cache=use_cache,
- )
- hidden_states = residual + hidden_states
-
- # Fully Connected
- residual = hidden_states
- hidden_states = self.post_attention_layernorm(hidden_states)
- hidden_states = self.mlp(hidden_states)
- hidden_states = residual + hidden_states
-
- outputs = (hidden_states,)
-
- if output_attentions:
- outputs += (self_attn_weights,)
-
- if use_cache:
- outputs += (present_key_value,)
-
- return outputs
-
-
-LLAMA_START_DOCSTRING = r"""
- This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
- library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
- etc.)
-
- This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
- Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
- and behavior.
-
- Parameters:
- config ([`LlamaConfig`]):
- Model configuration class with all the parameters of the model. Initializing with a config file does not
- load the weights associated with the model, only the configuration. Check out the
- [`~PreTrainedModel.from_pretrained`] method to load the model weights.
-"""
-
-
-@add_start_docstrings(
- "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
- LLAMA_START_DOCSTRING,
-)
-class LlamaPreTrainedModel(PreTrainedModel):
- config_class = LlamaConfig
- base_model_prefix = "model"
- supports_gradient_checkpointing = True
- _no_split_modules = ["LlamaDecoderLayer"]
- _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
-
- def _init_weights(self, module):
- std = self.config.initializer_range
- if isinstance(module, nn.Linear):
- module.weight.data.normal_(mean=0.0, std=std)
- if module.bias is not None:
- module.bias.data.zero_()
- elif isinstance(module, nn.Embedding):
- module.weight.data.normal_(mean=0.0, std=std)
- if module.padding_idx is not None:
- module.weight.data[module.padding_idx].zero_()
-
- def _set_gradient_checkpointing(self, module, value=False):
- if isinstance(module, LlamaModel):
- module.gradient_checkpointing = value
-
-
-LLAMA_INPUTS_DOCSTRING = r"""
- Args:
- input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
- Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
- it.
-
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
-
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
-
- [What are attention masks?](../glossary#attention-mask)
-
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
-
- If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
- `past_key_values`).
-
- If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
- and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
- information on the default strategy.
-
- - 1 indicates the head is **not masked**,
- - 0 indicates the head is **masked**.
- position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
- config.n_positions - 1]`.
-
- [What are position IDs?](../glossary#position-ids)
- past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
- `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
- `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
-
- Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
- blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
-
- If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
- don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
- `decoder_input_ids` of shape `(batch_size, sequence_length)`.
- inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
- is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
- model's internal embedding lookup matrix.
- use_cache (`bool`, *optional*):
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
- `past_key_values`).
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
- tensors for more detail.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
- more detail.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
-
-@add_start_docstrings(
- "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
- LLAMA_START_DOCSTRING,
-)
-class LlamaModel(LlamaPreTrainedModel):
- """
- Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
-
- Args:
- config: LlamaConfig
- """
-
- def __init__(self, config: LlamaConfig):
- super().__init__(config)
- self.padding_idx = config.pad_token_id
- self.vocab_size = config.vocab_size
-
- self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
- self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
- self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-
- self.gradient_checkpointing = False
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.embed_tokens
-
- def set_input_embeddings(self, value):
- self.embed_tokens = value
-
- # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
- def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
- # create causal mask
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- combined_attention_mask = None
- if input_shape[-1] > 1:
- combined_attention_mask = _make_causal_mask(
- input_shape,
- inputs_embeds.dtype,
- device=inputs_embeds.device,
- past_key_values_length=past_key_values_length,
- )
-
- if attention_mask is not None:
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
- inputs_embeds.device
- )
- combined_attention_mask = (
- expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
- )
-
- return combined_attention_mask
-
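`_prepare_decoder_attention_mask` above combines a causal mask with the padding mask. A standalone stand-in (not the library's `_make_causal_mask`) showing the additive causal part, including room for cached positions:

```python
import torch

def causal_mask(tgt_len: int, past_len: int = 0, dtype=torch.float32):
    # 0 where attention is allowed, a very large negative number where it is masked.
    mask = torch.triu(torch.full((tgt_len, tgt_len), torch.finfo(dtype).min), diagonal=1)
    if past_len > 0:
        # Every new position may attend to all cached (past) positions.
        mask = torch.cat([torch.zeros(tgt_len, past_len), mask], dim=-1)
    return mask[None, None, :, :]               # [1, 1, tgt_len, past_len + tgt_len]

print(causal_mask(3, past_len=2).shape)         # torch.Size([1, 1, 3, 5])
```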
- @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_values: Optional[List[torch.FloatTensor]] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- query_embeds: Optional[torch.FloatTensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, BaseModelOutputWithPast]:
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- use_cache = use_cache if use_cache is not None else self.config.use_cache
-
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- # retrieve input_ids and inputs_embeds
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
- elif input_ids is not None:
- batch_size, seq_length = input_ids.shape
- elif inputs_embeds is not None:
- batch_size, seq_length, _ = inputs_embeds.shape
- else:
- raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
-
- if inputs_embeds is None:
- inputs_embeds = self.embed_tokens(input_ids)
- if query_embeds is not None:
- inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1)
- batch_size, seq_length, _ = inputs_embeds.shape
-
- seq_length_with_past = seq_length
- past_key_values_length = 0
-
- if past_key_values is not None:
- past_key_values_length = past_key_values[0][0].shape[2]
- seq_length_with_past = seq_length_with_past + past_key_values_length
-
- if position_ids is None:
- device = input_ids.device if input_ids is not None else inputs_embeds.device
- position_ids = torch.arange(
- past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
- )
- position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
- else:
- position_ids = position_ids.view(-1, seq_length).long()
-
- # embed positions
- if attention_mask is None:
- attention_mask = torch.ones(
- (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
- )
- attention_mask = self._prepare_decoder_attention_mask(
- attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
- )
-
- hidden_states = inputs_embeds
-
- if self.gradient_checkpointing and self.training:
- if use_cache:
- logger.warning_once(
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
- )
- use_cache = False
-
- # decoder layers
- all_hidden_states = () if output_hidden_states else None
- all_self_attns = () if output_attentions else None
- next_decoder_cache = () if use_cache else None
-
- for idx, decoder_layer in enumerate(self.layers):
- if output_hidden_states:
- all_hidden_states += (hidden_states,)
-
- past_key_value = past_key_values[idx] if past_key_values is not None else None
-
- if self.gradient_checkpointing and self.training:
-
- def create_custom_forward(module):
- def custom_forward(*inputs):
- # None for past_key_value
- return module(*inputs, output_attentions, None)
-
- return custom_forward
-
- layer_outputs = torch.utils.checkpoint.checkpoint(
- create_custom_forward(decoder_layer),
- hidden_states,
- attention_mask,
- position_ids,
- None,
- )
- else:
- layer_outputs = decoder_layer(
- hidden_states,
- attention_mask=attention_mask,
- position_ids=position_ids,
- past_key_value=past_key_value,
- output_attentions=output_attentions,
- use_cache=use_cache,
- )
-
- hidden_states = layer_outputs[0]
-
- if use_cache:
- next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
-
- if output_attentions:
- all_self_attns += (layer_outputs[1],)
-
- hidden_states = self.norm(hidden_states)
-
- # add hidden states from the last decoder layer
- if output_hidden_states:
- all_hidden_states += (hidden_states,)
-
- next_cache = next_decoder_cache if use_cache else None
- if not return_dict:
- return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
- return BaseModelOutputWithPast(
- last_hidden_state=hidden_states,
- past_key_values=next_cache,
- hidden_states=all_hidden_states,
- attentions=all_self_attns,
- )
-
-
-class LlamaForCausalLM(LlamaPreTrainedModel):
- def __init__(self, config):
- super().__init__(config)
- self.model = LlamaModel(config)
-
- self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.model.embed_tokens
-
- def set_input_embeddings(self, value):
- self.model.embed_tokens = value
-
- def get_output_embeddings(self):
- return self.lm_head
-
- def set_output_embeddings(self, new_embeddings):
- self.lm_head = new_embeddings
-
- def set_decoder(self, decoder):
- self.model = decoder
-
- def get_decoder(self):
- return self.model
-
- @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
- @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_values: Optional[List[torch.FloatTensor]] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- query_embeds: Optional[torch.FloatTensor] = None,
- labels: Optional[torch.LongTensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, CausalLMOutputWithPast]:
- r"""
- Args:
- labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
- config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
- (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
-
- Returns:
-
- Example:
-
- ```python
- >>> from transformers import AutoTokenizer, LlamaForCausalLM
-
- >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
- >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
-
- >>> prompt = "Hey, are you conscious? Can you talk to me?"
- >>> inputs = tokenizer(prompt, return_tensors="pt")
-
- >>> # Generate
- >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
- >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
- "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
- ```"""
-
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
- outputs = self.model(
- input_ids=input_ids,
- attention_mask=attention_mask,
- position_ids=position_ids,
- past_key_values=past_key_values,
- inputs_embeds=inputs_embeds,
- query_embeds=query_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- hidden_states = outputs[0]
- logits = self.lm_head(hidden_states)
-
- loss = None
- if labels is not None:
- # Shift so that tokens < n predict n
- shift_logits = logits[..., :-1, :].contiguous()
- shift_labels = labels[..., 1:].contiguous()
- # Flatten the tokens
- loss_fct = CrossEntropyLoss()
- shift_logits = shift_logits.view(-1, self.config.vocab_size)
- shift_labels = shift_labels.view(-1)
- # Enable model parallelism
- shift_labels = shift_labels.to(shift_logits.device)
- loss = loss_fct(shift_logits, shift_labels)
-
- if not return_dict:
- output = (logits,) + outputs[1:]
- return (loss,) + output if loss is not None else output
-
- return CausalLMOutputWithPast(
- loss=loss,
- logits=logits,
- past_key_values=outputs.past_key_values,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
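The loss above shifts logits and labels by one position so the prediction at position t is scored against the token at t + 1. A tiny worked illustration:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab = 10
logits = torch.randn(1, 4, vocab)                   # predictions for 4 positions
labels = torch.tensor([[2, 5, 7, 1]])

shift_logits = logits[..., :-1, :].contiguous()     # positions 0..2
shift_labels = labels[..., 1:].contiguous()         # target tokens 1..3
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab), shift_labels.view(-1))
print(loss.item())
```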
- def prepare_inputs_for_generation(
- self, input_ids, query_embeds=None, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
- ):
- if past_key_values:
- input_ids = input_ids[:, -1:]
-
- position_ids = kwargs.get("position_ids", None)
- if attention_mask is not None and position_ids is None:
- # create position_ids on the fly for batch generation
- position_ids = attention_mask.long().cumsum(-1) - 1
- position_ids.masked_fill_(attention_mask == 0, 1)
- if past_key_values:
- position_ids = position_ids[:, -1].unsqueeze(-1)
- query_embeds = None
-
- # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
- if inputs_embeds is not None and past_key_values is None:
- model_inputs = {"inputs_embeds": inputs_embeds}
- else:
- model_inputs = {"input_ids": input_ids}
-
- model_inputs.update(
- {
- "position_ids": position_ids,
- "query_embeds": query_embeds,
- "past_key_values": past_key_values,
- "use_cache": kwargs.get("use_cache"),
- "attention_mask": attention_mask,
- }
- )
- return model_inputs
-
- @staticmethod
- def _reorder_cache(past_key_values, beam_idx):
- reordered_past = ()
- for layer_past in past_key_values:
- reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
- return reordered_past
-
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/utils/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/utils/__init__.py
deleted file mode 100644
index 0bd8ec5e3b566d8a2d43a0904fd49db7862a21eb..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/utils/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from .core import (
- infer_vegalite_type,
- infer_encoding_types,
- sanitize_dataframe,
- parse_shorthand,
- use_signature,
- update_nested,
- display_traceback,
- SchemaBase,
-)
-from .html import spec_to_html
-from .plugin_registry import PluginRegistry
-from .deprecation import AltairDeprecationWarning
-from .schemapi import Undefined
-
-
-__all__ = (
- "infer_vegalite_type",
- "infer_encoding_types",
- "sanitize_dataframe",
- "spec_to_html",
- "parse_shorthand",
- "use_signature",
- "update_nested",
- "display_traceback",
- "AltairDeprecationWarning",
- "SchemaBase",
- "Undefined",
- "PluginRegistry",
-)
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/dependencies/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/dependencies/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/otlLib/optimize/gpos.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/otlLib/optimize/gpos.py
deleted file mode 100644
index 0acd9ed04c141c532cf7fafda220b3a898106415..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/otlLib/optimize/gpos.py
+++ /dev/null
@@ -1,452 +0,0 @@
-import logging
-import os
-from collections import defaultdict, namedtuple
-from functools import reduce
-from itertools import chain
-from math import log2
-from typing import DefaultDict, Dict, Iterable, List, Sequence, Tuple
-
-from fontTools.config import OPTIONS
-from fontTools.misc.intTools import bit_count, bit_indices
-from fontTools.ttLib import TTFont
-from fontTools.ttLib.tables import otBase, otTables
-
-log = logging.getLogger(__name__)
-
-COMPRESSION_LEVEL = OPTIONS[f"{__name__}:COMPRESSION_LEVEL"]
-
-# Kept because ufo2ft depends on it, to be removed once ufo2ft uses the config instead
-# https://github.com/fonttools/fonttools/issues/2592
-GPOS_COMPACT_MODE_ENV_KEY = "FONTTOOLS_GPOS_COMPACT_MODE"
-GPOS_COMPACT_MODE_DEFAULT = str(COMPRESSION_LEVEL.default)
-
-
-def _compression_level_from_env() -> int:
- env_level = GPOS_COMPACT_MODE_DEFAULT
- if GPOS_COMPACT_MODE_ENV_KEY in os.environ:
- import warnings
-
- warnings.warn(
- f"'{GPOS_COMPACT_MODE_ENV_KEY}' environment variable is deprecated. "
- "Please set the 'fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL' option "
- "in TTFont.cfg.",
- DeprecationWarning,
- )
-
- env_level = os.environ[GPOS_COMPACT_MODE_ENV_KEY]
- if len(env_level) == 1 and env_level in "0123456789":
- return int(env_level)
- raise ValueError(f"Bad {GPOS_COMPACT_MODE_ENV_KEY}={env_level}")
-
-
-def compact(font: TTFont, level: int) -> TTFont:
- # Ideal plan:
- # 1. Find lookups of Lookup Type 2: Pair Adjustment Positioning Subtable
- # https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable
- # 2. Extract glyph-glyph kerning and class-kerning from all present subtables
- # 3. Regroup into different subtable arrangements
- # 4. Put back into the lookup
- #
- # Actual implementation:
- # 2. Only class kerning is optimized currently
- # 3. If the input kerning is already in several subtables, the subtables
- # are not grouped together first; instead each subtable is treated
- # independently, so currently this step is:
- # Split existing subtables into more smaller subtables
- gpos = font["GPOS"]
- for lookup in gpos.table.LookupList.Lookup:
- if lookup.LookupType == 2:
- compact_lookup(font, level, lookup)
- elif lookup.LookupType == 9 and lookup.SubTable[0].ExtensionLookupType == 2:
- compact_ext_lookup(font, level, lookup)
- return font
-
-
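A hedged usage sketch of `compact` above, along the lines of the plan in its comments; the font paths are placeholders and `level` plays the role of a gzip-like 1-9 knob:

```python
from fontTools.ttLib import TTFont

font = TTFont("MyFont.ttf")        # placeholder path
if "GPOS" in font:
    compact(font, 5)               # split/regroup class-kerning PairPos subtables
    font.save("MyFont.compact.ttf")
```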
-def compact_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None:
- new_subtables = compact_pair_pos(font, level, lookup.SubTable)
- lookup.SubTable = new_subtables
- lookup.SubTableCount = len(new_subtables)
-
-
-def compact_ext_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None:
- new_subtables = compact_pair_pos(
- font, level, [ext_subtable.ExtSubTable for ext_subtable in lookup.SubTable]
- )
- new_ext_subtables = []
- for subtable in new_subtables:
- ext_subtable = otTables.ExtensionPos()
- ext_subtable.Format = 1
- ext_subtable.ExtSubTable = subtable
- new_ext_subtables.append(ext_subtable)
- lookup.SubTable = new_ext_subtables
- lookup.SubTableCount = len(new_ext_subtables)
-
-
-def compact_pair_pos(
- font: TTFont, level: int, subtables: Sequence[otTables.PairPos]
-) -> Sequence[otTables.PairPos]:
- new_subtables = []
- for subtable in subtables:
- if subtable.Format == 1:
- # Not doing anything to Format 1 (yet?)
- new_subtables.append(subtable)
- elif subtable.Format == 2:
- new_subtables.extend(compact_class_pairs(font, level, subtable))
- return new_subtables
-
-
-def compact_class_pairs(
- font: TTFont, level: int, subtable: otTables.PairPos
-) -> List[otTables.PairPos]:
- from fontTools.otlLib.builder import buildPairPosClassesSubtable
-
- subtables = []
- classes1: DefaultDict[int, List[str]] = defaultdict(list)
- for g in subtable.Coverage.glyphs:
- classes1[subtable.ClassDef1.classDefs.get(g, 0)].append(g)
- classes2: DefaultDict[int, List[str]] = defaultdict(list)
- for g, i in subtable.ClassDef2.classDefs.items():
- classes2[i].append(g)
- all_pairs = {}
- for i, class1 in enumerate(subtable.Class1Record):
- for j, class2 in enumerate(class1.Class2Record):
- if is_really_zero(class2):
- continue
- all_pairs[(tuple(sorted(classes1[i])), tuple(sorted(classes2[j])))] = (
- getattr(class2, "Value1", None),
- getattr(class2, "Value2", None),
- )
- grouped_pairs = cluster_pairs_by_class2_coverage_custom_cost(font, all_pairs, level)
- for pairs in grouped_pairs:
- subtables.append(buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap()))
- return subtables
-
-
-def is_really_zero(class2: otTables.Class2Record) -> bool:
- v1 = getattr(class2, "Value1", None)
- v2 = getattr(class2, "Value2", None)
- return (v1 is None or v1.getEffectiveFormat() == 0) and (
- v2 is None or v2.getEffectiveFormat() == 0
- )
-
-
-Pairs = Dict[
- Tuple[Tuple[str, ...], Tuple[str, ...]],
- Tuple[otBase.ValueRecord, otBase.ValueRecord],
-]
-
-# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L935-L958
-def _getClassRanges(glyphIDs: Iterable[int]):
- glyphIDs = sorted(glyphIDs)
- last = glyphIDs[0]
- ranges = [[last]]
- for glyphID in glyphIDs[1:]:
- if glyphID != last + 1:
- ranges[-1].append(last)
- ranges.append([glyphID])
- last = glyphID
- ranges[-1].append(last)
- return ranges, glyphIDs[0], glyphIDs[-1]
-
-
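A quick worked example of `_getClassRanges` above: contiguous glyph IDs collapse into inclusive ranges.

```python
ranges, first, last = _getClassRanges([3, 4, 5, 9])
print(ranges, first, last)    # [[3, 5], [9, 9]] 3 9
```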
-# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L960-L989
-def _classDef_bytes(
- class_data: List[Tuple[List[Tuple[int, int]], int, int]],
- class_ids: List[int],
- coverage=False,
-):
- if not class_ids:
- return 0
- first_ranges, min_glyph_id, max_glyph_id = class_data[class_ids[0]]
- range_count = len(first_ranges)
- for i in class_ids[1:]:
- data = class_data[i]
- range_count += len(data[0])
- min_glyph_id = min(min_glyph_id, data[1])
- max_glyph_id = max(max_glyph_id, data[2])
- glyphCount = max_glyph_id - min_glyph_id + 1
- # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-1
- format1_bytes = 6 + glyphCount * 2
- # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-2
- format2_bytes = 4 + range_count * 6
- return min(format1_bytes, format2_bytes)
-
-
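And a worked size estimate with `_classDef_bytes` above, using the two byte formulas it compares (format 1: 6 + 2 per covered glyph, format 2: 4 + 6 per range):

```python
class_data = [_getClassRanges([3, 4, 5]), _getClassRanges([9])]
print(_classDef_bytes(class_data, [0, 1]))
# -> 16: format 2 costs 4 + 2 ranges * 6 = 16, beating format 1's 6 + 7 glyphs * 2 = 20
```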
-ClusteringContext = namedtuple(
- "ClusteringContext",
- [
- "lines",
- "all_class1",
- "all_class1_data",
- "all_class2_data",
- "valueFormat1_bytes",
- "valueFormat2_bytes",
- ],
-)
-
-
-class Cluster:
- # TODO(Python 3.7): Turn this into a dataclass
- # ctx: ClusteringContext
- # indices: int
- # Caches
- # TODO(Python 3.8): use functools.cached_property instead of the
- # manually cached properties, and remove the cache fields listed below.
- # _indices: Optional[List[int]] = None
- # _column_indices: Optional[List[int]] = None
- # _cost: Optional[int] = None
-
- __slots__ = "ctx", "indices_bitmask", "_indices", "_column_indices", "_cost"
-
- def __init__(self, ctx: ClusteringContext, indices_bitmask: int):
- self.ctx = ctx
- self.indices_bitmask = indices_bitmask
- self._indices = None
- self._column_indices = None
- self._cost = None
-
- @property
- def indices(self):
- if self._indices is None:
- self._indices = bit_indices(self.indices_bitmask)
- return self._indices
-
- @property
- def column_indices(self):
- if self._column_indices is None:
- # Indices of columns that have a 1 in at least 1 line
- # => binary OR all the lines
- bitmask = reduce(int.__or__, (self.ctx.lines[i] for i in self.indices))
- self._column_indices = bit_indices(bitmask)
- return self._column_indices
-
- @property
- def width(self):
- # Add 1 because Class2=0 cannot be used but needs to be encoded.
- return len(self.column_indices) + 1
-
- @property
- def cost(self):
- if self._cost is None:
- self._cost = (
- # 2 bytes to store the offset to this subtable in the Lookup table above
- 2
- # Contents of the subtable
- # From: https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#pair-adjustment-positioning-format-2-class-pair-adjustment
- # uint16 posFormat Format identifier: format = 2
- + 2
- # Offset16 coverageOffset Offset to Coverage table, from beginning of PairPos subtable.
- + 2
- + self.coverage_bytes
- # uint16 valueFormat1 ValueRecord definition — for the first glyph of the pair (may be zero).
- + 2
- # uint16 valueFormat2 ValueRecord definition — for the second glyph of the pair (may be zero).
- + 2
- # Offset16 classDef1Offset Offset to ClassDef table, from beginning of PairPos subtable — for the first glyph of the pair.
- + 2
- + self.classDef1_bytes
- # Offset16 classDef2Offset Offset to ClassDef table, from beginning of PairPos subtable — for the second glyph of the pair.
- + 2
- + self.classDef2_bytes
- # uint16 class1Count Number of classes in classDef1 table — includes Class 0.
- + 2
- # uint16 class2Count Number of classes in classDef2 table — includes Class 0.
- + 2
- # Class1Record class1Records[class1Count] Array of Class1 records, ordered by classes in classDef1.
- + (self.ctx.valueFormat1_bytes + self.ctx.valueFormat2_bytes)
- * len(self.indices)
- * self.width
- )
- return self._cost
-
- @property
- def coverage_bytes(self):
- format1_bytes = (
- # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-1
- # uint16 coverageFormat Format identifier — format = 1
- # uint16 glyphCount Number of glyphs in the glyph array
- 4
- # uint16 glyphArray[glyphCount] Array of glyph IDs — in numerical order
- + sum(len(self.ctx.all_class1[i]) for i in self.indices) * 2
- )
- ranges = sorted(
- chain.from_iterable(self.ctx.all_class1_data[i][0] for i in self.indices)
- )
- merged_range_count = 0
- last = None
- for (start, end) in ranges:
- if last is not None and start != last + 1:
- merged_range_count += 1
- last = end
- format2_bytes = (
- # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-2
- # uint16 coverageFormat Format identifier — format = 2
- # uint16 rangeCount Number of RangeRecords
- 4
- # RangeRecord rangeRecords[rangeCount] Array of glyph ranges — ordered by startGlyphID.
- # uint16 startGlyphID First glyph ID in the range
- # uint16 endGlyphID Last glyph ID in the range
- # uint16 startCoverageIndex Coverage Index of first glyph ID in range
- + merged_range_count * 6
- )
- return min(format1_bytes, format2_bytes)
-
- @property
- def classDef1_bytes(self):
- # We can skip encoding one of the Class1 definitions, and use
- # Class1=0 to represent it instead, because Class1 is gated by the
- # Coverage definition. Use Class1=0 for the highest byte savings.
- # Going through all options takes too long, pick the biggest class
- # = what happens in otlLib.builder.ClassDefBuilder.classes()
- biggest_index = max(self.indices, key=lambda i: len(self.ctx.all_class1[i]))
- return _classDef_bytes(
- self.ctx.all_class1_data, [i for i in self.indices if i != biggest_index]
- )
-
- @property
- def classDef2_bytes(self):
- # All Class2 need to be encoded because we can't use Class2=0
- return _classDef_bytes(self.ctx.all_class2_data, self.column_indices)
-
-
-def cluster_pairs_by_class2_coverage_custom_cost(
- font: TTFont,
- pairs: Pairs,
- compression: int = 5,
-) -> List[Pairs]:
- if not pairs:
- # The subtable was actually empty?
- return [pairs]
-
- # Sorted for reproducibility/determinism
- all_class1 = sorted(set(pair[0] for pair in pairs))
- all_class2 = sorted(set(pair[1] for pair in pairs))
-
- # Use Python's big ints for binary vectors representing each line
- lines = [
- sum(
- 1 << i if (class1, class2) in pairs else 0
- for i, class2 in enumerate(all_class2)
- )
- for class1 in all_class1
- ]
-
- # Map glyph names to ids and work with ints throughout for ClassDef formats
- name_to_id = font.getReverseGlyphMap()
- # Each entry in the arrays below is (ranges, min_glyph_id, max_glyph_id)
- all_class1_data = [
- _getClassRanges(name_to_id[name] for name in cls) for cls in all_class1
- ]
- all_class2_data = [
- _getClassRanges(name_to_id[name] for name in cls) for cls in all_class2
- ]
-
- format1 = 0
- format2 = 0
- for pair, value in pairs.items():
- format1 |= value[0].getEffectiveFormat() if value[0] else 0
- format2 |= value[1].getEffectiveFormat() if value[1] else 0
- valueFormat1_bytes = bit_count(format1) * 2
- valueFormat2_bytes = bit_count(format2) * 2
-
- ctx = ClusteringContext(
- lines,
- all_class1,
- all_class1_data,
- all_class2_data,
- valueFormat1_bytes,
- valueFormat2_bytes,
- )
-
- cluster_cache: Dict[int, Cluster] = {}
-
- def make_cluster(indices: int) -> Cluster:
- cluster = cluster_cache.get(indices, None)
- if cluster is not None:
- return cluster
- cluster = Cluster(ctx, indices)
- cluster_cache[indices] = cluster
- return cluster
-
- def merge(cluster: Cluster, other: Cluster) -> Cluster:
- return make_cluster(cluster.indices_bitmask | other.indices_bitmask)
-
- # Agglomerative clustering by hand, checking the cost gain of the new
- # cluster against the previously separate clusters
- # Start with 1 cluster per line
- # cluster = set of lines = new subtable
- clusters = [make_cluster(1 << i) for i in range(len(lines))]
-
- # Cost of 1 cluster with everything
- # `(1 << len) - 1` gives a bitmask full of 1's of length `len`
- cost_before_splitting = make_cluster((1 << len(lines)) - 1).cost
- log.debug(f" len(clusters) = {len(clusters)}")
-
- while len(clusters) > 1:
- lowest_cost_change = None
- best_cluster_index = None
- best_other_index = None
- best_merged = None
- for i, cluster in enumerate(clusters):
- for j, other in enumerate(clusters[i + 1 :]):
- merged = merge(cluster, other)
- cost_change = merged.cost - cluster.cost - other.cost
- if lowest_cost_change is None or cost_change < lowest_cost_change:
- lowest_cost_change = cost_change
- best_cluster_index = i
- best_other_index = i + 1 + j
- best_merged = merged
- assert lowest_cost_change is not None
- assert best_cluster_index is not None
- assert best_other_index is not None
- assert best_merged is not None
-
- # If the best merge we found is still taking down the file size, then
- # there's no question: we must do it, because it's beneficial in both
- # ways (lower file size and lower number of subtables). However, if the
- # best merge we found is not reducing file size anymore, then we need to
- # look at the other stop criteria = the compression factor.
- if lowest_cost_change > 0:
- # Stop criteria: check whether we should keep merging.
- # Compute size reduction brought by splitting
- cost_after_splitting = sum(c.cost for c in clusters)
- # size_reduction so that after = before * (1 - size_reduction)
- # E.g. before = 1000, after = 800, 1 - 800/1000 = 0.2
- size_reduction = 1 - cost_after_splitting / cost_before_splitting
-
- # Force more merging by taking into account the compression number.
- # Target behaviour: compression number = 1 to 9, default 5 like gzip
- # - 1 = accept to add 1 subtable to reduce size by 50%
- # - 5 = accept to add 5 subtables to reduce size by 50%
- # See https://github.com/harfbuzz/packtab/blob/master/Lib/packTab/__init__.py#L690-L691
- # Given the size reduction we have achieved so far, compute how many
- # new subtables are acceptable.
- max_new_subtables = -log2(1 - size_reduction) * compression
- log.debug(
- f" len(clusters) = {len(clusters):3d} size_reduction={size_reduction:5.2f} max_new_subtables={max_new_subtables}",
- )
- if compression == 9:
- # Override level 9 to mean: create any number of subtables
- max_new_subtables = len(clusters)
-
- # If we have managed to take the number of new subtables below the
- # threshold, then we can stop.
- if len(clusters) <= max_new_subtables + 1:
- break
-
- # No reason to stop yet, do the merge and move on to the next.
- del clusters[best_other_index]
- clusters[best_cluster_index] = best_merged
-
- # All clusters are final; turn bitmasks back into the "Pairs" format
- pairs_by_class1: Dict[Tuple[str, ...], Pairs] = defaultdict(dict)
- for pair, values in pairs.items():
- pairs_by_class1[pair[0]][pair] = values
- pairs_groups: List[Pairs] = []
- for cluster in clusters:
- pairs_group: Pairs = dict()
- for i in cluster.indices:
- class1 = all_class1[i]
- pairs_group.update(pairs_by_class1[class1])
- pairs_groups.append(pairs_group)
- return pairs_groups
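Worked numbers for the stop criterion used in the merge loop above, `max_new_subtables = -log2(1 - size_reduction) * compression`: halving the subtable bytes "pays for" `compression` extra subtables, and a 75% reduction pays for twice that.

```python
from math import log2

for size_reduction in (0.50, 0.75):
    for compression in (1, 5):
        budget = -log2(1 - size_reduction) * compression
        print(f"reduction={size_reduction:.2f} compression={compression} -> {budget:g} new subtables allowed")
# reduction=0.50 -> 1 or 5 subtables; reduction=0.75 -> 2 or 10 subtables
```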
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttx.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttx.py
deleted file mode 100644
index 65a3c7a808b41fc571d59bac80f7b1085abc6b9b..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttx.py
+++ /dev/null
@@ -1,469 +0,0 @@
-"""\
-usage: ttx [options] inputfile1 [... inputfileN]
-
-TTX -- From OpenType To XML And Back
-
-If an input file is a TrueType or OpenType font file, it will be
-decompiled to a TTX file (an XML-based text format).
-If an input file is a TTX file, it will be compiled to whatever
-format the data is in, a TrueType or OpenType/CFF font file.
-A special input value of - means read from the standard input.
-
-Output files are created so they are unique: an existing file is
-never overwritten.
-
-General options
-===============
-
--h Help print this message.
---version show version and exit.
--d Specify a directory where the output files are
- to be created.
--o Specify a file to write the output to. A special
- value of - would use the standard output.
--f Overwrite existing output file(s), i.e. don't append
- numbers.
--v Verbose: more messages will be written to stdout
- about what is being done.
--q Quiet: No messages will be written to stdout about
- what is being done.
--a allow virtual glyph IDs on compile or decompile.
-
-Dump options
-============
-
--l List table info: instead of dumping to a TTX file, list
- some minimal info about each table.
--t <table> Specify a table to dump. Multiple -t options
- are allowed. When no -t option is specified, all tables
- will be dumped.
--x <table> Specify a table to exclude from the dump. Multiple
- -x options are allowed. -t and -x are mutually exclusive.
--s Split tables: save the TTX data into separate TTX files per
- table and write one small TTX file that contains references
- to the individual table dumps. This file can be used as
- input to ttx, as long as the table files are in the
- same directory.
--g Split glyf table: Save the glyf data into separate TTX files
- per glyph and write a small TTX for the glyf table which
- contains references to the individual TTGlyph elements.
- NOTE: specifying -g implies -s (no need for -s together
- with -g)
--i Do NOT disassemble TT instructions: when this option is
- given, all TrueType programs (glyph programs, the font
- program and the pre-program) will be written to the TTX
- file as hex data instead of assembly. This saves some time
- and makes the TTX file smaller.
--z Specify a bitmap data export option for EBDT:
- {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT:
- {'raw', 'extfile'} Each option does one of the following:
-
- -z raw
- export the bitmap data as a hex dump
- -z row
- export each row as hex data
- -z bitwise
- export each row as binary in an ASCII art style
- -z extfile
- export the data as external files with XML references
-
- If no export format is specified 'raw' format is used.
--e Don't ignore decompilation errors, but show a full traceback
- and abort.
--y Select font number for TrueType Collection (.ttc/.otc),
- starting from 0.
---unicodedata
- Use custom database file to write character names in the
- comments of the cmap TTX output.
---newline
- Control how line endings are written in the XML file. It
- can be 'LF', 'CR', or 'CRLF'. If not specified, the
- default platform-specific line endings are used.
-
-Compile options
-===============
-
--m Merge with TrueType-input-file: specify a TrueType or
- OpenType font file to be merged with the TTX file. This
- option is only valid when at most one TTX file is specified.
--b Don't recalc glyph bounding boxes: use the values in the
- TTX file as-is.
---recalc-timestamp
- Set font 'modified' timestamp to current time.
- By default, the modification time of the TTX file will be
- used.
---no-recalc-timestamp
- Keep the original font 'modified' timestamp.
---flavor
- Specify flavor of output font file. May be 'woff' or 'woff2'.
- Note that WOFF2 requires the Brotli Python extension,
- available at https://github.com/google/brotli
---with-zopfli
- Use Zopfli instead of Zlib to compress WOFF. The Python
- extension is available at https://pypi.python.org/pypi/zopfli
-"""
-
-
-from fontTools.ttLib import TTFont, TTLibError
-from fontTools.misc.macCreatorType import getMacCreatorAndType
-from fontTools.unicode import setUnicodeData
-from fontTools.misc.textTools import Tag, tostr
-from fontTools.misc.timeTools import timestampSinceEpoch
-from fontTools.misc.loggingTools import Timer
-from fontTools.misc.cliTools import makeOutputFileName
-import os
-import sys
-import getopt
-import re
-import logging
-
-
-log = logging.getLogger("fontTools.ttx")
-
-opentypeheaderRE = re.compile("""sfntVersion=['"]OTTO["']""")
-
-
-class Options(object):
-
- listTables = False
- outputDir = None
- outputFile = None
- overWrite = False
- verbose = False
- quiet = False
- splitTables = False
- splitGlyphs = False
- disassembleInstructions = True
- mergeFile = None
- recalcBBoxes = True
- ignoreDecompileErrors = True
- bitmapGlyphDataFormat = "raw"
- unicodedata = None
- newlinestr = "\n"
- recalcTimestamp = None
- flavor = None
- useZopfli = False
-
- def __init__(self, rawOptions, numFiles):
- self.onlyTables = []
- self.skipTables = []
- self.fontNumber = -1
- for option, value in rawOptions:
- # general options
- if option == "-h":
- print(__doc__)
- sys.exit(0)
- elif option == "--version":
- from fontTools import version
-
- print(version)
- sys.exit(0)
- elif option == "-d":
- if not os.path.isdir(value):
- raise getopt.GetoptError(
- "The -d option value must be an existing directory"
- )
- self.outputDir = value
- elif option == "-o":
- self.outputFile = value
- elif option == "-f":
- self.overWrite = True
- elif option == "-v":
- self.verbose = True
- elif option == "-q":
- self.quiet = True
- # dump options
- elif option == "-l":
- self.listTables = True
- elif option == "-t":
- # pad with space if table tag length is less than 4
- value = value.ljust(4)
- self.onlyTables.append(value)
- elif option == "-x":
- # pad with space if table tag length is less than 4
- value = value.ljust(4)
- self.skipTables.append(value)
- elif option == "-s":
- self.splitTables = True
- elif option == "-g":
- # -g implies (and forces) splitTables
- self.splitGlyphs = True
- self.splitTables = True
- elif option == "-i":
- self.disassembleInstructions = False
- elif option == "-z":
- validOptions = ("raw", "row", "bitwise", "extfile")
- if value not in validOptions:
- raise getopt.GetoptError(
- "-z does not allow %s as a format. Use %s"
- % (value, validOptions)
- )
- self.bitmapGlyphDataFormat = value
- elif option == "-y":
- self.fontNumber = int(value)
- # compile options
- elif option == "-m":
- self.mergeFile = value
- elif option == "-b":
- self.recalcBBoxes = False
- elif option == "-e":
- self.ignoreDecompileErrors = False
- elif option == "--unicodedata":
- self.unicodedata = value
- elif option == "--newline":
- validOptions = ("LF", "CR", "CRLF")
- if value == "LF":
- self.newlinestr = "\n"
- elif value == "CR":
- self.newlinestr = "\r"
- elif value == "CRLF":
- self.newlinestr = "\r\n"
- else:
- raise getopt.GetoptError(
- "Invalid choice for --newline: %r (choose from %s)"
- % (value, ", ".join(map(repr, validOptions)))
- )
- elif option == "--recalc-timestamp":
- self.recalcTimestamp = True
- elif option == "--no-recalc-timestamp":
- self.recalcTimestamp = False
- elif option == "--flavor":
- self.flavor = value
- elif option == "--with-zopfli":
- self.useZopfli = True
- if self.verbose and self.quiet:
- raise getopt.GetoptError("-q and -v options are mutually exclusive")
- if self.verbose:
- self.logLevel = logging.DEBUG
- elif self.quiet:
- self.logLevel = logging.WARNING
- else:
- self.logLevel = logging.INFO
- if self.mergeFile and self.flavor:
- raise getopt.GetoptError("-m and --flavor options are mutually exclusive")
- if self.onlyTables and self.skipTables:
- raise getopt.GetoptError("-t and -x options are mutually exclusive")
- if self.mergeFile and numFiles > 1:
- raise getopt.GetoptError(
- "Must specify exactly one TTX source file when using -m"
- )
- if self.flavor != "woff" and self.useZopfli:
- raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'")
-
-
-def ttList(input, output, options):
- ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True)
- reader = ttf.reader
- tags = sorted(reader.keys())
- print('Listing table info for "%s":' % input)
- format = " %4s %10s %8s %8s"
- print(format % ("tag ", " checksum", " length", " offset"))
- print(format % ("----", "----------", "--------", "--------"))
- for tag in tags:
- entry = reader.tables[tag]
- if ttf.flavor == "woff2":
- # WOFF2 doesn't store table checksums, so they must be calculated
- from fontTools.ttLib.sfnt import calcChecksum
-
- data = entry.loadData(reader.transformBuffer)
- checkSum = calcChecksum(data)
- else:
- checkSum = int(entry.checkSum)
- if checkSum < 0:
- checkSum = checkSum + 0x100000000
- checksum = "0x%08X" % checkSum
- print(format % (tag, checksum, entry.length, entry.offset))
- print()
- ttf.close()
-
-
-@Timer(log, "Done dumping TTX in %(time).3f seconds")
-def ttDump(input, output, options):
- input_name = input
- if input == "-":
- input, input_name = sys.stdin.buffer, sys.stdin.name
- output_name = output
- if output == "-":
- output, output_name = sys.stdout, sys.stdout.name
- log.info('Dumping "%s" to "%s"...', input_name, output_name)
- if options.unicodedata:
- setUnicodeData(options.unicodedata)
- ttf = TTFont(
- input,
- 0,
- ignoreDecompileErrors=options.ignoreDecompileErrors,
- fontNumber=options.fontNumber,
- )
- ttf.saveXML(
- output,
- tables=options.onlyTables,
- skipTables=options.skipTables,
- splitTables=options.splitTables,
- splitGlyphs=options.splitGlyphs,
- disassembleInstructions=options.disassembleInstructions,
- bitmapGlyphDataFormat=options.bitmapGlyphDataFormat,
- newlinestr=options.newlinestr,
- )
- ttf.close()
-
-
-@Timer(log, "Done compiling TTX in %(time).3f seconds")
-def ttCompile(input, output, options):
- input_name = input
- if input == "-":
- input, input_name = sys.stdin, sys.stdin.name
- output_name = output
- if output == "-":
- output, output_name = sys.stdout.buffer, sys.stdout.name
- log.info('Compiling "%s" to "%s"...' % (input_name, output_name))
- if options.useZopfli:
- from fontTools.ttLib import sfnt
-
- sfnt.USE_ZOPFLI = True
- ttf = TTFont(
- options.mergeFile,
- flavor=options.flavor,
- recalcBBoxes=options.recalcBBoxes,
- recalcTimestamp=options.recalcTimestamp,
- )
- ttf.importXML(input)
-
- if options.recalcTimestamp is None and "head" in ttf and input is not sys.stdin:
- # use TTX file modification time for head "modified" timestamp
- mtime = os.path.getmtime(input)
- ttf["head"].modified = timestampSinceEpoch(mtime)
-
- ttf.save(output)
-
-
-def guessFileType(fileName):
- if fileName == "-":
- header = sys.stdin.buffer.peek(256)
- ext = ""
- else:
- base, ext = os.path.splitext(fileName)
- try:
- with open(fileName, "rb") as f:
- header = f.read(256)
- except IOError:
- return None
-
- if header.startswith(b"\xef\xbb\xbf
diff --git a/spaces/DaleChen/AutoGPT/Dockerfile b/spaces/DaleChen/AutoGPT/Dockerfile
deleted file mode 100644
--- a/spaces/DaleChen/AutoGPT/Dockerfile
+++ /dev/null
- > /etc/apt/sources.list.d/google-chrome.list \
- && apt-get update \
- && apt-get install -y chromium firefox-esr
-
-# Set environment variables
-ENV PIP_NO_CACHE_DIR=yes \
- PYTHONUNBUFFERED=1 \
- PYTHONDONTWRITEBYTECODE=1
-
-# Create a non-root user and set permissions
-RUN useradd --create-home appuser
-WORKDIR /home/appuser
-RUN chown appuser:appuser /home/appuser
-USER appuser
-
-# Copy the requirements.txt file and install the requirements
-COPY --chown=appuser:appuser requirements.txt .
-RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
- pip install --no-cache-dir --user -r requirements.txt
-
-# Copy the application files
-COPY --chown=appuser:appuser autogpt/ ./autogpt
-
-# Set the entrypoint
-ENTRYPOINT ["python", "-m", "autogpt"]
diff --git a/spaces/DaleChen/AutoGPT/autogpt/processing/text.py b/spaces/DaleChen/AutoGPT/autogpt/processing/text.py
deleted file mode 100644
index 52add81401775c1b111512d8149f86a175fd9acb..0000000000000000000000000000000000000000
--- a/spaces/DaleChen/AutoGPT/autogpt/processing/text.py
+++ /dev/null
@@ -1,132 +0,0 @@
-"""Text processing functions"""
-from typing import Dict, Generator, Optional
-
-from selenium.webdriver.remote.webdriver import WebDriver
-
-from autogpt.config import Config
-from autogpt.llm_utils import create_chat_completion
-from autogpt.memory import get_memory
-
-CFG = Config()
-MEMORY = get_memory(CFG)
-
-
-def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]:
- """Split text into chunks of a maximum length
-
- Args:
- text (str): The text to split
- max_length (int, optional): The maximum length of each chunk. Defaults to 8192.
-
- Yields:
- str: The next chunk of text
-
- Raises:
- ValueError: If the text is longer than the maximum length
- """
- paragraphs = text.split("\n")
- current_length = 0
- current_chunk = []
-
- for paragraph in paragraphs:
- if current_length + len(paragraph) + 1 <= max_length:
- current_chunk.append(paragraph)
- current_length += len(paragraph) + 1
- else:
- yield "\n".join(current_chunk)
- current_chunk = [paragraph]
- current_length = len(paragraph) + 1
-
- if current_chunk:
- yield "\n".join(current_chunk)
-
-
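A quick usage sketch of `split_text` above (it relies on the function defined in this module): paragraphs are greedily packed into chunks of at most `max_length` characters.

```python
sample = "\n".join(f"paragraph {i} " + "x" * 40 for i in range(10))
for n, chunk in enumerate(split_text(sample, max_length=120)):
    print(n, len(chunk))    # five chunks of two paragraphs each
```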
-def summarize_text(
- url: str, text: str, question: str, driver: Optional[WebDriver] = None
-) -> str:
- """Summarize text using the OpenAI API
-
- Args:
- url (str): The url of the text
- text (str): The text to summarize
- question (str): The question to ask the model
- driver (WebDriver): The webdriver to use to scroll the page
-
- Returns:
- str: The summary of the text
- """
- if not text:
- return "Error: No text to summarize"
-
- text_length = len(text)
- print(f"Text length: {text_length} characters")
-
- summaries = []
- chunks = list(split_text(text))
- scroll_ratio = 1 / len(chunks)
-
- for i, chunk in enumerate(chunks):
- if driver:
- scroll_to_percentage(driver, scroll_ratio * i)
- print(f"Adding chunk {i + 1} / {len(chunks)} to memory")
-
- memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"
-
- MEMORY.add(memory_to_add)
-
- print(f"Summarizing chunk {i + 1} / {len(chunks)}")
- messages = [create_message(chunk, question)]
-
- summary = create_chat_completion(
- model=CFG.fast_llm_model,
- messages=messages,
- )
- summaries.append(summary)
- print(f"Added chunk {i + 1} summary to memory")
-
- memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"
-
- MEMORY.add(memory_to_add)
-
- print(f"Summarized {len(chunks)} chunks.")
-
- combined_summary = "\n".join(summaries)
- messages = [create_message(combined_summary, question)]
-
- return create_chat_completion(
- model=CFG.fast_llm_model,
- messages=messages,
- )
-
-
-def scroll_to_percentage(driver: WebDriver, ratio: float) -> None:
- """Scroll to a percentage of the page
-
- Args:
- driver (WebDriver): The webdriver to use
- ratio (float): The percentage to scroll to
-
- Raises:
- ValueError: If the ratio is not between 0 and 1
- """
- if ratio < 0 or ratio > 1:
- raise ValueError("Percentage should be between 0 and 1")
- driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});")
-
-
-def create_message(chunk: str, question: str) -> Dict[str, str]:
- """Create a message for the chat completion
-
- Args:
- chunk (str): The chunk of text to summarize
- question (str): The question to answer
-
- Returns:
- Dict[str, str]: The message to send to the chat completion
- """
- return {
- "role": "user",
- "content": f'"""{chunk}""" Using the above text, answer the following'
- f' question: "{question}" -- if the question cannot be answered using the text,'
- " summarize the text.",
- }
diff --git a/spaces/Danielzero/GPT3.5/run_Windows.bat b/spaces/Danielzero/GPT3.5/run_Windows.bat
deleted file mode 100644
index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000
--- a/spaces/Danielzero/GPT3.5/run_Windows.bat
+++ /dev/null
@@ -1,5 +0,0 @@
-@echo off
-echo Opening ChuanhuChatGPT...
-
-REM Open powershell via bat
-start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py"
diff --git a/spaces/DarwinAnim8or/NoSleep-Story-Generator/README.md b/spaces/DarwinAnim8or/NoSleep-Story-Generator/README.md
deleted file mode 100644
index 7961a63b5a887d37ff4c5beefbe0d32d12a50896..0000000000000000000000000000000000000000
--- a/spaces/DarwinAnim8or/NoSleep-Story-Generator/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: NoSleep Story Generator
-emoji: 😱
-colorFrom: grey
-colorTo: black
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: true
-license: other
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/training/coaches/__init__.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/training/coaches/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/DragGan/DragGan/torch_utils/ops/bias_act.h b/spaces/DragGan/DragGan/torch_utils/ops/bias_act.h
deleted file mode 100644
index 60b81c6058d54638a6d74a13046fa388442d767d..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan/torch_utils/ops/bias_act.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// NVIDIA CORPORATION and its licensors retain all intellectual property
-// and proprietary rights in and to this software, related documentation
-// and any modifications thereto. Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-//------------------------------------------------------------------------
-// CUDA kernel parameters.
-
-struct bias_act_kernel_params
-{
- const void* x; // [sizeX]
- const void* b; // [sizeB] or NULL
- const void* xref; // [sizeX] or NULL
- const void* yref; // [sizeX] or NULL
- const void* dy; // [sizeX] or NULL
- void* y; // [sizeX]
-
- int grad;
- int act;
- float alpha;
- float gain;
- float clamp;
-
- int sizeX;
- int sizeB;
- int stepB;
- int loopX;
-};
-
-//------------------------------------------------------------------------
-// CUDA kernel selection.
-
-template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p);
-
-//------------------------------------------------------------------------
diff --git a/spaces/EronSamez/RVC_HFmeu/Applio-RVC-Fork/utils/README.md b/spaces/EronSamez/RVC_HFmeu/Applio-RVC-Fork/utils/README.md
deleted file mode 100644
index fb45a36b5909585aa964f2033762ee59b55526b0..0000000000000000000000000000000000000000
--- a/spaces/EronSamez/RVC_HFmeu/Applio-RVC-Fork/utils/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# External Colab Code
-Code used to make Google Colab work correctly
-- Repo link: https://github.com/IAHispano/Applio-RVC-Fork/
-
-Thanks to https://github.com/kalomaze/externalcolabcode
-
diff --git a/spaces/EuroPython2022/clickbaitonator/app.py b/spaces/EuroPython2022/clickbaitonator/app.py
deleted file mode 100644
index 09dc263468be832bf78ef7f0d0df54510ebd696b..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/clickbaitonator/app.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import gradio as gr
-from fudge.predict_clickbait import generate_clickbait, tokenizer, classifier_tokenizer
-from datasets import load_dataset,DatasetDict,Dataset
-# from datasets import
-from transformers import AutoTokenizer,AutoModelForSeq2SeqLM
-import numpy as np
-from sklearn.model_selection import train_test_split
-import pandas as pd
-from sklearn.utils.class_weight import compute_class_weight
-import torch
-import pandas as pd
-from fudge.model import Model
-import os
-from argparse import ArgumentParser
-from collections import namedtuple
-import mock
-
-from tqdm import tqdm
-import numpy as np
-import torch.nn as nn
-import torch.nn.functional as F
-from fudge.data import Dataset
-from fudge.util import save_checkpoint, ProgressMeter, AverageMeter, num_params
-from fudge.constants import *
-
-
-device = 'cpu'
-# imp.reload(model)
-pretrained_model = "checkpoint-150/"
-generation_model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model, return_dict=True).to(device)
-
-
-pad_id = 0
-
-generation_model.eval()
-
-model_args = mock.Mock()
-model_args.task = 'clickbait'
-model_args.device = device
-model_args.checkpoint = 'checkpoint-1464/'
-
-# conditioning_model = Model(model_args, pad_id, len(dataset_info.index2word)) # no need to get the glove embeddings when reloading since they're saved in model ckpt anyway
-conditioning_model = Model(model_args, pad_id, vocab_size=None) # no need to get the glove embeddings when reloading since they're saved in model ckpt anyway
-conditioning_model = conditioning_model.to(device)
-conditioning_model.eval()
-
-condition_lambda = 5.0
-length_cutoff = 50
-precondition_topk = 200
-
-
-conditioning_model.classifier
-
-model_args.checkpoint
-
-classifier_tokenizer = AutoTokenizer.from_pretrained(model_args.checkpoint, load_best_model_at_end=True)
-
-
-def rate_title(input_text, model, tokenizer, device='cuda'):
- # input_text = {
- # "postText": input_text['postText'],
- # "truthClass" : input_text['truthClass']
- # }
- tokenized_input = preprocess_function_title_only_classification(input_text,tokenizer=tokenizer)
- # print(tokenized_input.items())
- dict_tokenized_input = {k : torch.tensor([v]).to(device) for k,v in tokenized_input.items() if k != 'labels'}
- predicted_class = float(model(**dict_tokenized_input).logits)
- actual_class = input_text['truthClass']
-
- # print(predicted_class, actual_class)
- return {'predicted_class' : predicted_class}
-
-def preprocess_function_title_only_classification(examples,tokenizer=None):
- model_inputs = tokenizer(examples['postText'], padding="longest", truncation=True, max_length=25)
-
- model_inputs['labels'] = examples['truthClass']
-
- return model_inputs
-
-
-
-input_example = "On Friday, a clip of Los Angeles Lakers star LeBron James from the latest episode of \"The Shop: Uninterrupted\" is going viral on Twitter. \"Cause they racist as f--k,\" James said when asked why he hates Boston. James has had many battles with the Boston Celtics in the NBA Playoffs. According to StatMuse, he has played the Celtics 41 times in the NBA Playoffs. He's played them in the playoffs when he was on the Cleveland Cavaliers (the first time), the Miami Heat and the Cavs (the second time). Therefore, he has had quite the experience facing off with them in hostile environments. He is 25-16 against them in the 41 playoff games and averaged 29.6 points per game. (also according to StatMuse). James is currently on the Los Angeles Lakers, and the team missed the postseason this past year. They were the 11th seed in the Western Conference, so they also missed the play-in tournament which was a big surprise. His first year in Los Angeles, they also missed the playoffs, but the following season he led them to their first NBA Championship since the 2010 season. In 2021, they lost in the first-round, so they have been on a downward trajectory since winning the title. Next season will be his 20th season in the NBA, and he is widely regarded as one of the top-five (and to some the greatest) player ever to play the game of basketball. He is 37-years-old, and was the first overall pick out of high school in the 2003 NBA Draft. "
-
-output_example = "Here's why Lebron James hates the Celtics"
-textbox_input = gr.Textbox(label = "Article content",
- value=input_example)
-textbox_output = gr.Textbox(label = "Output clickbait title",
- value=output_example)
-
-
-def clickbait_generator(article_content, condition_lambda=5.0):
- results = generate_clickbait(model=generation_model,
- tokenizer=tokenizer,
- conditioning_model=conditioning_model,
- input_text=[None],
- dataset_info=None,
- precondition_topk=precondition_topk,
- length_cutoff=length_cutoff,
- condition_lambda=condition_lambda,
- article_content=article_content,
- device=device)
-
- return results[0].replace('', '').replace('', '')
-
-title = "Clickbaitinator - Controllable Clickbait generator"
-description = """
-Use the [Fudge](https://github.com/yangkevin2/naacl-2021-fudge-controlled-generation) implementation, fine-tuned for our purposes, to try to create the news headline you are looking for! Use condition_lambda to steer the clickbaitiness higher (by increasing the slider value) or lower (by decreasing the slider value).
-Note that this uses two Transformers and runs on CPU only, so it will take a minute or two to generate a title.
-"""
-
-article = "Check out [the codebase for our model](https://github.com/dsvilarkovic/clickbaitonator) that this demo is based of. You need collaborator access, which you have been probably invited for."
-
-
-app = gr.Interface(
- title = title,
- description = description,
- label = 'Article content or paragraph',
- fn = clickbait_generator,
- inputs=[textbox_input, gr.Slider(0, 15, step=0.1, value=5.0)],
- outputs=textbox_output,
- article=article,
- )
-app.launch()
\ No newline at end of file
diff --git a/spaces/FrankZxShen/so-vits-svc-models-pcr/models.py b/spaces/FrankZxShen/so-vits-svc-models-pcr/models.py
deleted file mode 100644
index 4cfc5c4c9920cbd1a082f83e861faf86cdd41e74..0000000000000000000000000000000000000000
--- a/spaces/FrankZxShen/so-vits-svc-models-pcr/models.py
+++ /dev/null
@@ -1,420 +0,0 @@
-import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import modules.attentions as attentions
-import modules.commons as commons
-import modules.modules as modules
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-
-import utils
-from modules.commons import init_weights, get_padding
-from vdecoder.hifigan.models import Generator
-from utils import f0_to_coarse
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class Encoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- # print(x.shape,x_lengths.shape)
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- out_channels,
- hidden_channels,
- kernel_size,
- n_layers,
- gin_channels=0,
- filter_channels=None,
- n_heads=None,
- p_dropout=None):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
- self.f0_emb = nn.Embedding(256, hidden_channels)
-
- self.enc_ = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
-
- def forward(self, x, x_mask, f0=None, noice_scale=1):
- x = x + self.f0_emb(f0).transpose(1,2)
- x = self.enc_(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs) * noice_scale) * x_mask
-
- return z, m, logs, x_mask
-
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
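- # Fold the 1d signal into a 2d map of shape [B, C, T // period, period] so that the
- # 2d convolutions below compare samples that are exactly `period` steps apart.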
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2,3,5,7,11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class SpeakerEncoder(torch.nn.Module):
- def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256):
- super(SpeakerEncoder, self).__init__()
- self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True)
- self.linear = nn.Linear(model_hidden_size, model_embedding_size)
- self.relu = nn.ReLU()
-
- def forward(self, mels):
- self.lstm.flatten_parameters()
- _, (hidden, _) = self.lstm(mels)
- embeds_raw = self.relu(self.linear(hidden[-1]))
- return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
-
- def compute_partial_slices(self, total_frames, partial_frames, partial_hop):
- mel_slices = []
- for i in range(0, total_frames-partial_frames, partial_hop):
- mel_range = torch.arange(i, i+partial_frames)
- mel_slices.append(mel_range)
-
- return mel_slices
-
- def embed_utterance(self, mel, partial_frames=128, partial_hop=64):
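- # Long mel spectrograms are split into overlapping windows of `partial_frames` frames
- # with hop `partial_hop`; each window is embedded separately and the per-window
- # embeddings are averaged into a single utterance-level embedding.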
- mel_len = mel.size(1)
- last_mel = mel[:,-partial_frames:]
-
- if mel_len > partial_frames:
- mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop)
- mels = list(mel[:,s] for s in mel_slices)
- mels.append(last_mel)
- mels = torch.stack(tuple(mels), 0).squeeze(1)
-
- with torch.no_grad():
- partial_embeds = self(mels)
- embed = torch.mean(partial_embeds, axis=0).unsqueeze(0)
- #embed = embed / torch.linalg.norm(embed, 2)
- else:
- with torch.no_grad():
- embed = self(last_mel)
-
- return embed
-
-class F0Decoder(nn.Module):
- def __init__(self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- spk_channels=0):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.spk_channels = spk_channels
-
- self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1)
- self.decoder = attentions.FFT(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.f0_prenet = nn.Conv1d(1, hidden_channels , 3, padding=1)
- self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)
-
- def forward(self, x, norm_f0, x_mask, spk_emb=None):
- x = torch.detach(x)
- if (spk_emb is not None):
- x = x + self.cond(spk_emb)
- x += self.f0_prenet(norm_f0)
- x = self.prenet(x) * x_mask
- x = self.decoder(x * x_mask, x_mask)
- x = self.proj(x) * x_mask
- return x
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- ssl_dim,
- n_speakers,
- sampling_rate=44100,
- **kwargs):
-
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- self.ssl_dim = ssl_dim
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2)
-
- self.enc_p = TextEncoder(
- inter_channels,
- hidden_channels,
- filter_channels=filter_channels,
- n_heads=n_heads,
- n_layers=n_layers,
- kernel_size=kernel_size,
- p_dropout=p_dropout
- )
- hps = {
- "sampling_rate": sampling_rate,
- "inter_channels": inter_channels,
- "resblock": resblock,
- "resblock_kernel_sizes": resblock_kernel_sizes,
- "resblock_dilation_sizes": resblock_dilation_sizes,
- "upsample_rates": upsample_rates,
- "upsample_initial_channel": upsample_initial_channel,
- "upsample_kernel_sizes": upsample_kernel_sizes,
- "gin_channels": gin_channels,
- }
- self.dec = Generator(h=hps)
- self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
- self.f0_decoder = F0Decoder(
- 1,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- spk_channels=gin_channels
- )
- self.emb_uv = nn.Embedding(2, hidden_channels)
-
- def forward(self, c, f0, uv, spec, g=None, c_lengths=None, spec_lengths=None):
- g = self.emb_g(g).transpose(1,2)
- # ssl prenet
- x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype)
- x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1,2)
-
- # f0 predict
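- # f0 (Hz) is converted to a scaled mel-like value, lf0 = 2595 * log10(1 + f0 / 700) / 500;
- # the F0 decoder predicts lf0 from the content features and a normalized version of it,
- # and infer() inverts this formula to recover f0 in Hz when predict_f0 is enabled.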
- lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500
- norm_lf0 = utils.normalize_f0(lf0, x_mask, uv)
- pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g)
-
- # encoder
- z_ptemp, m_p, logs_p, _ = self.enc_p(x, x_mask, f0=f0_to_coarse(f0))
- z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g)
-
- # flow
- z_p = self.flow(z, spec_mask, g=g)
- z_slice, pitch_slice, ids_slice = commons.rand_slice_segments_with_pitch(z, f0, spec_lengths, self.segment_size)
-
- # nsf decoder
- o = self.dec(z_slice, g=g, f0=pitch_slice)
-
- return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q), pred_lf0, norm_lf0, lf0
-
- def infer(self, c, f0, uv, g=None, noice_scale=0.35, predict_f0=False):
- c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
- g = self.emb_g(g).transpose(1,2)
- x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype)
- x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1,2)
-
- if predict_f0:
- lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500
- norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False)
- pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g)
- f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1)
-
- z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), noice_scale=noice_scale)
- z = self.flow(z_p, c_mask, g=g, reverse=True)
- o = self.dec(z * c_mask, g=g, f0=f0)
- return o,f0
diff --git a/spaces/Gazoche/text-to-gundam/README.md b/spaces/Gazoche/text-to-gundam/README.md
deleted file mode 100644
index e352b64e47a521cdef4225da109a2c3d1666afc7..0000000000000000000000000000000000000000
--- a/spaces/Gazoche/text-to-gundam/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Text To Gundam
-emoji: 🐢
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.5
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train10_gpt_generalization.sh b/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train10_gpt_generalization.sh
deleted file mode 100644
index 79cd3381fe46511bffc0ecd6317613c21ab6caa9..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train10_gpt_generalization.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-#SBATCH -c 10
-#SBATCH -n 1
-#SBATCH -o logs/%j.out
-#SBATCH --exclusive
-STEPS=${1-'50000'}
-
-
-sh scripts/traintest_scripts/train_test_multi_task_goal.sh data \
-"[mix-piles,rainbow-stack,manipulating-two-ropes,insert-sphere-into-container,align-pair-colored-blocks-along-line,construct-corner-building,colorful_block-tower-on-cylinder-base,build-bridge,push_piles-into-letter]"\
-"[sorting-blocks-into-pallets,build-two-circles,align-cylinders-in-square,Four-corner-pyramid-challenge,corner-sort-cylinders]" \
-gpt10task_gen $STEPS
diff --git a/spaces/Gradio-Blocks/Alexa-NLU-Clone/app.py b/spaces/Gradio-Blocks/Alexa-NLU-Clone/app.py
deleted file mode 100644
index 1414e2da5c0fdf76013fddb327b8bee626bb6dab..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/Alexa-NLU-Clone/app.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import gradio as gr
-
-import os
-import torch
-import librosa
-from glob import glob
-from transformers import AutoTokenizer, AutoModelForSequenceClassification, TextClassificationPipeline, AutoModelForTokenClassification, TokenClassificationPipeline, Wav2Vec2ForCTC, Wav2Vec2Processor, Wav2Vec2ProcessorWithLM
-
-SAMPLE_RATE = 16_000
-
-models = {}
-
-models_paths = {
- "en-US": "jonatasgrosman/wav2vec2-large-xlsr-53-english",
- "fr-FR": "jonatasgrosman/wav2vec2-large-xlsr-53-french",
- "nl-NL": "jonatasgrosman/wav2vec2-large-xlsr-53-dutch",
- "pl-PL": "jonatasgrosman/wav2vec2-large-xlsr-53-polish",
- "it-IT": "jonatasgrosman/wav2vec2-large-xlsr-53-italian",
- "ru-RU": "jonatasgrosman/wav2vec2-large-xlsr-53-russian",
- "pt-PT": "jonatasgrosman/wav2vec2-large-xlsr-53-portuguese",
- "de-DE": "jonatasgrosman/wav2vec2-large-xlsr-53-german",
- "es-ES": "jonatasgrosman/wav2vec2-large-xlsr-53-spanish",
- "ja-JP": "jonatasgrosman/wav2vec2-large-xlsr-53-japanese",
- "ar-SA": "jonatasgrosman/wav2vec2-large-xlsr-53-arabic",
- "fi-FI": "jonatasgrosman/wav2vec2-large-xlsr-53-finnish",
- "hu-HU": "jonatasgrosman/wav2vec2-large-xlsr-53-hungarian",
- "zh-CN": "jonatasgrosman/wav2vec2-large-xlsr-53-chinese-zh-cn",
- "el-GR": "jonatasgrosman/wav2vec2-large-xlsr-53-greek",
-}
-
-# Classifier Intent
-model_name = 'qanastek/XLMRoberta-Alexa-Intents-Classification'
-tokenizer_intent = AutoTokenizer.from_pretrained(model_name)
-model_intent = AutoModelForSequenceClassification.from_pretrained(model_name)
-classifier_intent = TextClassificationPipeline(model=model_intent, tokenizer=tokenizer_intent)
-
-# Classifier Language
-model_name = 'qanastek/51-languages-classifier'
-tokenizer_langs = AutoTokenizer.from_pretrained(model_name)
-model_langs = AutoModelForSequenceClassification.from_pretrained(model_name)
-classifier_language = TextClassificationPipeline(model=model_langs, tokenizer=tokenizer_langs)
-
-# NER Extractor
-model_name = 'qanastek/XLMRoberta-Alexa-Intents-NER-NLU'
-tokenizer_ner = AutoTokenizer.from_pretrained(model_name)
-model_ner = AutoModelForTokenClassification.from_pretrained(model_name)
-predict_ner = TokenClassificationPipeline(model=model_ner, tokenizer=tokenizer_ner)
-
-EXAMPLE_DIR = './wavs/'
-examples = sorted(glob(os.path.join(EXAMPLE_DIR, '*.wav')))
-examples = [[e, e.split("=")[0].split("/")[-1]] for e in examples]
-
-def transcribe(audio_path, lang_code):
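- # Wav2Vec2 processors/models are loaded lazily and cached per language code, so the
- # first request for a language is slow and later requests reuse the cached objects.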
-
- speech_array, sampling_rate = librosa.load(audio_path, sr=16_000)
-
- if lang_code not in models:
- models[lang_code] = {}
- models[lang_code]["processor"] = Wav2Vec2Processor.from_pretrained(models_paths[lang_code])
- models[lang_code]["model"] = Wav2Vec2ForCTC.from_pretrained(models_paths[lang_code])
-
- # Load model
- processor_asr = models[lang_code]["processor"]
- model_asr = models[lang_code]["model"]
-
- inputs = processor_asr(speech_array, sampling_rate=16_000, return_tensors="pt", padding=True)
-
- with torch.no_grad():
- logits = model_asr(inputs.input_values, attention_mask=inputs.attention_mask).logits
-
- predicted_ids = torch.argmax(logits, dim=-1)
-
- return processor_asr.batch_decode(predicted_ids)[0]
-
-def getUniform(text):
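- # Regroup token-level NER predictions into entities: a "B-" tag opens a new bucket keyed
- # by entity type plus a running index, "I-" tags append their word pieces to the current
- # bucket, and the result is returned as (entity_type, word_pieces) pairs.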
-
- idx = 0
- res = {}
-
- for t in text:
-
- raw = t["entity"].replace("B-","").replace("I-","")
- word = t["word"].replace("▁","")
-
- if "B-" in t["entity"]:
- res[f"{raw}|{idx}"] = [word]
- idx += 1
- else:
- res[f"{raw}|{idx}"].append(word)
-
- res = [(r.split("|")[0], res[r]) for r in res]
-
- return res
-
-
-def predict(wav_file, lang_code):
-
- if lang_code not in models_paths.keys():
-
- return {
- "error": "The language code is unknown!"
- }
-
- text = transcribe(wav_file, lang_code).replace("apizza","a pizza") + " ."
-
- intent_class = classifier_intent(text)[0]["label"]
- language_class = classifier_language(text)[0]["label"]
- named_entities = getUniform(predict_ner(text))
-
- return {
- "text": text,
- "language": language_class,
- "intent_class": intent_class,
- "named_entities": named_entities,
- }
-
-iface = gr.Interface(
- predict,
- title='Alexa Clone 👩💼 🗪 🤖 Multilingual NLU',
- description='Upload your wav file to test the models (the first execution takes about 20s to 30s, subsequent runs take less than 1s)',
- # thumbnail="",
- inputs=[
- gr.inputs.Audio(label='wav file', source='microphone', type='filepath'),
- gr.inputs.Dropdown(choices=list(models_paths.keys())),
- ],
- outputs=[
- gr.outputs.JSON(label='ASR -> Slot Recognition + Intent Classification + Language Classification'),
- ],
- examples=examples,
- article='Made with ❤️ by Yanis Labrak thanks to 🤗',
-)
-
-iface.launch()
\ No newline at end of file
diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/util.py b/spaces/Gradio-Blocks/StyleGAN-NADA/util.py
deleted file mode 100644
index 083b56170f5feb72eccfebd38a53aed70db32064..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/StyleGAN-NADA/util.py
+++ /dev/null
@@ -1,136 +0,0 @@
-from matplotlib import pyplot as plt
-import torch
-import torch.nn.functional as F
-import os
-import dlib
-from PIL import Image
-import numpy as np
-import scipy
-import scipy.ndimage
-import torchvision.transforms as transforms
-
-def display_image(image, size=None, mode='nearest', unnorm=False, title=''):
- # image is [3,h,w] or [1,3,h,w] tensor [0,1]
- if not isinstance(image, torch.Tensor):
- image = transforms.ToTensor()(image).unsqueeze(0)
- if image.is_cuda:
- image = image.cpu()
- if size is not None and image.size(-1) != size:
- image = F.interpolate(image, size=(size,size), mode=mode)
- if image.dim() == 4:
- image = image[0]
- image = image.permute(1, 2, 0).detach().numpy()
- plt.figure()
- plt.title(title)
- plt.axis('off')
- plt.imshow(image)
-
-def get_landmark(filepath, predictor):
- """get landmark with dlib
- :return: np.array shape=(68, 2)
- """
- detector = dlib.get_frontal_face_detector()
-
- img = dlib.load_rgb_image(filepath)
- dets = detector(img, 1)
- assert len(dets) > 0, "Face not detected, try another face image"
-
- for k, d in enumerate(dets):
- shape = predictor(img, d)
-
- t = list(shape.parts())
- a = []
- for tt in t:
- a.append([tt.x, tt.y])
- lm = np.array(a)
- return lm
-
-def align_face(filepath, predictor, output_size=256, transform_size=1024, enable_padding=True):
-
- """
- :param filepath: str
- :return: PIL Image
- """
- lm = get_landmark(filepath, predictor)
-
- lm_chin = lm[0: 17] # left-right
- lm_eyebrow_left = lm[17: 22] # left-right
- lm_eyebrow_right = lm[22: 27] # left-right
- lm_nose = lm[27: 31] # top-down
- lm_nostrils = lm[31: 36] # top-down
- lm_eye_left = lm[36: 42] # left-clockwise
- lm_eye_right = lm[42: 48] # left-clockwise
- lm_mouth_outer = lm[48: 60] # left-clockwise
- lm_mouth_inner = lm[60: 68] # left-clockwise
-
- # Calculate auxiliary vectors.
- eye_left = np.mean(lm_eye_left, axis=0)
- eye_right = np.mean(lm_eye_right, axis=0)
- eye_avg = (eye_left + eye_right) * 0.5
- eye_to_eye = eye_right - eye_left
- mouth_left = lm_mouth_outer[0]
- mouth_right = lm_mouth_outer[6]
- mouth_avg = (mouth_left + mouth_right) * 0.5
- eye_to_mouth = mouth_avg - eye_avg
-
- # Choose oriented crop rectangle.
- x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
- x /= np.hypot(*x)
- x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
- y = np.flipud(x) * [-1, 1]
- c = eye_avg + eye_to_mouth * 0.1
- quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
- qsize = np.hypot(*x) * 2
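- # quad holds the four corners of the oriented crop rectangle (centered slightly below the
- # eye midpoint, toward the mouth) and qsize is its side length in pixels; both drive the
- # crop/pad/transform steps below.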
-
- # read image
- img = Image.open(filepath)
-
- transform_size = output_size
- enable_padding = True
-
- # Shrink.
- shrink = int(np.floor(qsize / output_size * 0.5))
- if shrink > 1:
- rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
- img = img.resize(rsize, Image.ANTIALIAS)
- quad /= shrink
- qsize /= shrink
-
- # Crop.
- border = max(int(np.rint(qsize * 0.1)), 3)
- crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
- int(np.ceil(max(quad[:, 1]))))
- crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
- min(crop[3] + border, img.size[1]))
- if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
- img = img.crop(crop)
- quad -= crop[0:2]
-
- # Pad.
- pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
- int(np.ceil(max(quad[:, 1]))))
- pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
- max(pad[3] - img.size[1] + border, 0))
- if enable_padding and max(pad) > border - 4:
- pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
- img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
- h, w, _ = img.shape
- y, x, _ = np.ogrid[:h, :w, :1]
- mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
- 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
- blur = qsize * 0.02
- img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
- img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
- img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
- quad += pad[:2]
-
- # Transform.
- img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(), Image.BILINEAR)
- if output_size < transform_size:
- img = img.resize((output_size, output_size), Image.ANTIALIAS)
-
- # Return aligned image.
- return img
-
-def strip_path_extension(path):
- return os.path.splitext(path)[0]
\ No newline at end of file
diff --git a/spaces/Gradio-Blocks/anime-colorization/app.py b/spaces/Gradio-Blocks/anime-colorization/app.py
deleted file mode 100644
index 560fe908e4b0ea261d50fb36c72a53fa24f01953..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/anime-colorization/app.py
+++ /dev/null
@@ -1,246 +0,0 @@
-"""
-A Gradio Blocks Demo App.
-Generate a large batch of samples from a super resolution model, given a batch
-of samples from a regular model from image_sample.py.
-"""
-
-import gradio as gr
-import argparse
-import os
-import glob
-
-import blobfile as bf
-import numpy as np
-import torch as th
-import torch.distributed as dist
-
-from PIL import Image, ImageDraw
-from torchvision import utils
-from pixel_guide_diffusion import dist_util, logger
-from pixel_guide_diffusion.image_datasets import load_data
-from pixel_guide_diffusion.script_util import (
- pg_model_and_diffusion_defaults,
- pg_create_model_and_diffusion,
- pgsr_model_and_diffusion_defaults,
- pgsr_create_model_and_diffusion,
- args_to_dict,
- add_dict_to_argparser,
-)
-
-MODEL_FLAGS="--image_size=32 --small_size=32 --large_size=128 --guide_size=128 --num_channels=128 --num_channels2=64 --num_res_blocks=3 --learn_sigma=True --dropout=0.0 --use_attention2=False"
-DIFFUSION_FLAGS="--diffusion_steps=4000 --noise_schedule=cosine"
-TEST_FLAGS="--batch_size=1 --seed=233 --num_samples=4"
-OTHER_FLAGS = '''\
---timestep_respacing=16 \
---use_ddim=False \
---model_path=./danbooru2017_guided_log/ema_0.9999_360000.pt \
---model_path2=./danbooru2017_guided_sr_log/ema_0.9999_360000.pt'''
-OTHER_FLAGS = OTHER_FLAGS.replace('\r\n', ' ').replace('\n', ' ')
-flags = OTHER_FLAGS.split(' ') + MODEL_FLAGS.split(' ') + DIFFUSION_FLAGS.split(' ') + TEST_FLAGS.split(' ')
-
-
-def norm_size(img, size=128, add_edges=True):
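- # Converts the sketch to grayscale, pads it onto a white square canvas (optionally drawing
- # thin guide lines at the original image borders) and resizes it to size x size.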
- img = img.convert('L')
- w, h = img.size
- if w != h:
- scale = 1024 / max(img.size)
- img = img.resize([int(round(s*scale)) for s in img.size])
- w, h = img.size
- max_size = max(w, h)
- x0 = (max_size - w) // 2
- y0 = (max_size - h) // 2
- x1 = x0 + w
- y1 = y0 + h
- canvas = Image.new('L', (max_size,max_size), 255)
- canvas.paste(img, (x0,y0,x1,y1))
-
- if add_edges:
- draw = ImageDraw.Draw(canvas)
- draw.line((x0-5,0,x0-1,max_size), fill=0)
- draw.line((0,y0-5,max_size,y0-1), fill=0)
- draw.line((x1+1,0,x1+5,max_size), fill=0)
- draw.line((0,y1+1,max_size,y1+5), fill=0)
-
- img = canvas
- img = img.resize((size,size), resample=Image.LANCZOS)
-
- return img
-
-
-def create_argparser():
- defaults = dict(
- data_dir="",
- guide_dir="",
- clip_denoised=True,
- num_samples=100,
- batch_size=4,
- use_ddim=False,
- base_samples="",
- model_path="",
- seed=-1,
- )
- defaults.update(pg_model_and_diffusion_defaults())
- defaults.update(pgsr_model_and_diffusion_defaults())
- defaults.update(dict(
- num_channels2=128,
- use_attention2=True,
- model_path2="",
- ))
- parser = argparse.ArgumentParser()
- add_dict_to_argparser(parser, defaults)
- return parser
-
-
-@th.inference_mode()
-def main():
- args = create_argparser().parse_args(flags)
-
- dist_util.setup_dist()
- logger.configure()
-
- logger.log("creating model...")
- model, diffusion = pg_create_model_and_diffusion(
- **args_to_dict(args, pg_model_and_diffusion_defaults().keys())
- )
- model.load_state_dict(
- dist_util.load_state_dict(args.model_path, map_location="cpu")
- )
- model.to(dist_util.dev())
- model.eval()
-
- logger.log("creating model2...")
- args.num_channels = args.num_channels2
- args.use_attention = args.use_attention2
- model2, diffusion2 = pgsr_create_model_and_diffusion(
- **args_to_dict(args, pgsr_model_and_diffusion_defaults().keys())
- )
- model2.load_state_dict(
- dist_util.load_state_dict(args.model_path2, map_location="cpu")
- )
- model2.to(dist_util.dev())
- model2.eval()
-
- def inference(img, seed, add_edges):
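- # Two-stage pipeline: the sketch is normalized to 128x128 and used as a guide for the
- # first diffusion model, which generates a 32x32 color image; the second (guided
- # super-resolution) diffusion model then upsamples that result back to 128x128.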
- th.manual_seed(int(seed))
- sketch = sketch_out = norm_size(img, size=128, add_edges=add_edges)
- sketch = np.asarray(sketch).astype(np.float32) / 127.5 - 1
- sketch = th.from_numpy(sketch).float()[None,None].to(dist_util.dev())
- model_kwargs = { "guide": sketch }
- sample_fn = (
- diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
- )
- sample = sample_fn(
- model,
- (args.batch_size, 3, args.image_size, args.image_size),
- clip_denoised=args.clip_denoised,
- model_kwargs=model_kwargs,
- )
-
- model_kwargs["low_res"] = sample
- sample_fn2 = (
- diffusion2.p_sample_loop if not args.use_ddim else diffusion2.ddim_sample_loop
- )
- sample2 = sample_fn2(
- model2,
- (args.batch_size, 3, args.large_size, args.large_size),
- clip_denoised=args.clip_denoised,
- model_kwargs=model_kwargs,
- )
- out = (sample2[0].clamp(-1,1).cpu().numpy() + 1) / 2 * 255
- out = np.uint8(out)
- out = out.transpose([1,2,0])
- out = Image.fromarray(out)
-
- return sketch_out, out
-
- with gr.Blocks() as demo:
- gr.Markdown('''
-Anime-Colorization
-
-Colorize your anime sketches with this app.
-This is a Gradio Blocks app of
-HighCWu/pixel-guide-diffusion-for-anime-colorization.
-(PS: The training datasets were made from
-HighCWu/danbooru-sketch-pair-128x,
-which converts real anime images to sketches with
-SketchKeras.
-So the model is not very sensitive to sketches drawn in other styles,
-and the colorized results for such sketches are not very good.)
-''')
- with gr.Row():
- with gr.Box():
- with gr.Column():
- with gr.Row():
- seed_in = gr.Number(
- value=233,
- label='Seed'
- )
- with gr.Row():
- edges_in = gr.Checkbox(
- label="Add Edges"
- )
- with gr.Row():
- sketch_in = gr.Image(
- type="pil",
- label="Sketch"
- )
- with gr.Row():
- generate_button = gr.Button('Generate')
- with gr.Row():
- gr.Markdown('Click to add example as input.👇')
- with gr.Row():
- example_sketch_paths = [[p] for p in sorted(glob.glob('docs/imgs/anime_sketch/*.png'))]
- example_sketch = gr.Dataset(
- components=[sketch_in],
- samples=example_sketch_paths
- )
- with gr.Row():
- gr.Markdown('These are expect real outputs.👇')
- with gr.Row():
- example_real_paths = [[p] for p in sorted(glob.glob('docs/imgs/anime/*.png'))]
- example_real = gr.Dataset(
- components=[sketch_in],
- samples=example_real_paths
- )
-
- with gr.Box():
- with gr.Column():
- with gr.Row():
- with gr.Column():
- sketch_out = gr.Image(
- type="pil",
- label="Input"
- )
- with gr.Column():
- colorized_out = gr.Image(
- type="pil",
- label="Colorization Result"
- )
- with gr.Row():
- gr.Markdown(
- 'Here are some samples 👇 [top: sketch, center: generated, bottom: real]'
- )
- with gr.Row():
- gr.Image(
- value="docs/imgs/sample.png",
- type="filepath",
- interactive=False,
- label="Samples"
- )
- gr.Markdown(
- ''
- )
-
- generate_button.click(
- inference, inputs=[sketch_in, seed_in, edges_in], outputs=[sketch_out, colorized_out]
- )
- example_sketch.click(
- fn=lambda examples: gr.Image.update(value=examples[0]),
- inputs=example_sketch,
- outputs=example_sketch.components
- )
-
- demo.launch()
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/Gradio-Blocks/are-you-wearing-a-mask/README.md b/spaces/Gradio-Blocks/are-you-wearing-a-mask/README.md
deleted file mode 100644
index 7c155930e9a4d972e3e47cc2e77ad719b50b9b20..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/are-you-wearing-a-mask/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Are You Wearing A Mask
-emoji: 👁
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.0.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/utils/__init__.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/utils/__init__.py
deleted file mode 100644
index ac489e2dbbc0e6fa87f5088b4edcc20f8cadc1a6..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/utils/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .collect_env import collect_env
-from .logger import get_root_logger
-
-__all__ = ['get_root_logger', 'collect_env']
diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/modules/codebooks_patterns.py b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/modules/codebooks_patterns.py
deleted file mode 100644
index c5b35cbea8cff84aa56116dbdd860fc72a913a13..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/modules/codebooks_patterns.py
+++ /dev/null
@@ -1,539 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from collections import namedtuple
-from dataclasses import dataclass
-from functools import lru_cache
-import logging
-import typing as tp
-
-from abc import ABC, abstractmethod
-import torch
-
-LayoutCoord = namedtuple('LayoutCoord', ['t', 'q']) # (timestep, codebook index)
-PatternLayout = tp.List[tp.List[LayoutCoord]] # Sequence of coordinates
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class Pattern:
- """Base implementation of a pattern over a sequence with multiple codebooks.
-
- The codebook pattern consists of a layout, defining for each sequence step
- the list of coordinates of each codebook timestep in the resulting interleaved sequence.
- The first item of the pattern is always an empty list in order to properly insert a special token
- to start with. For convenience, we also keep track of ``n_q`` the number of codebooks used for the pattern
- and ``timesteps`` the number of timesteps corresponding to the original sequence.
-
- The pattern provides convenient methods to build and revert interleaved sequences from it:
- ``build_pattern_sequence`` maps a dense input tensor of a multi-codebook sequence of shape [B, K, T]
- to the interleaved sequence of shape [B, K, S] by applying the pattern, with B being the batch size,
- K the number of codebooks, T the number of original timesteps and S the number of sequence steps
- of the output sequence. The unfilled positions are replaced with a special token and the built sequence
- is returned along with a mask indicating valid tokens.
- ``revert_pattern_sequence`` maps back an interleaved sequence of shape [B, K, S] to the original alignment
- of codebooks across timesteps to an output tensor of shape [B, K, T], using again a special token and a mask
- to fill and specify invalid positions if needed.
- See the dedicated methods for more details.
- """
- # Pattern layout, for each sequence step, we have a list of coordinates
- # corresponding to the original codebook timestep and position.
- # The first list is always an empty list in order to properly insert
- # a special token to start with.
- layout: PatternLayout
- timesteps: int
- n_q: int
-
- def __post_init__(self):
- assert len(self.layout) > 0
- assert self.layout[0] == []
- self._validate_layout()
- self._build_reverted_sequence_scatter_indexes = lru_cache(100)(self._build_reverted_sequence_scatter_indexes)
- self._build_pattern_sequence_scatter_indexes = lru_cache(100)(self._build_pattern_sequence_scatter_indexes)
- logger.info("New pattern, time steps: %d, sequence steps: %d", self.timesteps, len(self.layout))
-
- def _validate_layout(self):
- """Runs checks on the layout to ensure a valid pattern is defined.
- A pattern is considered invalid if:
- - Multiple timesteps for the same codebook are defined in the same sequence step
- - The timesteps for a given codebook are not in ascending order as we advance in the sequence
- (this would mean that we have future timesteps before past timesteps).
- """
- q_timesteps = {q: 0 for q in range(self.n_q)}
- for s, seq_coords in enumerate(self.layout):
- if len(seq_coords) > 0:
- qs = set()
- for coord in seq_coords:
- qs.add(coord.q)
- last_q_timestep = q_timesteps[coord.q]
- assert coord.t >= last_q_timestep, \
- f"Past timesteps are found in the sequence for codebook = {coord.q} at step {s}"
- q_timesteps[coord.q] = coord.t
- # each sequence step contains at max 1 coordinate per codebook
- assert len(qs) == len(seq_coords), \
- f"Multiple entries for a same codebook are found at step {s}"
-
- @property
- def num_sequence_steps(self):
- return len(self.layout) - 1
-
- @property
- def max_delay(self):
- max_t_in_seq_coords = 0
- for seq_coords in self.layout[1:]:
- for coords in seq_coords:
- max_t_in_seq_coords = max(max_t_in_seq_coords, coords.t + 1)
- return max_t_in_seq_coords - self.timesteps
-
- @property
- def valid_layout(self):
- valid_step = len(self.layout) - self.max_delay
- return self.layout[:valid_step]
-
- def get_sequence_coords_with_timestep(self, t: int, q: tp.Optional[int] = None):
- """Get codebook coordinates in the layout that corresponds to the specified timestep t
- and optionally to the codebook q. Coordinates are returned as a tuple with the sequence step
- and the actual codebook coordinates.
- """
- assert t <= self.timesteps, "provided timesteps is greater than the pattern's number of timesteps"
- if q is not None:
- assert q <= self.n_q, "provided number of codebooks is greater than the pattern's number of codebooks"
- coords = []
- for s, seq_codes in enumerate(self.layout):
- for code in seq_codes:
- if code.t == t and (q is None or code.q == q):
- coords.append((s, code))
- return coords
-
- def get_steps_with_timestep(self, t: int, q: tp.Optional[int] = None) -> tp.List[int]:
- return [step for step, coords in self.get_sequence_coords_with_timestep(t, q)]
-
- def get_first_step_with_timesteps(self, t: int, q: tp.Optional[int] = None) -> tp.Optional[int]:
- steps_with_timesteps = self.get_steps_with_timestep(t, q)
- return steps_with_timesteps[0] if len(steps_with_timesteps) > 0 else None
-
- def _build_pattern_sequence_scatter_indexes(self, timesteps: int, n_q: int, keep_only_valid_steps: bool,
- device: tp.Union[torch.device, str] = 'cpu'):
- """Build scatter indexes corresponding to the pattern, up to the provided sequence_steps.
-
- Args:
- timesteps (int): Maximum number of timesteps to consider.
- keep_only_valid_steps (bool): Restrict the pattern layout to match only valid steps.
- device (Union[torch.device, str]): Device for created tensors.
- Returns:
- indexes (torch.Tensor): Indexes corresponding to the sequence, of shape [K, S].
- mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes, of shape [K, S].
- """
- assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}"
- assert timesteps <= self.timesteps, "invalid number of timesteps used to build the sequence from the pattern"
- # use the proper layout based on whether we limit ourselves to valid steps only or not,
- # note that using the valid_layout will result in a truncated sequence up to the valid steps
- ref_layout = self.valid_layout if keep_only_valid_steps else self.layout
- # single item indexing being super slow with pytorch vs. numpy, so we use numpy here
- indexes = torch.zeros(n_q, len(ref_layout), dtype=torch.long).numpy()
- mask = torch.zeros(n_q, len(ref_layout), dtype=torch.bool).numpy()
- # fill indexes with last sequence step value that will correspond to our special token
- # the last value is n_q * timesteps as we have flattened z and append special token as the last token
- # which will correspond to the index: n_q * timesteps
- indexes[:] = n_q * timesteps
- # iterate over the pattern and fill scattered indexes and mask
- for s, sequence_coords in enumerate(ref_layout):
- for coords in sequence_coords:
- if coords.t < timesteps:
- indexes[coords.q, s] = coords.t + coords.q * timesteps
- mask[coords.q, s] = 1
- indexes = torch.from_numpy(indexes).to(device)
- mask = torch.from_numpy(mask).to(device)
- return indexes, mask
-
- def build_pattern_sequence(self, z: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False):
- """Build sequence corresponding to the pattern from the input tensor z.
- The sequence is built using up to sequence_steps if specified, and non-pattern
- coordinates are filled with the special token.
-
- Args:
- z (torch.Tensor): Input tensor of multi-codebooks sequence, of shape [B, K, T].
- special_token (int): Special token used to fill non-pattern coordinates in the new sequence.
- keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps.
- Steps that are beyond valid steps will be replaced by the special_token in that case.
- Returns:
- values (torch.Tensor): Interleaved sequence matching the pattern, of shape [B, K, S] with S
- corresponding either to the sequence_steps if provided, otherwise to the length of the pattern.
- indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, S].
- mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, S].
- """
- B, K, T = z.shape
- indexes, mask = self._build_pattern_sequence_scatter_indexes(
- T, K, keep_only_valid_steps=keep_only_valid_steps, device=str(z.device)
- )
- z = z.view(B, -1)
- # we append the special token as the last index of our flattened z tensor
- z = torch.cat([z, torch.zeros_like(z[:, :1]) + special_token], dim=1)
- values = z[:, indexes.view(-1)]
- values = values.view(B, K, indexes.shape[-1])
- return values, indexes, mask
-
- def _build_reverted_sequence_scatter_indexes(self, sequence_steps: int, n_q: int,
- keep_only_valid_steps: bool = False,
- is_model_output: bool = False,
- device: tp.Union[torch.device, str] = 'cpu'):
- """Builds scatter indexes required to retrieve the original multi-codebook sequence
- from interleaving pattern.
-
- Args:
- sequence_steps (int): Sequence steps.
- n_q (int): Number of codebooks.
- keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps.
- Steps that are beyond valid steps will be replaced by the special_token in that case.
- is_model_output (bool): Whether to keep the sequence item corresponding to initial special token or not.
- device (Union[torch.device, str]): Device for created tensors.
- Returns:
- torch.Tensor: Indexes for reconstructing the output, of shape [K, T].
- mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T].
- """
- ref_layout = self.valid_layout if keep_only_valid_steps else self.layout
- # TODO(jade): Do we want to further truncate to only valid timesteps here as well?
- timesteps = self.timesteps
- assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}"
- assert sequence_steps <= len(ref_layout), \
- f"sequence to revert is longer than the defined pattern: {sequence_steps} > {len(ref_layout)}"
-
- # ensure we take the appropriate indexes to keep the model output from the first special token as well
- if is_model_output:
- ref_layout = ref_layout[1:]
-
- # single item indexing being super slow with pytorch vs. numpy, so we use numpy here
- indexes = torch.zeros(n_q, timesteps, dtype=torch.long).numpy()
- mask = torch.zeros(n_q, timesteps, dtype=torch.bool).numpy()
- # fill indexes with last sequence step value that will correspond to our special token
- indexes[:] = n_q * sequence_steps
- for s, sequence_codes in enumerate(ref_layout):
- if s < sequence_steps:
- for code in sequence_codes:
- if code.t < timesteps:
- indexes[code.q, code.t] = s + code.q * sequence_steps
- mask[code.q, code.t] = 1
- indexes = torch.from_numpy(indexes).to(device)
- mask = torch.from_numpy(mask).to(device)
- return indexes, mask
-
- def revert_pattern_sequence(self, s: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False):
- """Revert a sequence built from the pattern back to the original multi-codebook sequence without interleaving.
- The sequence is reverted using up to timesteps if specified, and non-pattern coordinates
- are filled with the special token.
-
- Args:
- s (torch.Tensor): Interleaved sequence tensor obtained from the pattern, of shape [B, K, S].
- special_token (int or float): Special token used to fill non-pattern coordinates in the new sequence.
- Returns:
- values (torch.Tensor): Reverted (de-interleaved) multi-codebook sequence, of shape [B, K, T] with T
- corresponding either to the timesteps if provided, or to the total timesteps in the pattern otherwise.
- indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, T].
- mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T].
- """
- B, K, S = s.shape
- indexes, mask = self._build_reverted_sequence_scatter_indexes(
- S, K, keep_only_valid_steps, is_model_output=False, device=str(s.device)
- )
- s = s.view(B, -1)
- # we append the special token as the last index of our flattened z tensor
- s = torch.cat([s, torch.zeros_like(s[:, :1]) + special_token], dim=1)
- values = s[:, indexes.view(-1)]
- values = values.view(B, K, indexes.shape[-1])
- return values, indexes, mask
-
- def revert_pattern_logits(self, logits: torch.Tensor, special_token: float, keep_only_valid_steps: bool = False):
- """Revert model logits obtained on a sequence built from the pattern
- back to a tensor matching the original sequence.
-
- This method is similar to ``revert_pattern_sequence`` with the following specificities:
- 1. It is designed to work with the extra cardinality dimension
- 2. We return the logits for the first sequence item that matches the special_token and
- which matching target in the original sequence is the first item of the sequence,
- while we skip the last logits as there is no matching target
- """
- B, card, K, S = logits.shape
- indexes, mask = self._build_reverted_sequence_scatter_indexes(
- S, K, keep_only_valid_steps, is_model_output=True, device=logits.device
- )
- logits = logits.reshape(B, card, -1)
- # we append the special token as the last index of our flattened z tensor
- logits = torch.cat([logits, torch.zeros_like(logits[:, :, :1]) + special_token], dim=-1) # [B, card, K x S]
- values = logits[:, :, indexes.view(-1)]
- values = values.view(B, card, K, indexes.shape[-1])
- return values, indexes, mask
-
-
-class CodebooksPatternProvider(ABC):
- """Abstraction around providing pattern for interleaving codebooks.
-
- The CodebooksPatternProvider abstraction allows to implement various strategies to
- define interleaving pattern of sequences composed of multiple codebooks. For a given
- number of codebooks `n_q`, the pattern provider can generate a specified pattern
- corresponding to a sequence of `T` timesteps with `n_q` parallel codebooks. This pattern
- can be used to construct a new sequence from the original codes respecting the specified
- pattern. The pattern is defined as a list of list of code coordinates, code coordinate
- being a tuple with the original timestep and codebook to build the new sequence.
- Note that all patterns must start with an empty list that is then used to insert a first
- sequence step of special tokens in the newly generated sequence.
-
- Args:
- n_q (int): number of codebooks.
- cached (bool): if True, patterns for a given length are cached. In general
- that should be true for efficiency reasons, to avoid synchronization points.
- """
- def __init__(self, n_q: int, cached: bool = True):
- assert n_q > 0
- self.n_q = n_q
- self.get_pattern = lru_cache(100)(self.get_pattern) # type: ignore
-
- @abstractmethod
- def get_pattern(self, timesteps: int) -> Pattern:
- """Builds pattern with specific interleaving between codebooks.
-
- Args:
- timesteps (int): Total number of timesteps.
- """
- raise NotImplementedError()
-
-
-class DelayedPatternProvider(CodebooksPatternProvider):
- """Provider for delayed pattern across delayed codebooks.
- Codebooks are delayed in the sequence and sequence steps will contain codebooks
- from different timesteps.
-
- Example:
- Taking timesteps=4 and n_q=3, delays=None, the multi-codebook sequence:
- [[1, 2, 3, 4],
- [1, 2, 3, 4],
- [1, 2, 3, 4]]
- The resulting sequence obtained from the returned pattern is:
- [[S, 1, 2, 3, 4],
- [S, S, 1, 2, 3],
- [S, S, S, 1, 2]]
- (with S being a special token)
-
- Args:
- n_q (int): Number of codebooks.
- delays (Optional[List[int]]): Delay for each of the codebooks.
- If delays is not defined, each codebook is delayed by 1 compared to the previous one.
- flatten_first (int): Flatten the first N timesteps.
- empty_initial (int): Prepend with N empty list of coordinates.
- """
- def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None,
- flatten_first: int = 0, empty_initial: int = 0):
- super().__init__(n_q)
- if delays is None:
- delays = list(range(n_q))
- self.delays = delays
- self.flatten_first = flatten_first
- self.empty_initial = empty_initial
- assert len(self.delays) == self.n_q
- assert sorted(self.delays) == self.delays
-
- def get_pattern(self, timesteps: int) -> Pattern:
- out: PatternLayout = [[]]
- max_delay = max(self.delays)
- if self.empty_initial:
- out += [[] for _ in range(self.empty_initial)]
- if self.flatten_first:
- for t in range(min(timesteps, self.flatten_first)):
- for q in range(self.n_q):
- out.append([LayoutCoord(t, q)])
- for t in range(self.flatten_first, timesteps + max_delay):
- v = []
- for q, delay in enumerate(self.delays):
- t_for_q = t - delay
- if t_for_q >= self.flatten_first:
- v.append(LayoutCoord(t_for_q, q))
- out.append(v)
- return Pattern(out, n_q=self.n_q, timesteps=timesteps)
-
-
-class ParallelPatternProvider(DelayedPatternProvider):
- """Provider for parallel pattern across codebooks.
- This pattern provider is a special case of the delayed pattern with actually no delay,
- hence delays=repeat(0, n_q).
-
- Args:
- n_q (int): Number of codebooks.
- """
- def __init__(self, n_q: int):
- super().__init__(n_q, [0] * n_q)
-
-
-class UnrolledPatternProvider(CodebooksPatternProvider):
- """Provider for unrolling codebooks pattern.
-    This pattern provider can represent the codebooks either fully or only partially flattened,
-    while also specifying a given delay between the flattened codebook representations, allowing the
-    codebooks to be unrolled in the sequence.
-
- Example:
- 1. Flattening of the codebooks.
- By default, the pattern provider will fully flatten the codebooks such as flattening=range(n_q),
- taking n_q = 3 and timesteps = 4:
- [[1, 2, 3, 4],
- [1, 2, 3, 4],
- [1, 2, 3, 4]]
- will result into:
- [[S, S, 1, S, S, 2, S, S, 3, S, S, 4],
- [S, 1, S, S, 2, S, S, 3, S, S, 4, S],
- [1, S, S, 2, S, S, 3, S, S, 4, S, S]]
-        2. Partial flattening of the codebooks. The ``flattening`` parameter specifies the inner step
-           for each of the codebooks, defining which codebooks to flatten (or keep in parallel), for example
-           taking n_q = 3, timesteps = 4 and flattening = [0, 1, 1]:
- [[1, 2, 3, 4],
- [1, 2, 3, 4],
- [1, 2, 3, 4]]
- will result into:
- [[S, 1, S, S, 2, S, S, 3, S, S, 4, S],
- [S, 1, S, S, 2, S, S, 3, S, S, 4, S],
- [1, S, S, 2, S, S, 3, S, S, 4, S, S]]
-        3. Flattening with delay. The ``delay`` parameter further unrolls the sequence of codebooks by
-           specifying a delay per codebook. Note that the delays of codebooks flattened to the
-           same inner timestep should be coherent. For example, taking n_q = 3, timesteps = 4, flattening = [0, 1, 1]
- and delays = [0, 3, 3]:
- [[1, 2, 3, 4],
- [1, 2, 3, 4],
- [1, 2, 3, 4]]
- will result into:
- [[S, S, S, 1, S, 2, S, 3, S, 4],
- [S, S, S, 1, S, 2, S, 3, S, 4],
- [1, 2, 3, S, 4, S, 5, S, 6, S]]
-
- Args:
- n_q (int): Number of codebooks.
- flattening (Optional[List[int]]): Flattening schema over the codebooks. If not defined,
- the codebooks will be flattened to 1 codebook per step, meaning that the sequence will
- have n_q extra steps for each timestep.
- delays (Optional[List[int]]): Delay for each of the codebooks. If not defined,
-            no delay is added, i.e. this defaults to [0] * ``n_q``.
-            Note that two codebooks that will be flattened to the same inner step
-            should have the same delay, otherwise the pattern is considered invalid.
- """
- FlattenedCodebook = namedtuple('FlattenedCodebook', ['codebooks', 'delay'])
-
- def __init__(self, n_q: int, flattening: tp.Optional[tp.List[int]] = None,
- delays: tp.Optional[tp.List[int]] = None):
- super().__init__(n_q)
- if flattening is None:
- flattening = list(range(n_q))
- if delays is None:
- delays = [0] * n_q
- assert len(flattening) == n_q
- assert len(delays) == n_q
- assert sorted(flattening) == flattening
- assert sorted(delays) == delays
- self._flattened_codebooks = self._build_flattened_codebooks(delays, flattening)
- self.max_delay = max(delays)
-
- def _build_flattened_codebooks(self, delays: tp.List[int], flattening: tp.List[int]):
- """Build a flattened codebooks representation as a dictionary of inner step
- and the actual codebook indices corresponding to the flattened codebook. For convenience, we
-        also store the delay associated with the flattened codebook to avoid maintaining an extra mapping.
- """
- flattened_codebooks: dict = {}
- for q, (inner_step, delay) in enumerate(zip(flattening, delays)):
- if inner_step not in flattened_codebooks:
- flat_codebook = UnrolledPatternProvider.FlattenedCodebook(codebooks=[q], delay=delay)
- else:
- flat_codebook = flattened_codebooks[inner_step]
- assert flat_codebook.delay == delay, (
- "Delay and flattening between codebooks is inconsistent: ",
- "two codebooks flattened to the same position should have the same delay."
- )
- flat_codebook.codebooks.append(q)
- flattened_codebooks[inner_step] = flat_codebook
- return flattened_codebooks
-
- @property
- def _num_inner_steps(self):
- """Number of inner steps to unroll between timesteps in order to flatten the codebooks.
- """
- return max([inner_step for inner_step in self._flattened_codebooks.keys()]) + 1
-
- def num_virtual_steps(self, timesteps: int) -> int:
- return timesteps * self._num_inner_steps + 1
-
- def get_pattern(self, timesteps: int) -> Pattern:
- """Builds pattern for delay across codebooks.
-
- Args:
-            timesteps (int): Total number of timesteps.
- """
- # the PatternLayout is built as a tuple of sequence position and list of coordinates
- # so that it can be reordered properly given the required delay between codebooks of given timesteps
- indexed_out: list = [(-1, [])]
- max_timesteps = timesteps + self.max_delay
- for t in range(max_timesteps):
- # for each timestep, we unroll the flattened codebooks,
- # emitting the sequence step with the corresponding delay
- for step in range(self._num_inner_steps):
- if step in self._flattened_codebooks:
- # we have codebooks at this virtual step to emit
- step_codebooks = self._flattened_codebooks[step]
- t_for_q = t + step_codebooks.delay
- coords = [LayoutCoord(t, q) for q in step_codebooks.codebooks]
- if t_for_q < max_timesteps and t < max_timesteps:
- indexed_out.append((t_for_q, coords))
- else:
- # there is no codebook in this virtual step so we emit an empty list
- indexed_out.append((t, []))
- out = [coords for _, coords in sorted(indexed_out)]
- return Pattern(out, n_q=self.n_q, timesteps=timesteps)
-
-
-class VALLEPattern(CodebooksPatternProvider):
- """Almost VALL-E style pattern. We futher allow some delays for the
- codebooks other than the first one.
-
- Args:
- n_q (int): Number of codebooks.
- delays (Optional[List[int]]): Delay for each of the codebooks.
-            If delays are not defined, no extra delay is applied (the default is [0] * (n_q - 1)).
- """
- def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None):
- super().__init__(n_q)
- if delays is None:
- delays = [0] * (n_q - 1)
- self.delays = delays
- assert len(self.delays) == self.n_q - 1
- assert sorted(self.delays) == self.delays
-
- def get_pattern(self, timesteps: int) -> Pattern:
- out: PatternLayout = [[]]
- for t in range(timesteps):
- out.append([LayoutCoord(t, 0)])
- max_delay = max(self.delays)
- for t in range(timesteps + max_delay):
- v = []
- for q, delay in enumerate(self.delays):
- t_for_q = t - delay
- if t_for_q >= 0:
- v.append(LayoutCoord(t_for_q, q + 1))
- out.append(v)
- return Pattern(out, n_q=self.n_q, timesteps=timesteps)
-
-
-class MusicLMPattern(CodebooksPatternProvider):
- """Almost MusicLM style pattern. This is equivalent to full flattening
- but in a different order.
-
- Args:
- n_q (int): Number of codebooks.
- group_by (int): Number of codebooks to group together.
- """
- def __init__(self, n_q: int, group_by: int = 2):
- super().__init__(n_q)
- self.group_by = group_by
-
- def get_pattern(self, timesteps: int) -> Pattern:
- out: PatternLayout = [[]]
- for offset in range(0, self.n_q, self.group_by):
- for t in range(timesteps):
- for q in range(offset, offset + self.group_by):
- out.append([LayoutCoord(t, q)])
- return Pattern(out, n_q=self.n_q, timesteps=timesteps)
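
For illustration, here is a minimal standalone sketch of the delayed interleaving built by DelayedPatternProvider.get_pattern (standard library only; 'S' stands in for the special token, and out-of-range coordinates are simply dropped here, whereas the real Pattern class keeps them and masks them out later):

# Rebuild the delayed layout for n_q=3, timesteps=4 with the default delays [0, 1, 2].
n_q, timesteps = 3, 4
delays = list(range(n_q))
layout = [[]]  # first step is reserved for special tokens
for t in range(timesteps + max(delays)):
    layout.append([(t - d, q) for q, d in enumerate(delays) if 0 <= t - d < timesteps])

# Render one row per codebook, as in the docstring example (codes are 1..timesteps).
rows = [["S"] * len(layout) for _ in range(n_q)]
for s, coords in enumerate(layout):
    for t, q in coords:
        rows[q][s] = str(t + 1)
for row in rows:
    print(" ".join(row))
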
diff --git a/spaces/GroveStreet/GTA_SOVITS/modules/F0Predictor/PMF0Predictor.py b/spaces/GroveStreet/GTA_SOVITS/modules/F0Predictor/PMF0Predictor.py
deleted file mode 100644
index ccf4128436c5b7e5a3e720d4597bad0c622d0920..0000000000000000000000000000000000000000
--- a/spaces/GroveStreet/GTA_SOVITS/modules/F0Predictor/PMF0Predictor.py
+++ /dev/null
@@ -1,83 +0,0 @@
-from modules.F0Predictor.F0Predictor import F0Predictor
-import parselmouth
-import numpy as np
-
-class PMF0Predictor(F0Predictor):
- def __init__(self,hop_length=512,f0_min=50,f0_max=1100,sampling_rate=44100):
- self.hop_length = hop_length
- self.f0_min = f0_min
- self.f0_max = f0_max
- self.sampling_rate = sampling_rate
-
-
- def interpolate_f0(self,f0):
- '''
-        Interpolate the F0 contour over unvoiced frames.
- '''
-
- data = np.reshape(f0, (f0.size, 1))
-
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
- vuv_vector[data > 0.0] = 1.0
- vuv_vector[data <= 0.0] = 0.0
-
- ip_data = data
-
- frame_number = data.size
- last_value = 0.0
- for i in range(frame_number):
- if data[i] <= 0.0:
- j = i + 1
- for j in range(i + 1, frame_number):
- if data[j] > 0.0:
- break
- if j < frame_number - 1:
- if last_value > 0.0:
- step = (data[j] - data[i - 1]) / float(j - i)
- for k in range(i, j):
- ip_data[k] = data[i - 1] + step * (k - i + 1)
- else:
- for k in range(i, j):
- ip_data[k] = data[j]
- else:
- for k in range(i, frame_number):
- ip_data[k] = last_value
- else:
-                ip_data[i] = data[i]  # this copy may be unnecessary
- last_value = data[i]
-
- return ip_data[:,0], vuv_vector[:,0]
-
- def compute_f0(self,wav,p_len=None):
- x = wav
- if p_len is None:
- p_len = x.shape[0]//self.hop_length
- else:
- assert abs(p_len-x.shape[0]//self.hop_length) < 4, "pad length error"
- time_step = self.hop_length / self.sampling_rate * 1000
- f0 = parselmouth.Sound(x, self.sampling_rate).to_pitch_ac(
- time_step=time_step / 1000, voicing_threshold=0.6,
- pitch_floor=self.f0_min, pitch_ceiling=self.f0_max).selected_array['frequency']
-
- pad_size=(p_len - len(f0) + 1) // 2
- if(pad_size>0 or p_len - len(f0) - pad_size>0):
- f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
- f0,uv = self.interpolate_f0(f0)
- return f0
-
- def compute_f0_uv(self,wav,p_len=None):
- x = wav
- if p_len is None:
- p_len = x.shape[0]//self.hop_length
- else:
- assert abs(p_len-x.shape[0]//self.hop_length) < 4, "pad length error"
- time_step = self.hop_length / self.sampling_rate * 1000
- f0 = parselmouth.Sound(x, self.sampling_rate).to_pitch_ac(
- time_step=time_step / 1000, voicing_threshold=0.6,
- pitch_floor=self.f0_min, pitch_ceiling=self.f0_max).selected_array['frequency']
-
- pad_size=(p_len - len(f0) + 1) // 2
- if(pad_size>0 or p_len - len(f0) - pad_size>0):
- f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
- f0,uv = self.interpolate_f0(f0)
- return f0,uv
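
A hedged usage sketch for the predictor above; the import assumes this repository's module layout is on the path, and "example.wav" is a placeholder file:

import soundfile as sf
from modules.F0Predictor.PMF0Predictor import PMF0Predictor  # path as in this repo

wav, sr = sf.read("example.wav")                  # mono waveform expected
predictor = PMF0Predictor(hop_length=512, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)             # interpolated f0 plus voiced/unvoiced flags
print(f0.shape, uv.shape)                         # roughly len(wav) // hop_length frames each
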
diff --git a/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifiganwithsnake/utils.py b/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifiganwithsnake/utils.py
deleted file mode 100644
index 9c93c996d3cc73c30d71c1fc47056e4230f35c0f..0000000000000000000000000000000000000000
--- a/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifiganwithsnake/utils.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import glob
-import os
-import matplotlib
-import torch
-from torch.nn.utils import weight_norm
-# matplotlib.use("Agg")
-import matplotlib.pylab as plt
-
-
-def plot_spectrogram(spectrogram):
- fig, ax = plt.subplots(figsize=(10, 2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
-
- fig.canvas.draw()
- plt.close()
-
- return fig
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def apply_weight_norm(m):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- weight_norm(m)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size*dilation - dilation)/2)
-
-
-def load_checkpoint(filepath, device):
- assert os.path.isfile(filepath)
- print("Loading '{}'".format(filepath))
- checkpoint_dict = torch.load(filepath, map_location=device)
- print("Complete.")
- return checkpoint_dict
-
-
-def save_checkpoint(filepath, obj):
- print("Saving checkpoint to {}".format(filepath))
- torch.save(obj, filepath)
- print("Complete.")
-
-
-def del_old_checkpoints(cp_dir, prefix, n_models=2):
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern) # get checkpoint paths
- cp_list = sorted(cp_list)# sort by iter
- if len(cp_list) > n_models: # if more than n_models models are found
-        for cp in cp_list[:-n_models]:  # delete the oldest models, keeping the latest n_models
- open(cp, 'w').close()# empty file contents
- os.unlink(cp)# delete file (move to trash when using Colab)
-
-
-def scan_checkpoint(cp_dir, prefix):
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern)
- if len(cp_list) == 0:
- return None
- return sorted(cp_list)[-1]
-
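
A small sketch of what get_padding above computes: the padding that keeps a Conv1d output the same length as its input for odd kernel sizes (torch is assumed to be available):

import torch
from torch import nn

def get_padding(kernel_size, dilation=1):        # same formula as above
    return int((kernel_size * dilation - dilation) / 2)

x = torch.randn(1, 4, 100)                       # (batch, channels, time)
conv = nn.Conv1d(4, 4, kernel_size=7, dilation=2, padding=get_padding(7, 2))
print(conv(x).shape)                             # torch.Size([1, 4, 100]) -- length preserved
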
diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/base_model.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/base_model.py
deleted file mode 100644
index a60993a6cc3c5ccc3a207c2fc9b424d9081ead54..0000000000000000000000000000000000000000
--- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/base_model.py
+++ /dev/null
@@ -1,51 +0,0 @@
-
-from typing import List
-
-import numpy as np
-from torch import Tensor, nn
-
-
-class BaseRGBDModel(nn.Module):
- def __init__(self):
- super(BaseRGBDModel, self).__init__()
- """
- Requirements:
- 1. Construct a model
- 2. Load pretrained weights
- 3. Load model into device
- 4. Construct preprocessing
- """
-
- def inference(
- self, image: Tensor, depth: Tensor,
- ) -> np.ndarray:
- """
- Given:
- - An image (Tensor) with original shape [c, h, w]
-        - A depth image (Tensor) with a shape of [c, h, w]; it does not need to be the same shape as the image
-
- Requirements:
- 1. Preprocessing
- 2. Inference
-        3. Return saliency maps as np.float32 between 0.0 and 1.0,
-           with the same size as the original image
-
- """
- raise NotImplementedError()
-
- def batch_inference(
- self, images: Tensor, depths: Tensor,
- ) -> List[np.ndarray]:
- """
- Given:
- - A batch of images (Tensor) with original shape [b, c, h, w]
-        - A batch of depths (Tensor) with a shape of [b, c, h, w]; they do not need to be the same shape as the images
-
- Requirements:
- 1. Preprocessing
- 2. Inference
-        3. Return saliency maps as np.float32 between 0.0 and 1.0,
-           with the same size as the original images
-
- """
- raise NotImplementedError()
\ No newline at end of file
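
A minimal sketch of a subclass satisfying the interface above; the import path and the mean-of-channels "saliency" are illustrative placeholders, not a real RGB-D SOD model:

from typing import List

import numpy as np
from torch import Tensor

from base_model import BaseRGBDModel  # assumes this file's module name


class DummyRGBDModel(BaseRGBDModel):
    def inference(self, image: Tensor, depth: Tensor) -> np.ndarray:
        saliency = image.float().mean(dim=0)                       # [h, w] stand-in prediction
        saliency = (saliency - saliency.min()) / (saliency.max() - saliency.min() + 1e-8)
        return saliency.cpu().numpy().astype(np.float32)           # float32 in [0.0, 1.0]

    def batch_inference(self, images: Tensor, depths: Tensor) -> List[np.ndarray]:
        return [self.inference(img, dep) for img, dep in zip(images, depths)]
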
diff --git a/spaces/HaiTang/DeepDanbooru_string/README.md b/spaces/HaiTang/DeepDanbooru_string/README.md
deleted file mode 100644
index 4330b6f969246dc764a34ea254d2e807159f1c55..0000000000000000000000000000000000000000
--- a/spaces/HaiTang/DeepDanbooru_string/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: DeepDanbooru String
-emoji: 💬
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.6
-app_file: app.py
-pinned: false
-duplicated_from: NoCrypt/DeepDanbooru_string
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version`: _string_
-Version of the chosen SDK (`gradio` here, per the front matter above).
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/num_samples_dataset.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/num_samples_dataset.py
deleted file mode 100644
index 99a17495c701d8a05e0268f98bf453905e11d078..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/num_samples_dataset.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from . import FairseqDataset
-
-
-class NumSamplesDataset(FairseqDataset):
- def __getitem__(self, index):
- return 1
-
- def __len__(self):
- return 0
-
- def collater(self, samples):
- return sum(samples)
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/models/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/models/__init__.py
deleted file mode 100644
index 3532479e52a0e1f1ba204c6f5d51c71c98ee5df0..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/models/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import importlib
-import os
-
-
-# automatically import any Python files in the models/ directory
-models_dir = os.path.dirname(__file__)
-for file in os.listdir(models_dir):
- path = os.path.join(models_dir, file)
- if (
- not file.startswith("_")
- and not file.startswith(".")
- and (file.endswith(".py") or os.path.isdir(path))
- ):
- model_name = file[: file.find(".py")] if file.endswith(".py") else file
- module = importlib.import_module("fairseq.model_parallel.models." + model_name)
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/run_scripts/caption/train_caption_stage2_base.sh b/spaces/HarryLee/eCommerceImageCaptioning/run_scripts/caption/train_caption_stage2_base.sh
deleted file mode 100644
index e5518b2ffcfdab4ffd84fe55e08253aa5cf084dd..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/run_scripts/caption/train_caption_stage2_base.sh
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/env bash
-
-# The port for communication. Note that if you want to run multiple tasks on the same machine,
-# you need to specify different port numbers.
-export MASTER_PORT=1062
-
-log_dir=./stage2_logs
-save_dir=./stage2_checkpoints
-mkdir -p $log_dir $save_dir
-
-bpe_dir=../../utils/BPE
-user_dir=../../ofa_module
-
-data_dir=../../dataset/caption_data
-data=${data_dir}/caption_stage2_train.tsv,${data_dir}/caption_val.tsv
-restore_file=../../checkpoints/caption_stage1_base_best.pt
-selected_cols=1,4,2
-
-task=caption
-arch=ofa_base
-criterion=scst_reward_criterion
-label_smoothing=0.1
-lr=1e-5
-max_epoch=5
-warmup_ratio=0.06
-batch_size=2
-update_freq=4
-resnet_drop_path_rate=0.0
-encoder_drop_path_rate=0.0
-decoder_drop_path_rate=0.0
-dropout=0.0
-attention_dropout=0.0
-max_src_length=80
-max_tgt_length=20
-num_bins=1000
-patch_image_size=480
-eval_cider_cached=${data_dir}/cider_cached_tokens/coco-valid-words.p
-scst_cider_cached=${data_dir}/cider_cached_tokens/coco-train-words.p
-
-for lr in {1e-5,}; do
- echo "lr "${lr}
- for max_epoch in {3,}; do
- echo "max_epoch "${max_epoch}
-
- log_file=${log_dir}/${lr}"_"${max_epoch}".log"
- save_path=${save_dir}/${lr}"_"${max_epoch}
- mkdir -p $save_path
-
- CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python3 -m torch.distributed.launch --nproc_per_node=8 --master_port=${MASTER_PORT} ../../train.py \
- $data \
- --selected-cols=${selected_cols} \
- --bpe-dir=${bpe_dir} \
- --user-dir=${user_dir} \
- --restore-file=${restore_file} \
- --reset-optimizer --reset-dataloader --reset-meters \
- --save-dir=${save_path} \
- --task=${task} \
- --arch=${arch} \
- --criterion=${criterion} \
- --batch-size=${batch_size} \
- --update-freq=${update_freq} \
- --encoder-normalize-before \
- --decoder-normalize-before \
- --share-decoder-input-output-embed \
- --share-all-embeddings \
- --layernorm-embedding \
- --patch-layernorm-embedding \
- --code-layernorm-embedding \
- --resnet-drop-path-rate=${resnet_drop_path_rate} \
- --encoder-drop-path-rate=${encoder_drop_path_rate} \
- --decoder-drop-path-rate=${decoder_drop_path_rate} \
- --dropout=${dropout} \
- --attention-dropout=${attention_dropout} \
- --weight-decay=0.01 --optimizer=adam --adam-betas="(0.9,0.999)" --adam-eps=1e-08 --clip-norm=1.0 \
- --lr-scheduler=polynomial_decay --lr=${lr} \
- --max-epoch=${max_epoch} --warmup-ratio=${warmup_ratio} \
- --log-format=simple --log-interval=10 \
- --fixed-validation-seed=7 \
- --no-epoch-checkpoints --keep-best-checkpoints=1 \
- --save-interval=1 --validate-interval=1 \
- --save-interval-updates=500 --validate-interval-updates=500 \
- --eval-cider \
- --eval-cider-cached-tokens=${eval_cider_cached} \
- --eval-args='{"beam":5,"max_len_b":16,"no_repeat_ngram_size":3}' \
- --best-checkpoint-metric=cider --maximize-best-checkpoint-metric \
- --max-src-length=${max_src_length} \
- --max-tgt-length=${max_tgt_length} \
- --find-unused-parameters \
- --freeze-encoder-embedding \
- --freeze-decoder-embedding \
- --add-type-embedding \
- --scale-attn \
- --scale-fc \
- --scale-heads \
- --disable-entangle \
- --num-bins=${num_bins} \
- --patch-image-size=${patch_image_size} \
- --scst \
- --scst-cider-cached-tokens=${scst_cider_cached} \
- --scst-args='{"beam":5,"max_len_b":16,"no_repeat_ngram_size":3}' \
- --memory-efficient-fp16 \
- --fp16-scale-window=512 \
- --num-workers=0 > ${log_file} 2>&1
- done
-done
\ No newline at end of file
diff --git a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/get_vocab.py b/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/get_vocab.py
deleted file mode 100644
index 76eb55904a0bf46c32d140848bda384dad584ca6..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/get_vocab.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#! /usr/bin/env python
-from __future__ import print_function
-
-import os
-import sys
-import inspect
-import warnings
-import argparse
-import codecs
-
-from collections import Counter
-
-# hack for python2/3 compatibility
-from io import open
-argparse.open = open
-
-def create_parser(subparsers=None):
-
- if subparsers:
- parser = subparsers.add_parser('get-vocab',
- formatter_class=argparse.RawDescriptionHelpFormatter,
- description="Generates vocabulary")
- else:
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawDescriptionHelpFormatter,
- description="Generates vocabulary")
-
- parser.add_argument(
- '--input', '-i', type=argparse.FileType('r'), default=sys.stdin,
- metavar='PATH',
- help="Input file (default: standard input).")
-
- parser.add_argument(
- '--output', '-o', type=argparse.FileType('w'), default=sys.stdout,
- metavar='PATH',
- help="Output file (default: standard output)")
-
- return parser
-
-def get_vocab(train_file, vocab_file):
-
- c = Counter()
-
- for line in train_file:
- for word in line.strip('\r\n ').split(' '):
- if word:
- c[word] += 1
-
- for key,f in sorted(c.items(), key=lambda x: x[1], reverse=True):
- vocab_file.write(key+" "+ str(f) + "\n")
-
-if __name__ == "__main__":
-
- currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
- newdir = os.path.join(currentdir, 'subword_nmt')
- if os.path.isdir(newdir):
- warnings.simplefilter('default')
- warnings.warn(
- "this script's location has moved to {0}. This symbolic link will be removed in a future version. Please point to the new location, or install the package and use the command 'subword-nmt'".format(newdir),
- DeprecationWarning
- )
-
- # python 2/3 compatibility
- if sys.version_info < (3, 0):
- sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
- sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
- sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
- else:
- sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
- sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
- sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
-
- parser = create_parser()
- args = parser.parse_args()
-
- # read/write files as UTF-8
-    if args.input.name != '<stdin>':
- args.input = codecs.open(args.input.name, encoding='utf-8')
-    if args.output.name != '<stdout>':
- args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
-
- get_vocab(args.input, args.output)
\ No newline at end of file
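
A quick sketch of calling get_vocab above on in-memory streams instead of files (io.StringIO stands in for the input and output paths):

import io

corpus = io.StringIO("the cat sat on the mat\nthe dog sat\n")
vocab = io.StringIO()
get_vocab(corpus, vocab)      # assumes the get_vocab function above is in scope
print(vocab.getvalue())       # "the 3", "sat 2", then the remaining words with count 1
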
diff --git a/spaces/Hazem/roop/roop/globals.py b/spaces/Hazem/roop/roop/globals.py
deleted file mode 100644
index 77fd391db235b878ce1f91765596bd76adb06697..0000000000000000000000000000000000000000
--- a/spaces/Hazem/roop/roop/globals.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from typing import List
-
-source_path = None
-target_path = None
-output_path = None
-frame_processors: List[str] = []
-keep_fps = None
-keep_audio = None
-keep_frames = None
-many_faces = None
-video_encoder = None
-video_quality = None
-max_memory = None
-execution_providers: List[str] = []
-execution_threads = None
-headless = None
-log_level = 'error'
diff --git a/spaces/Heisenberg08/Ai_Portrait_Mode/tempCodeRunnerFile.py b/spaces/Heisenberg08/Ai_Portrait_Mode/tempCodeRunnerFile.py
deleted file mode 100644
index 166afb1b207219d28df158a4a838c1e99f4988ea..0000000000000000000000000000000000000000
--- a/spaces/Heisenberg08/Ai_Portrait_Mode/tempCodeRunnerFile.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import torch
-import torchvision
-from torchvision import transforms
-
-import numpy as np
-import matplotlib.pyplot as plt
-from PIL import Image
-from model import DoubleConv,UNET
-
-import os
-os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
-
-
-convert_tensor = transforms.ToTensor()
-device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
-# print(device)
-
-model = UNET(in_channels=3, out_channels=1).to(device)
-model=torch.load("Unet_acc_94.pth",map_location=torch.device('cpu'))
-
-# test_img=np.array(Image.open("profilepic - Copy.jpeg").resize((160,240)))
-test_img=Image.open("104.jpg").resize((240,160))
-
-# test_img=torch.tensor(test_img).permute(2,1,0)
-# test_img=test_img.unsqueeze(0)
-test_img=convert_tensor(test_img).unsqueeze(0)
-print(test_img.shape)
-preds=model(test_img.float())
-preds=torch.sigmoid(preds)
-preds=(preds > 0.5).float()
-print(preds.shape)
-im=preds.squeeze(0).permute(1,2,0).detach()
-print(im.shape)
-fig,axs=plt.subplots(1,2)
-
-axs[0].imshow(im)
-axs[1].imshow(test_img.squeeze(0).permute(1,2,0).detach())
-plt.show()
diff --git a/spaces/HuangLab/CELL-E_2-Image_Prediction/taming/lr_scheduler.py b/spaces/HuangLab/CELL-E_2-Image_Prediction/taming/lr_scheduler.py
deleted file mode 100644
index e598ed120159c53da6820a55ad86b89f5c70c82d..0000000000000000000000000000000000000000
--- a/spaces/HuangLab/CELL-E_2-Image_Prediction/taming/lr_scheduler.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import numpy as np
-
-
-class LambdaWarmUpCosineScheduler:
- """
- note: use with a base_lr of 1.0
- """
- def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
- self.lr_warm_up_steps = warm_up_steps
- self.lr_start = lr_start
- self.lr_min = lr_min
- self.lr_max = lr_max
- self.lr_max_decay_steps = max_decay_steps
- self.last_lr = 0.
- self.verbosity_interval = verbosity_interval
-
- def schedule(self, n):
- if self.verbosity_interval > 0:
- if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
- if n < self.lr_warm_up_steps:
- lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
- self.last_lr = lr
- return lr
- else:
- t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
- t = min(t, 1.0)
- lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
- 1 + np.cos(t * np.pi))
- self.last_lr = lr
- return lr
-
- def __call__(self, n):
- return self.schedule(n)
-
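
A hedged sketch of wiring the scheduler above into PyTorch's LambdaLR; per the note, the optimizer's base lr is 1.0 so the schedule's output is the effective learning rate (the step counts and rates below are placeholders):

import torch

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)        # base_lr of 1.0, see note above
schedule = LambdaWarmUpCosineScheduler(                           # class defined above
    warm_up_steps=1000, lr_min=1e-6, lr_max=3e-4, lr_start=1e-6, max_decay_steps=100_000)
lambda_lr = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=schedule)

for step in range(5):
    optimizer.step()                                              # backward() omitted in this sketch
    lambda_lr.step()                                              # effective lr = 1.0 * schedule(step)
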
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/lightconv_layer/__init__.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/lightconv_layer/__init__.py
deleted file mode 100644
index 3b2a99c1227f827768911e5e22e79f6865ffbfd3..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/lightconv_layer/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .lightconv_layer import LightconvLayer # noqa
diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/utils/file_client.py b/spaces/Iceclear/StableSR/StableSR/basicsr/utils/file_client.py
deleted file mode 100644
index 89d83ab9e0d4314f8cdf2393908a561c6d1dca92..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/basicsr/utils/file_client.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py # noqa: E501
-from abc import ABCMeta, abstractmethod
-
-
-class BaseStorageBackend(metaclass=ABCMeta):
- """Abstract class of storage backends.
-
-    All backends need to implement two APIs: ``get()`` and ``get_text()``.
-    ``get()`` reads the file as a byte stream and ``get_text()`` reads the file
-    as text.
- """
-
- @abstractmethod
- def get(self, filepath):
- pass
-
- @abstractmethod
- def get_text(self, filepath):
- pass
-
-
-class MemcachedBackend(BaseStorageBackend):
- """Memcached storage backend.
-
- Attributes:
- server_list_cfg (str): Config file for memcached server list.
- client_cfg (str): Config file for memcached client.
- sys_path (str | None): Additional path to be appended to `sys.path`.
- Default: None.
- """
-
- def __init__(self, server_list_cfg, client_cfg, sys_path=None):
- if sys_path is not None:
- import sys
- sys.path.append(sys_path)
- try:
- import mc
- except ImportError:
- raise ImportError('Please install memcached to enable MemcachedBackend.')
-
- self.server_list_cfg = server_list_cfg
- self.client_cfg = client_cfg
- self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, self.client_cfg)
-        # mc.pyvector serves as a pointer to a memory cache
- self._mc_buffer = mc.pyvector()
-
- def get(self, filepath):
- filepath = str(filepath)
- import mc
- self._client.Get(filepath, self._mc_buffer)
- value_buf = mc.ConvertBuffer(self._mc_buffer)
- return value_buf
-
- def get_text(self, filepath):
- raise NotImplementedError
-
-
-class HardDiskBackend(BaseStorageBackend):
- """Raw hard disks storage backend."""
-
- def get(self, filepath):
- filepath = str(filepath)
- with open(filepath, 'rb') as f:
- value_buf = f.read()
- return value_buf
-
- def get_text(self, filepath):
- filepath = str(filepath)
- with open(filepath, 'r') as f:
- value_buf = f.read()
- return value_buf
-
-
-class LmdbBackend(BaseStorageBackend):
- """Lmdb storage backend.
-
- Args:
- db_paths (str | list[str]): Lmdb database paths.
- client_keys (str | list[str]): Lmdb client keys. Default: 'default'.
- readonly (bool, optional): Lmdb environment parameter. If True,
- disallow any write operations. Default: True.
- lock (bool, optional): Lmdb environment parameter. If False, when
- concurrent access occurs, do not lock the database. Default: False.
- readahead (bool, optional): Lmdb environment parameter. If False,
- disable the OS filesystem readahead mechanism, which may improve
- random read performance when a database is larger than RAM.
- Default: False.
-
- Attributes:
- db_paths (list): Lmdb database path.
- _client (list): A list of several lmdb envs.
- """
-
- def __init__(self, db_paths, client_keys='default', readonly=True, lock=False, readahead=False, **kwargs):
- try:
- import lmdb
- except ImportError:
- raise ImportError('Please install lmdb to enable LmdbBackend.')
-
- if isinstance(client_keys, str):
- client_keys = [client_keys]
-
- if isinstance(db_paths, list):
- self.db_paths = [str(v) for v in db_paths]
- elif isinstance(db_paths, str):
- self.db_paths = [str(db_paths)]
- assert len(client_keys) == len(self.db_paths), ('client_keys and db_paths should have the same length, '
- f'but received {len(client_keys)} and {len(self.db_paths)}.')
-
- self._client = {}
- for client, path in zip(client_keys, self.db_paths):
- self._client[client] = lmdb.open(path, readonly=readonly, lock=lock, readahead=readahead, **kwargs)
-
- def get(self, filepath, client_key):
- """Get values according to the filepath from one lmdb named client_key.
-
- Args:
- filepath (str | obj:`Path`): Here, filepath is the lmdb key.
- client_key (str): Used for distinguishing different lmdb envs.
- """
- filepath = str(filepath)
- assert client_key in self._client, (f'client_key {client_key} is not in lmdb clients.')
- client = self._client[client_key]
- with client.begin(write=False) as txn:
- value_buf = txn.get(filepath.encode('ascii'))
- return value_buf
-
- def get_text(self, filepath):
- raise NotImplementedError
-
-
-class FileClient(object):
- """A general file client to access files in different backend.
-
-    The client loads a file or text from its path using a specified backend
-    and returns it as a binary buffer. It can also register other backend
-    accessors with a given name and backend class.
-
- Attributes:
- backend (str): The storage backend type. Options are "disk",
- "memcached" and "lmdb".
- client (:obj:`BaseStorageBackend`): The backend object.
- """
-
- _backends = {
- 'disk': HardDiskBackend,
- 'memcached': MemcachedBackend,
- 'lmdb': LmdbBackend,
- }
-
- def __init__(self, backend='disk', **kwargs):
- if backend not in self._backends:
- raise ValueError(f'Backend {backend} is not supported. Currently supported ones'
- f' are {list(self._backends.keys())}')
- self.backend = backend
- self.client = self._backends[backend](**kwargs)
-
- def get(self, filepath, client_key='default'):
- # client_key is used only for lmdb, where different fileclients have
- # different lmdb environments.
- if self.backend == 'lmdb':
- return self.client.get(filepath, client_key)
- else:
- return self.client.get(filepath)
-
- def get_text(self, filepath):
- return self.client.get_text(filepath)
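
A usage sketch for the client above: read an image's raw bytes through the disk backend and decode them (the path is a placeholder; cv2/numpy are only used to decode the buffer):

import cv2
import numpy as np

client = FileClient(backend='disk')                  # class defined above
img_bytes = client.get('datasets/example/0001.png')  # raw bytes read from disk
img = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_COLOR)
print(img.shape)
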
diff --git a/spaces/Illumotion/Koboldcpp/make_pyinstaller_hybrid_henk.bat b/spaces/Illumotion/Koboldcpp/make_pyinstaller_hybrid_henk.bat
deleted file mode 100644
index fa5a8e232a9e731f4b9413f990b255cdb6705c73..0000000000000000000000000000000000000000
--- a/spaces/Illumotion/Koboldcpp/make_pyinstaller_hybrid_henk.bat
+++ /dev/null
@@ -1,5 +0,0 @@
-cd /d "%~dp0"
-copy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.4\bin\cudart64_110.dll" .\ /Y
-copy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.4\bin\cublasLt64_11.dll" .\ /Y
-copy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.4\bin\cublas64_11.dll" .\ /Y
-PyInstaller --noconfirm --onefile --collect-all customtkinter --clean --console --icon ".\niko.ico" --add-data "./klite.embd;." --add-data "./koboldcpp_default.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./koboldcpp_cublas.dll;." --add-data "./cudart64_110.dll;." --add-data "./cublasLt64_11.dll;." --add-data "./cublas64_11.dll;." --add-data "./rwkv_vocab.embd;." --add-data "C:/Windows/System32/msvcp140.dll;." --add-data "C:/Windows/System32/vcruntime140_1.dll;." "./koboldcpp.py" -n "koboldcpp.exe"
\ No newline at end of file
diff --git a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Snake/con_snake_logreg.py b/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Snake/con_snake_logreg.py
deleted file mode 100644
index 166ccdfa77ae3dc6279b2420d9ec074c6300a8ce..0000000000000000000000000000000000000000
--- a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Snake/con_snake_logreg.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import cv2
-import numpy as np
-from PIL import Image
-import pickle
-import tensorflow as tf
-import os
-
-class snakeLogReg:
- def __init__(self,url) -> None:
- self.image = url
-
- def predict_image(self):
- # Load the model
- load_extractor = tf.keras.models.load_model("././Model/Snake/resnetLogreg/resnet_EXTRACTOR.h5")
-
- modelpath = "././Model/Snake/resnetLogreg/dataSaved.pkl"
-
- with open(modelpath, 'rb') as file:
- saved_data = pickle.load(file)
- animal_breed = saved_data['class_name']
- model = saved_data['logreg_model']
-
- im = Image.open(self.image)
- img = im.convert("RGB")
- img= np.asarray(img)
- image_resized= cv2.resize(img, (224,224))
- features = load_extractor.predict(np.expand_dims(image_resized, axis=0))
-
- reshaped_features = features.reshape(features.shape[0],-1)
- predicted_class = model.predict(reshaped_features)
-        pred_prob = model.predict_proba(reshaped_features)
-        prediction_probability = pred_prob[0][predicted_class[0]]
-
- output_class= animal_breed[predicted_class[0]]
-
- return [output_class, prediction_probability]
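
The class above consumes a saved Keras feature extractor plus a pickled scikit-learn logistic-regression head; a hedged sketch of how such a pair could be produced (the data, class names and paths are placeholders, not this project's actual training code):

import pickle

import numpy as np
import tensorflow as tf
from sklearn.linear_model import LogisticRegression

X_train = np.random.rand(32, 224, 224, 3).astype("float32")       # placeholder images
y_train = np.random.randint(0, 3, size=32)                         # placeholder labels

extractor = tf.keras.applications.ResNet50(include_top=False, weights="imagenet",
                                            input_shape=(224, 224, 3))
features = extractor.predict(X_train).reshape(len(X_train), -1)    # flatten per-image features

logreg = LogisticRegression(max_iter=1000).fit(features, y_train)

extractor.save("resnet_EXTRACTOR.h5")
with open("dataSaved.pkl", "wb") as f:
    pickle.dump({"class_name": ["breed_a", "breed_b", "breed_c"],  # hypothetical labels
                 "logreg_model": logreg}, f)
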
diff --git a/spaces/Jorgerv97/Herramienta_interactiva_ensenyanza_tecnicas_aprendizaje_supervisado_salud/AlgorithmsInfo/ranForestInfo.py b/spaces/Jorgerv97/Herramienta_interactiva_ensenyanza_tecnicas_aprendizaje_supervisado_salud/AlgorithmsInfo/ranForestInfo.py
deleted file mode 100644
index 0ff276b6310fc8be3fa5c9c91db2dd8172238ebb..0000000000000000000000000000000000000000
--- a/spaces/Jorgerv97/Herramienta_interactiva_ensenyanza_tecnicas_aprendizaje_supervisado_salud/AlgorithmsInfo/ranForestInfo.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from shiny import module, ui, reactive, render
-from shiny.types import ImgData
-from pathlib import Path
-
-explanation_img_path = Path(__file__).parent.parent / "images"
-
-
-@module.ui
-def ranForest_def_ui():
- return ui.div(
- ui.div(
- ui.markdown("Un bosque aleatorio (random forest) es un clasificador de conjunto que está **compuesto por múltiples árboles de decisión**. Los árboles de decisión individuales pueden sufrir sobreajuste cuando son muy profundos, lo que lleva a una alta variación en los resultados de clasificación para pequeños cambios en los datos de entrada. En un bosque aleatorio, los árboles se entrenan utilizando diferentes subconjuntos del conjunto de datos de entrenamiento. Para clasificar una nueva muestra, se pasa a través de cada árbol del bosque, y cada árbol genera un resultado de clasificación basado en una parte específica de la muestra. El bosque aleatorio selecciona la clasificación con la mayor cantidad de 'votos' en caso de clasificación discreta, o el promedio de las clasificaciones en caso de clasificación numérica. Al considerar los resultados de múltiples árboles, puede reducir la variación y el sobreajuste, mejorando la estabilidad y precisión de las clasificaciones.")
- , style="padding-right:50px; text-align: justify; text-justify: inter-word;"
- ),
- ui.div(
- ui.markdown("A continuación, se muestra cómo un bosque aleatorio está formado por múltiples árboles de decisión (tres en este caso).")
- , style="padding-right:50px; padding-bottom:10px; text-align: justify; text-justify: inter-word;"
- ),
- ui.output_image("ran_forest_expl_image", height="260px"),
- )
-
-@module.ui
-def ranForest_howTo_ui():
- return ui.div(
- {"id": "ran_forest_how_generate"},
-        ui.input_action_button("ran_forest_show_how_info", "How is the random forest model generated? ▽"
- , style="padding: 30px 0px 10px 0px; background: white; border: none; font-weight: bold; text-decoration: underline; border: 0 !important; box-shadow: 0 0 !important; transition: 0.1s !important; background-color: transparent !important;"),
-
- )
-
-@module.ui
-def ranForest_performance_ui():
- return ui.div(
- ui.div(
- ui.markdown("""**No hay un umbral exacto para considerar un modelo como bueno**, ya que depende del contexto y las necesidades del problema. En general, en aplicaciones relacionadas con el ámbito sanitario se busca maximizar tanto la precisión (para minimizar falsos positivos) como la sensibilidad o TVP (para minimizar falsos negativos), por lo que **se busca obtener un valor alto de F1**. En este ejemplo el valor de F1 puede llegar a superar el 95% utilizando los ajustes y características correctos. Aunque más complicado, al igual que ocurre con el árbol de decisión, el modelo puede ser sobreajustado.
-
-*Consejo: editar la profundidad máxima del árbol es un buen punto de inicio para evitar el sobreajuste.*""")
- , style="padding-top:30px; padding-right:50px; text-align: justify; text-justify: inter-word;"
- ),
- )
-
-
-@module.server
-def ranForest_server(input, output, session):
-
- @reactive.Effect
- @reactive.event(input.ran_forest_show_how_info)
- def _():
- show_ran_forest_how_gen_button = input.ran_forest_show_how_info()
- if show_ran_forest_how_gen_button % 2 == 1:
-            ui.update_action_button("ran_forest_show_how_info", label="How is the random forest model generated? △")
- ui.insert_ui(
- ui.div({"id": "inserted-ran-forest-how-gen-info"},
- ui.markdown("""Todos los modelos siguen los mismos pasos para ser creados:
-- Primero debemos **elegir los ajustes del modelo** que queremos crear. En este caso, disponemos de los siguientes ajustes (la mayoría son análogos a los de árbol de decisión):
- - **Num estimators**: el número de árboles para generar el bosque.
- - **Criterion**: La función utilizada para medir la calidad de una división.
- - **Max Depth**: La profundidad máxima del árbol. Si es None, los nodos se expandirán hasta que todas las hojas sean puras o hasta que todas las hojas contengan menos muestras que min_samples_split.
- - **Min samples split**: El número mínimo de muestras requeridas para dividir un nodo interno.
- - **Min samples leaf**: El número mínimo de muestras requeridas para estar en un nodo hoja.
- - **Max features**: El número de características a considerar al buscar la mejor división.
-- Después debemos **elegir las características** que queremos usar para predecir el resultado. No todas las características pueden ser relevantes para el modelo y puede que nos encontremos algunas que aporten ruido a nuestros resultados. Si es la primera vez que creas el modelo, selecciona todas las características de momento.
-- Por último, **¡genera el modelo!**"""
- ),
- style="border: solid 0px grey; border-radius: 10px; background:#eceef1 ;margin-right:50px; padding:15px 20px 10px 20px; text-align: justify; text-justify: inter-word;",
- ),
- selector="#ran_forest_how_generate",
- where="beforeEnd",
- )
- else:
-            ui.update_action_button("ran_forest_show_how_info", label="How is the random forest model generated? ▽")
- ui.remove_ui("#inserted-ran-forest-how-gen-info")
-
- @output
- @render.image
- def ran_forest_expl_image():
- img: ImgData = {"src": str(explanation_img_path / "ran_forest_expl.png"), "height":"250px", "style":"display:block; margin-left:15%;"}
- return img
\ No newline at end of file
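
The settings listed above map directly onto scikit-learn's RandomForestClassifier; a minimal sketch on a synthetic dataset (the values are illustrative, not the app's defaults):

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, n_features=10, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

clf = RandomForestClassifier(
    n_estimators=100,        # Num estimators
    criterion="gini",        # Criterion
    max_depth=5,             # Max depth (limiting it helps avoid overfitting)
    min_samples_split=2,     # Min samples split
    min_samples_leaf=1,      # Min samples leaf
    max_features="sqrt",     # Max features
).fit(X_tr, y_tr)

print("F1:", f1_score(y_te, clf.predict(X_te)))
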
diff --git a/spaces/Kayson/InstructDiffusion/stable_diffusion/scripts/train_searcher.py b/spaces/Kayson/InstructDiffusion/stable_diffusion/scripts/train_searcher.py
deleted file mode 100644
index 1e7904889c0145f9fb740fd4ae8e45c08728b255..0000000000000000000000000000000000000000
--- a/spaces/Kayson/InstructDiffusion/stable_diffusion/scripts/train_searcher.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import os, sys
-import numpy as np
-import scann
-import argparse
-import glob
-from multiprocessing import cpu_count
-from tqdm import tqdm
-
-from ldm.util import parallel_data_prefetch
-
-
-def search_bruteforce(searcher):
- return searcher.score_brute_force().build()
-
-
-def search_partioned_ah(searcher, dims_per_block, aiq_threshold, reorder_k,
- partioning_trainsize, num_leaves, num_leaves_to_search):
- return searcher.tree(num_leaves=num_leaves,
- num_leaves_to_search=num_leaves_to_search,
- training_sample_size=partioning_trainsize). \
- score_ah(dims_per_block, anisotropic_quantization_threshold=aiq_threshold).reorder(reorder_k).build()
-
-
-def search_ah(searcher, dims_per_block, aiq_threshold, reorder_k):
- return searcher.score_ah(dims_per_block, anisotropic_quantization_threshold=aiq_threshold).reorder(
- reorder_k).build()
-
-def load_datapool(dpath):
-
-
- def load_single_file(saved_embeddings):
- compressed = np.load(saved_embeddings)
- database = {key: compressed[key] for key in compressed.files}
- return database
-
- def load_multi_files(data_archive):
- database = {key: [] for key in data_archive[0].files}
- for d in tqdm(data_archive, desc=f'Loading datapool from {len(data_archive)} individual files.'):
- for key in d.files:
- database[key].append(d[key])
-
- return database
-
- print(f'Load saved patch embedding from "{dpath}"')
- file_content = glob.glob(os.path.join(dpath, '*.npz'))
-
- if len(file_content) == 1:
- data_pool = load_single_file(file_content[0])
- elif len(file_content) > 1:
- data = [np.load(f) for f in file_content]
- prefetched_data = parallel_data_prefetch(load_multi_files, data,
- n_proc=min(len(data), cpu_count()), target_data_type='dict')
-
- data_pool = {key: np.concatenate([od[key] for od in prefetched_data], axis=1)[0] for key in prefetched_data[0].keys()}
- else:
- raise ValueError(f'No npz-files in specified path "{dpath}" is this directory existing?')
-
- print(f'Finished loading of retrieval database of length {data_pool["embedding"].shape[0]}.')
- return data_pool
-
-
-def train_searcher(opt,
- metric='dot_product',
- partioning_trainsize=None,
- reorder_k=None,
- # todo tune
- aiq_thld=0.2,
- dims_per_block=2,
- num_leaves=None,
- num_leaves_to_search=None,):
-
- data_pool = load_datapool(opt.database)
- k = opt.knn
-
- if not reorder_k:
- reorder_k = 2 * k
-
- # normalize
- # embeddings =
- searcher = scann.scann_ops_pybind.builder(data_pool['embedding'] / np.linalg.norm(data_pool['embedding'], axis=1)[:, np.newaxis], k, metric)
- pool_size = data_pool['embedding'].shape[0]
-
- print(*(['#'] * 100))
- print('Initializing scaNN searcher with the following values:')
- print(f'k: {k}')
- print(f'metric: {metric}')
- print(f'reorder_k: {reorder_k}')
- print(f'anisotropic_quantization_threshold: {aiq_thld}')
- print(f'dims_per_block: {dims_per_block}')
- print(*(['#'] * 100))
- print('Start training searcher....')
- print(f'N samples in pool is {pool_size}')
-
- # this reflects the recommended design choices proposed at
- # https://github.com/google-research/google-research/blob/aca5f2e44e301af172590bb8e65711f0c9ee0cfd/scann/docs/algorithms.md
- if pool_size < 2e4:
- print('Using brute force search.')
- searcher = search_bruteforce(searcher)
- elif 2e4 <= pool_size and pool_size < 1e5:
- print('Using asymmetric hashing search and reordering.')
- searcher = search_ah(searcher, dims_per_block, aiq_thld, reorder_k)
- else:
- print('Using using partioning, asymmetric hashing search and reordering.')
-
- if not partioning_trainsize:
- partioning_trainsize = data_pool['embedding'].shape[0] // 10
- if not num_leaves:
- num_leaves = int(np.sqrt(pool_size))
-
- if not num_leaves_to_search:
- num_leaves_to_search = max(num_leaves // 20, 1)
-
- print('Partitioning params:')
- print(f'num_leaves: {num_leaves}')
- print(f'num_leaves_to_search: {num_leaves_to_search}')
- # self.searcher = self.search_ah(searcher, dims_per_block, aiq_thld, reorder_k)
- searcher = search_partioned_ah(searcher, dims_per_block, aiq_thld, reorder_k,
- partioning_trainsize, num_leaves, num_leaves_to_search)
-
- print('Finish training searcher')
- searcher_savedir = opt.target_path
- os.makedirs(searcher_savedir, exist_ok=True)
- searcher.serialize(searcher_savedir)
- print(f'Saved trained searcher under "{searcher_savedir}"')
-
-if __name__ == '__main__':
- sys.path.append(os.getcwd())
- parser = argparse.ArgumentParser()
- parser.add_argument('--database',
- '-d',
- default='data/rdm/retrieval_databases/openimages',
- type=str,
- help='path to folder containing the clip feature of the database')
- parser.add_argument('--target_path',
- '-t',
- default='data/rdm/searchers/openimages',
- type=str,
- help='path to the target folder where the searcher shall be stored.')
- parser.add_argument('--knn',
- '-k',
- default=20,
- type=int,
- help='number of nearest neighbors, for which the searcher shall be optimized')
-
- opt, _ = parser.parse_known_args()
-
- train_searcher(opt,)
\ No newline at end of file
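
A hedged sketch of querying a searcher serialized by the script above; the load_searcher call and the search_batched signature are assumptions about the ScaNN pybind API, and the path and embedding dimension are placeholders:

import numpy as np
import scann

searcher = scann.scann_ops_pybind.load_searcher('data/rdm/searchers/openimages')  # assumed loader
queries = np.random.rand(4, 512).astype(np.float32)             # placeholder CLIP-like features
queries /= np.linalg.norm(queries, axis=1, keepdims=True)       # match the training-time normalization
neighbors, distances = searcher.search_batched(queries)
print(neighbors.shape, distances.shape)
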
diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/preprocess.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/preprocess.py
deleted file mode 100644
index 0feee6e2458ee770d1b94c53a043b1146b580cef..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/preprocess.py
+++ /dev/null
@@ -1,113 +0,0 @@
-
-import os
-import torch
-import numpy as np
-from tqdm import tqdm
-from pathlib import Path
-import soundfile
-import resampy
-
-from ppg_extractor import load_model
-import encoder.inference as Encoder
-from encoder.audio import preprocess_wav
-from encoder import audio
-from utils.f0_utils import compute_f0
-
-from torch.multiprocessing import Pool, cpu_count
-from functools import partial
-
-SAMPLE_RATE=16000
-
-def _compute_bnf(
- wav: any,
- output_fpath: str,
- device: torch.device,
- ppg_model_local: any,
-):
- """
- Compute CTC-Attention Seq2seq ASR encoder bottle-neck features (BNF).
- """
- ppg_model_local.to(device)
- wav_tensor = torch.from_numpy(wav).float().to(device).unsqueeze(0)
- wav_length = torch.LongTensor([wav.shape[0]]).to(device)
- with torch.no_grad():
- bnf = ppg_model_local(wav_tensor, wav_length)
- bnf_npy = bnf.squeeze(0).cpu().numpy()
- np.save(output_fpath, bnf_npy, allow_pickle=False)
- return bnf_npy, len(bnf_npy)
-
-def _compute_f0_from_wav(wav, output_fpath):
- """Compute merged f0 values."""
- f0 = compute_f0(wav, SAMPLE_RATE)
- np.save(output_fpath, f0, allow_pickle=False)
- return f0, len(f0)
-
-def _compute_spkEmbed(wav, output_fpath, encoder_model_local, device):
- Encoder.set_model(encoder_model_local)
- # Compute where to split the utterance into partials and pad if necessary
- wave_slices, mel_slices = Encoder.compute_partial_slices(len(wav), rate=1.3, min_pad_coverage=0.75)
- max_wave_length = wave_slices[-1].stop
- if max_wave_length >= len(wav):
- wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
-
- # Split the utterance into partials
- frames = audio.wav_to_mel_spectrogram(wav)
- frames_batch = np.array([frames[s] for s in mel_slices])
- partial_embeds = Encoder.embed_frames_batch(frames_batch)
-
- # Compute the utterance embedding from the partial embeddings
- raw_embed = np.mean(partial_embeds, axis=0)
- embed = raw_embed / np.linalg.norm(raw_embed, 2)
-
- np.save(output_fpath, embed, allow_pickle=False)
- return embed, len(embed)
-
-def preprocess_one(wav_path, out_dir, device, ppg_model_local, encoder_model_local):
- # wav = preprocess_wav(wav_path)
- # try:
- wav, sr = soundfile.read(wav_path)
- if len(wav) < sr:
- return None, sr, len(wav)
- if sr != SAMPLE_RATE:
- wav = resampy.resample(wav, sr, SAMPLE_RATE)
- sr = SAMPLE_RATE
-    utt_id = os.path.splitext(os.path.basename(wav_path))[0]  # rstrip(".wav") would strip any trailing ".", "w", "a", "v" characters
-
- _, length_bnf = _compute_bnf(output_fpath=f"{out_dir}/bnf/{utt_id}.ling_feat.npy", wav=wav, device=device, ppg_model_local=ppg_model_local)
- _, length_f0 = _compute_f0_from_wav(output_fpath=f"{out_dir}/f0/{utt_id}.f0.npy", wav=wav)
- _, length_embed = _compute_spkEmbed(output_fpath=f"{out_dir}/embed/{utt_id}.npy", device=device, encoder_model_local=encoder_model_local, wav=wav)
-
-def preprocess_dataset(datasets_root, dataset, out_dir, n_processes, ppg_encoder_model_fpath, speaker_encoder_model):
- # Glob wav files
- wav_file_list = sorted(Path(f"{datasets_root}/{dataset}").glob("**/*.wav"))
- print(f"Globbed {len(wav_file_list)} wav files.")
-
- out_dir.joinpath("bnf").mkdir(exist_ok=True, parents=True)
- out_dir.joinpath("f0").mkdir(exist_ok=True, parents=True)
- out_dir.joinpath("embed").mkdir(exist_ok=True, parents=True)
- ppg_model_local = load_model(ppg_encoder_model_fpath, "cpu")
- encoder_model_local = Encoder.load_model(speaker_encoder_model, "cpu")
- if n_processes is None:
- n_processes = cpu_count()
-
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- func = partial(preprocess_one, out_dir=out_dir, ppg_model_local=ppg_model_local, encoder_model_local=encoder_model_local, device=device)
- job = Pool(n_processes).imap(func, wav_file_list)
- list(tqdm(job, "Preprocessing", len(wav_file_list), unit="wav"))
-
- # finish processing and mark
- t_fid_file = out_dir.joinpath("train_fidlist.txt").open("w", encoding="utf-8")
- d_fid_file = out_dir.joinpath("dev_fidlist.txt").open("w", encoding="utf-8")
- e_fid_file = out_dir.joinpath("eval_fidlist.txt").open("w", encoding="utf-8")
- for file in sorted(out_dir.joinpath("f0").glob("*.npy")):
- id = os.path.basename(file).split(".f0.npy")[0]
- if id.endswith("01"):
- d_fid_file.write(id + "\n")
- elif id.endswith("09"):
- e_fid_file.write(id + "\n")
- else:
- t_fid_file.write(id + "\n")
- t_fid_file.close()
- d_fid_file.close()
- e_fid_file.close()
- return len(wav_file_list)
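
A small sketch of reading back the three per-utterance artifacts written above (out_dir and the utterance id are placeholders):

import numpy as np
from pathlib import Path

out_dir, utt_id = Path("preprocessed"), "speaker01_utt01"        # placeholders
bnf = np.load(out_dir / "bnf" / f"{utt_id}.ling_feat.npy")       # per-frame bottleneck (PPG) features
f0 = np.load(out_dir / "f0" / f"{utt_id}.f0.npy")                # interpolated f0 contour
embed = np.load(out_dir / "embed" / f"{utt_id}.npy")             # utterance-level speaker embedding
print(bnf.shape, f0.shape, embed.shape)
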
diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/toolbox/__init__.py b/spaces/Kevin676/Real-Time-Voice-Cloning/toolbox/__init__.py
deleted file mode 100644
index 531d6adef076007afd6116eb6472485f540e80de..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/Real-Time-Voice-Cloning/toolbox/__init__.py
+++ /dev/null
@@ -1,357 +0,0 @@
-from toolbox.ui import UI
-from encoder import inference as encoder
-from synthesizer.inference import Synthesizer
-from vocoder import inference as vocoder
-from pathlib import Path
-from time import perf_counter as timer
-from toolbox.utterance import Utterance
-import numpy as np
-import traceback
-import sys
-import torch
-import librosa
-from audioread.exceptions import NoBackendError
-
-# Use this directory structure for your datasets, or modify it to fit your needs
-recognized_datasets = [
- "LibriSpeech/dev-clean",
- "LibriSpeech/dev-other",
- "LibriSpeech/test-clean",
- "LibriSpeech/test-other",
- "LibriSpeech/train-clean-100",
- "LibriSpeech/train-clean-360",
- "LibriSpeech/train-other-500",
- "LibriTTS/dev-clean",
- "LibriTTS/dev-other",
- "LibriTTS/test-clean",
- "LibriTTS/test-other",
- "LibriTTS/train-clean-100",
- "LibriTTS/train-clean-360",
- "LibriTTS/train-other-500",
- "LJSpeech-1.1",
- "VoxCeleb1/wav",
- "VoxCeleb1/test_wav",
- "VoxCeleb2/dev/aac",
- "VoxCeleb2/test/aac",
- "VCTK-Corpus/wav48",
-]
-
-# Maximum number of generated wavs to keep in memory
-MAX_WAVES = 15
-
-class Toolbox:
- def __init__(self, datasets_root, enc_models_dir, syn_models_dir, voc_models_dir, seed, no_mp3_support):
- if not no_mp3_support:
- try:
- librosa.load("samples/6829_00000.mp3")
- except NoBackendError:
- print("Librosa will be unable to open mp3 files if additional software is not installed.\n"
- "Please install ffmpeg or add the '--no_mp3_support' option to proceed without support for mp3 files.")
- exit(-1)
- self.no_mp3_support = no_mp3_support
- sys.excepthook = self.excepthook
- self.datasets_root = datasets_root
- self.utterances = set()
- self.current_generated = (None, None, None, None) # speaker_name, spec, breaks, wav
-
- self.synthesizer = None # type: Synthesizer
- self.current_wav = None
- self.waves_list = []
- self.waves_count = 0
- self.waves_namelist = []
-
- # Check for webrtcvad (enables removal of silences in vocoder output)
- try:
- import webrtcvad
- self.trim_silences = True
- except:
- self.trim_silences = False
-
- # Initialize the events and the interface
- self.ui = UI()
- self.reset_ui(enc_models_dir, syn_models_dir, voc_models_dir, seed)
- self.setup_events()
- self.ui.start()
-
- def excepthook(self, exc_type, exc_value, exc_tb):
- traceback.print_exception(exc_type, exc_value, exc_tb)
- self.ui.log("Exception: %s" % exc_value)
-
- def setup_events(self):
- # Dataset, speaker and utterance selection
- self.ui.browser_load_button.clicked.connect(lambda: self.load_from_browser())
- random_func = lambda level: lambda: self.ui.populate_browser(self.datasets_root,
- recognized_datasets,
- level)
- self.ui.random_dataset_button.clicked.connect(random_func(0))
- self.ui.random_speaker_button.clicked.connect(random_func(1))
- self.ui.random_utterance_button.clicked.connect(random_func(2))
- self.ui.dataset_box.currentIndexChanged.connect(random_func(1))
- self.ui.speaker_box.currentIndexChanged.connect(random_func(2))
-
- # Model selection
- self.ui.encoder_box.currentIndexChanged.connect(self.init_encoder)
- def func():
- self.synthesizer = None
- self.ui.synthesizer_box.currentIndexChanged.connect(func)
- self.ui.vocoder_box.currentIndexChanged.connect(self.init_vocoder)
-
- # Utterance selection
- func = lambda: self.load_from_browser(self.ui.browse_file())
- self.ui.browser_browse_button.clicked.connect(func)
- func = lambda: self.ui.draw_utterance(self.ui.selected_utterance, "current")
- self.ui.utterance_history.currentIndexChanged.connect(func)
- func = lambda: self.ui.play(self.ui.selected_utterance.wav, Synthesizer.sample_rate)
- self.ui.play_button.clicked.connect(func)
- self.ui.stop_button.clicked.connect(self.ui.stop)
- self.ui.record_button.clicked.connect(self.record)
-
-        # Audio
- self.ui.setup_audio_devices(Synthesizer.sample_rate)
-
-        # Wav playback & save
- func = lambda: self.replay_last_wav()
- self.ui.replay_wav_button.clicked.connect(func)
- func = lambda: self.export_current_wave()
- self.ui.export_wav_button.clicked.connect(func)
- self.ui.waves_cb.currentIndexChanged.connect(self.set_current_wav)
-
- # Generation
- func = lambda: self.synthesize() or self.vocode()
- self.ui.generate_button.clicked.connect(func)
- self.ui.synthesize_button.clicked.connect(self.synthesize)
- self.ui.vocode_button.clicked.connect(self.vocode)
- self.ui.random_seed_checkbox.clicked.connect(self.update_seed_textbox)
-
- # UMAP legend
- self.ui.clear_button.clicked.connect(self.clear_utterances)
-
- def set_current_wav(self, index):
- self.current_wav = self.waves_list[index]
-
- def export_current_wave(self):
- self.ui.save_audio_file(self.current_wav, Synthesizer.sample_rate)
-
- def replay_last_wav(self):
- self.ui.play(self.current_wav, Synthesizer.sample_rate)
-
- def reset_ui(self, encoder_models_dir, synthesizer_models_dir, vocoder_models_dir, seed):
- self.ui.populate_browser(self.datasets_root, recognized_datasets, 0, True)
- self.ui.populate_models(encoder_models_dir, synthesizer_models_dir, vocoder_models_dir)
- self.ui.populate_gen_options(seed, self.trim_silences)
-
- def load_from_browser(self, fpath=None):
- if fpath is None:
- fpath = Path(self.datasets_root,
- self.ui.current_dataset_name,
- self.ui.current_speaker_name,
- self.ui.current_utterance_name)
- name = str(fpath.relative_to(self.datasets_root))
- speaker_name = self.ui.current_dataset_name + '_' + self.ui.current_speaker_name
-
- # Select the next utterance
- if self.ui.auto_next_checkbox.isChecked():
- self.ui.browser_select_next()
- elif fpath == "":
- return
- else:
- name = fpath.name
- speaker_name = fpath.parent.name
-
- if fpath.suffix.lower() == ".mp3" and self.no_mp3_support:
- self.ui.log("Error: No mp3 file argument was passed but an mp3 file was used")
- return
-
- # Get the wav from the disk. We take the wav with the vocoder/synthesizer format for
- # playback, so as to have a fair comparison with the generated audio
- wav = Synthesizer.load_preprocess_wav(fpath)
- self.ui.log("Loaded %s" % name)
-
- self.add_real_utterance(wav, name, speaker_name)
-
- def record(self):
- wav = self.ui.record_one(encoder.sampling_rate, 5)
- if wav is None:
- return
- self.ui.play(wav, encoder.sampling_rate)
-
- speaker_name = "user01"
- name = speaker_name + "_rec_%05d" % np.random.randint(100000)
- self.add_real_utterance(wav, name, speaker_name)
-
- def add_real_utterance(self, wav, name, speaker_name):
- # Compute the mel spectrogram
- spec = Synthesizer.make_spectrogram(wav)
- self.ui.draw_spec(spec, "current")
-
- # Compute the embedding
- if not encoder.is_loaded():
- self.init_encoder()
- encoder_wav = encoder.preprocess_wav(wav)
- embed, partial_embeds, _ = encoder.embed_utterance(encoder_wav, return_partials=True)
-
- # Add the utterance
- utterance = Utterance(name, speaker_name, wav, spec, embed, partial_embeds, False)
- self.utterances.add(utterance)
- self.ui.register_utterance(utterance)
-
- # Plot it
- self.ui.draw_embed(embed, name, "current")
- self.ui.draw_umap_projections(self.utterances)
-
- def clear_utterances(self):
- self.utterances.clear()
- self.ui.draw_umap_projections(self.utterances)
-
- def synthesize(self):
- self.ui.log("Generating the mel spectrogram...")
- self.ui.set_loading(1)
-
- # Update the synthesizer random seed
- if self.ui.random_seed_checkbox.isChecked():
- seed = int(self.ui.seed_textbox.text())
- self.ui.populate_gen_options(seed, self.trim_silences)
- else:
- seed = None
-
- if seed is not None:
- torch.manual_seed(seed)
-
- # Synthesize the spectrogram
- if self.synthesizer is None or seed is not None:
- self.init_synthesizer()
-
- texts = self.ui.text_prompt.toPlainText().split("\n")
- embed = self.ui.selected_utterance.embed
- embeds = [embed] * len(texts)
- specs = self.synthesizer.synthesize_spectrograms(texts, embeds)
- breaks = [spec.shape[1] for spec in specs]
- spec = np.concatenate(specs, axis=1)
-
- self.ui.draw_spec(spec, "generated")
- self.current_generated = (self.ui.selected_utterance.speaker_name, spec, breaks, None)
- self.ui.set_loading(0)
-
- def vocode(self):
- speaker_name, spec, breaks, _ = self.current_generated
- assert spec is not None
-
-        # Initialize the vocoder model and make it deterministic, if the user provides a seed
- if self.ui.random_seed_checkbox.isChecked():
- seed = int(self.ui.seed_textbox.text())
- self.ui.populate_gen_options(seed, self.trim_silences)
- else:
- seed = None
-
- if seed is not None:
- torch.manual_seed(seed)
-
- # Synthesize the waveform
- if not vocoder.is_loaded() or seed is not None:
- self.init_vocoder()
-
- def vocoder_progress(i, seq_len, b_size, gen_rate):
- real_time_factor = (gen_rate / Synthesizer.sample_rate) * 1000
- line = "Waveform generation: %d/%d (batch size: %d, rate: %.1fkHz - %.2fx real time)" \
- % (i * b_size, seq_len * b_size, b_size, gen_rate, real_time_factor)
- self.ui.log(line, "overwrite")
- self.ui.set_loading(i, seq_len)
- if self.ui.current_vocoder_fpath is not None:
- self.ui.log("")
- wav = vocoder.infer_waveform(spec, progress_callback=vocoder_progress)
- else:
- self.ui.log("Waveform generation with Griffin-Lim... ")
- wav = Synthesizer.griffin_lim(spec)
- self.ui.set_loading(0)
- self.ui.log(" Done!", "append")
-
- # Add breaks
- b_ends = np.cumsum(np.array(breaks) * Synthesizer.hparams.hop_size)
- b_starts = np.concatenate(([0], b_ends[:-1]))
-        wavs = [wav[start:end] for start, end in zip(b_starts, b_ends)]
- breaks = [np.zeros(int(0.15 * Synthesizer.sample_rate))] * len(breaks)
- wav = np.concatenate([i for w, b in zip(wavs, breaks) for i in (w, b)])
-
- # Trim excessive silences
- if self.ui.trim_silences_checkbox.isChecked():
- wav = encoder.preprocess_wav(wav)
-
- # Play it
- wav = wav / np.abs(wav).max() * 0.97
- self.ui.play(wav, Synthesizer.sample_rate)
-
- # Name it (history displayed in combobox)
- # TODO better naming for the combobox items?
- wav_name = str(self.waves_count + 1)
-
-        # Update waves combobox
- self.waves_count += 1
- if self.waves_count > MAX_WAVES:
- self.waves_list.pop()
- self.waves_namelist.pop()
- self.waves_list.insert(0, wav)
- self.waves_namelist.insert(0, wav_name)
-
- self.ui.waves_cb.disconnect()
- self.ui.waves_cb_model.setStringList(self.waves_namelist)
- self.ui.waves_cb.setCurrentIndex(0)
- self.ui.waves_cb.currentIndexChanged.connect(self.set_current_wav)
-
- # Update current wav
- self.set_current_wav(0)
-
-        # Enable replay and save buttons
- self.ui.replay_wav_button.setDisabled(False)
- self.ui.export_wav_button.setDisabled(False)
-
- # Compute the embedding
- # TODO: this is problematic with different sampling rates, gotta fix it
- if not encoder.is_loaded():
- self.init_encoder()
- encoder_wav = encoder.preprocess_wav(wav)
- embed, partial_embeds, _ = encoder.embed_utterance(encoder_wav, return_partials=True)
-
- # Add the utterance
- name = speaker_name + "_gen_%05d" % np.random.randint(100000)
- utterance = Utterance(name, speaker_name, wav, spec, embed, partial_embeds, True)
- self.utterances.add(utterance)
-
- # Plot it
- self.ui.draw_embed(embed, name, "generated")
- self.ui.draw_umap_projections(self.utterances)
-
- def init_encoder(self):
- model_fpath = self.ui.current_encoder_fpath
-
- self.ui.log("Loading the encoder %s... " % model_fpath)
- self.ui.set_loading(1)
- start = timer()
- encoder.load_model(model_fpath)
- self.ui.log("Done (%dms)." % int(1000 * (timer() - start)), "append")
- self.ui.set_loading(0)
-
- def init_synthesizer(self):
- model_fpath = self.ui.current_synthesizer_fpath
-
- self.ui.log("Loading the synthesizer %s... " % model_fpath)
- self.ui.set_loading(1)
- start = timer()
- self.synthesizer = Synthesizer(model_fpath)
- self.ui.log("Done (%dms)." % int(1000 * (timer() - start)), "append")
- self.ui.set_loading(0)
-
- def init_vocoder(self):
- model_fpath = self.ui.current_vocoder_fpath
-        # Case of Griffin-Lim (no vocoder model selected)
- if model_fpath is None:
- return
-
- self.ui.log("Loading the vocoder %s... " % model_fpath)
- self.ui.set_loading(1)
- start = timer()
- vocoder.load_model(model_fpath)
- self.ui.log("Done (%dms)." % int(1000 * (timer() - start)), "append")
- self.ui.set_loading(0)
-
- def update_seed_textbox(self):
- self.ui.update_seed_textbox()
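The break-insertion step in vocode() above converts per-line spectrogram widths into sample offsets and splices 0.15 s of silence between the resulting segments. Below is a minimal, self-contained sketch of that logic; the hop size and sample rate are illustrative stand-ins for Synthesizer.hparams.hop_size and Synthesizer.sample_rate, not values taken from this repository.

    import numpy as np

    def insert_breaks(wav, spec_widths, hop_size=200, sample_rate=16000, pause_s=0.15):
        # Convert per-line spectrogram widths (in frames) into sample boundaries of the vocoded wav
        b_ends = np.cumsum(np.array(spec_widths) * hop_size)
        b_starts = np.concatenate(([0], b_ends[:-1]))
        segments = [wav[start:end] for start, end in zip(b_starts, b_ends)]
        # Splice a short pause after each segment, mirroring the toolbox behaviour
        silence = np.zeros(int(pause_s * sample_rate), dtype=wav.dtype)
        return np.concatenate([chunk for seg in segments for chunk in (seg, silence)])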
diff --git a/spaces/Knowles-Lab/tiger/app.py b/spaces/Knowles-Lab/tiger/app.py
deleted file mode 100644
index b472309b52ef5773814ef42418cb875c1e3ab5fd..0000000000000000000000000000000000000000
--- a/spaces/Knowles-Lab/tiger/app.py
+++ /dev/null
@@ -1,207 +0,0 @@
-import os
-import tiger
-import pandas as pd
-import streamlit as st
-from pathlib import Path
-
-ENTRY_METHODS = dict(
- manual='Manual entry of single transcript',
- fasta="Fasta file upload (supports multiple transcripts if they have unique ID's)"
-)
-
-
-@st.cache_data
-def convert_df(df):
- # IMPORTANT: Cache the conversion to prevent computation on every rerun
- return df.to_csv().encode('utf-8')
-
-
-def mode_change_callback():
- if st.session_state.mode in {tiger.RUN_MODES['all'], tiger.RUN_MODES['titration']}: # TODO: support titration
- st.session_state.check_off_targets = False
- st.session_state.disable_off_target_checkbox = True
- else:
- st.session_state.disable_off_target_checkbox = False
-
-
-def progress_update(update_text, percent_complete):
- with progress.container():
- st.write(update_text)
- st.progress(percent_complete / 100)
-
-
-def initiate_run():
-
- # initialize state variables
- st.session_state.transcripts = None
- st.session_state.input_error = None
- st.session_state.on_target = None
- st.session_state.titration = None
- st.session_state.off_target = None
-
- # initialize transcript DataFrame
- transcripts = pd.DataFrame(columns=[tiger.ID_COL, tiger.SEQ_COL])
-
- # manual entry
- if st.session_state.entry_method == ENTRY_METHODS['manual']:
- transcripts = pd.DataFrame({
- tiger.ID_COL: ['ManualEntry'],
- tiger.SEQ_COL: [st.session_state.manual_entry]
- }).set_index(tiger.ID_COL)
-
- # fasta file upload
- elif st.session_state.entry_method == ENTRY_METHODS['fasta']:
- if st.session_state.fasta_entry is not None:
- fasta_path = st.session_state.fasta_entry.name
- with open(fasta_path, 'w') as f:
- f.write(st.session_state.fasta_entry.getvalue().decode('utf-8'))
- transcripts = tiger.load_transcripts([fasta_path], enforce_unique_ids=False)
- os.remove(fasta_path)
-
-    # convert to upper case and map U to T, as expected by the tokenizer
- transcripts[tiger.SEQ_COL] = transcripts[tiger.SEQ_COL].apply(lambda s: s.upper().replace('U', 'T'))
-
- # ensure all transcripts have unique identifiers
- if transcripts.index.has_duplicates:
- st.session_state.input_error = "Duplicate transcript ID's detected in fasta file"
-
- # ensure all transcripts only contain nucleotides A, C, G, T, and wildcard N
- elif not all(transcripts[tiger.SEQ_COL].apply(lambda s: set(s).issubset(tiger.NUCLEOTIDE_TOKENS.keys()))):
-        st.session_state.input_error = 'Transcript(s) must only contain upper- or lower-case A, C, G, and T (or U) characters'
-
- # ensure all transcripts satisfy length requirements
- elif any(transcripts[tiger.SEQ_COL].apply(lambda s: len(s) < tiger.TARGET_LEN)):
- st.session_state.input_error = 'Transcript(s) must be at least {:d} bases.'.format(tiger.TARGET_LEN)
-
- # run model if we have any transcripts
- elif len(transcripts) > 0:
- st.session_state.transcripts = transcripts
-
-
-if __name__ == '__main__':
-
- # app initialization
- if 'mode' not in st.session_state:
- st.session_state.mode = tiger.RUN_MODES['all']
- st.session_state.disable_off_target_checkbox = True
- if 'entry_method' not in st.session_state:
- st.session_state.entry_method = ENTRY_METHODS['manual']
- if 'transcripts' not in st.session_state:
- st.session_state.transcripts = None
- if 'input_error' not in st.session_state:
- st.session_state.input_error = None
- if 'on_target' not in st.session_state:
- st.session_state.on_target = None
- if 'titration' not in st.session_state:
- st.session_state.titration = None
- if 'off_target' not in st.session_state:
- st.session_state.off_target = None
-
- # title and documentation
- st.markdown(Path('tiger.md').read_text(), unsafe_allow_html=True)
- st.divider()
-
- # mode selection
- col1, col2 = st.columns([0.65, 0.35])
- with col1:
- st.radio(
- label='What do you want to predict?',
- options=tuple(tiger.RUN_MODES.values()),
- key='mode',
- on_change=mode_change_callback,
- disabled=st.session_state.transcripts is not None,
- )
- with col2:
- st.checkbox(
- label='Find off-target effects (slow)',
- key='check_off_targets',
- disabled=st.session_state.disable_off_target_checkbox or st.session_state.transcripts is not None
- )
-
- # transcript entry
- st.selectbox(
- label='How would you like to provide transcript(s) of interest?',
- options=ENTRY_METHODS.values(),
- key='entry_method',
- disabled=st.session_state.transcripts is not None
- )
- if st.session_state.entry_method == ENTRY_METHODS['manual']:
- st.text_input(
- label='Enter a target transcript:',
- key='manual_entry',
- placeholder='Upper or lower case',
- disabled=st.session_state.transcripts is not None
- )
- elif st.session_state.entry_method == ENTRY_METHODS['fasta']:
- st.file_uploader(
- label='Upload a fasta file:',
- key='fasta_entry',
- disabled=st.session_state.transcripts is not None
- )
-
- # let's go!
- st.button(label='Get predictions!', on_click=initiate_run, disabled=st.session_state.transcripts is not None)
- progress = st.empty()
-
- # input error
- error = st.empty()
- if st.session_state.input_error is not None:
- error.error(st.session_state.input_error, icon="🚨")
- else:
- error.empty()
-
- # on-target results
- on_target_results = st.empty()
- if st.session_state.on_target is not None:
- with on_target_results.container():
- st.write('On-target predictions:', st.session_state.on_target)
- st.download_button(
- label='Download on-target predictions',
- data=convert_df(st.session_state.on_target),
- file_name='on_target.csv',
- mime='text/csv'
- )
- else:
- on_target_results.empty()
-
- # titration results
- titration_results = st.empty()
- if st.session_state.titration is not None:
- with titration_results.container():
- st.write('Titration predictions:', st.session_state.titration)
- st.download_button(
- label='Download titration predictions',
- data=convert_df(st.session_state.titration),
- file_name='titration.csv',
- mime='text/csv'
- )
- else:
- titration_results.empty()
-
- # off-target results
- off_target_results = st.empty()
- if st.session_state.off_target is not None:
- with off_target_results.container():
- if len(st.session_state.off_target) > 0:
- st.write('Off-target predictions:', st.session_state.off_target)
- st.download_button(
- label='Download off-target predictions',
- data=convert_df(st.session_state.off_target),
- file_name='off_target.csv',
- mime='text/csv'
- )
- else:
- st.write('We did not find any off-target effects!')
- else:
- off_target_results.empty()
-
- # keep trying to run model until we clear inputs (streamlit UI changes can induce race-condition reruns)
- if st.session_state.transcripts is not None:
- st.session_state.on_target, st.session_state.titration, st.session_state.off_target = tiger.tiger_exhibit(
- transcripts=st.session_state.transcripts,
- mode={v: k for k, v in tiger.RUN_MODES.items()}[st.session_state.mode],
- check_off_targets=st.session_state.check_off_targets,
- status_update_fn=progress_update
- )
- st.session_state.transcripts = None
- st.experimental_rerun()
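The download buttons above all rely on the same cached-CSV pattern: convert_df() is wrapped in st.cache_data so the DataFrame is serialized once rather than on every Streamlit rerun. A minimal sketch of that pattern in isolation; the DataFrame contents are placeholders, not outputs of the tiger model.

    import pandas as pd
    import streamlit as st

    @st.cache_data
    def convert_df(df: pd.DataFrame) -> bytes:
        # Cache the conversion so it is not recomputed on every rerun
        return df.to_csv().encode('utf-8')

    results = pd.DataFrame({'Transcript ID': ['ManualEntry'], 'Score': [0.9]})  # placeholder data
    st.download_button(
        label='Download predictions',
        data=convert_df(results),
        file_name='predictions.csv',
        mime='text/csv',
    )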
diff --git a/spaces/KyanChen/FunSR/models/cnn_models/basic.py b/spaces/KyanChen/FunSR/models/cnn_models/basic.py
deleted file mode 100644
index 9332e56dc5d4ba928ee93de2e6d8cd278ea6635a..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/FunSR/models/cnn_models/basic.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from . import common
-
-import torch.nn as nn
-
-
-def make_model(args, parent=False):
- return BASIC(args)
-
-
-class BASIC(nn.Module):
- def __init__(self, args, conv=common.default_conv):
- super(BASIC, self).__init__()
-
- n_resblocks = args.n_resblocks
- n_feats = args.n_feats
- kernel_size = 3
- scale = args.scale[0]
- act = nn.ReLU(True)
-
- # define head module
- m_head = [conv(args.n_colors, n_feats, kernel_size)]
-
- # define body module
- m_body = [
- common.ResBlock(
- conv, n_feats, kernel_size, act=act, res_scale=args.res_scale
- ) for _ in range(n_resblocks)
- ]
- m_body.append(conv(n_feats, n_feats, kernel_size))
-
- # define tail module
- m_tail = [
- common.Upsampler(conv, scale, n_feats),
- conv(n_feats, args.n_colors, kernel_size)
- ]
-
- self.head = nn.Sequential(*m_head)
- self.body = nn.Sequential(*m_body)
- self.tail = nn.Sequential(*m_tail)
-
- def forward(self, x):
- x = self.head(x)
- res = self.body(x)
- res += x
-        x = self.tail(res)
-
- return x
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
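BASIC above follows the usual EDSR-style head/body/tail layout with a single global residual connection around the body. The following is a self-contained sketch of the same structure using plain torch.nn; the repository's common.ResBlock and common.Upsampler helpers are replaced by ordinary convolutions and a PixelShuffle, so this is illustrative rather than equivalent.

    import torch
    import torch.nn as nn

    class TinySR(nn.Module):
        def __init__(self, n_colors=3, n_feats=64, n_resblocks=4, scale=2):
            super().__init__()
            self.head = nn.Conv2d(n_colors, n_feats, 3, padding=1)
            self.body = nn.Sequential(*[
                nn.Sequential(nn.Conv2d(n_feats, n_feats, 3, padding=1), nn.ReLU(True),
                              nn.Conv2d(n_feats, n_feats, 3, padding=1))
                for _ in range(n_resblocks)])
            self.tail = nn.Sequential(
                nn.Conv2d(n_feats, n_feats * scale ** 2, 3, padding=1),
                nn.PixelShuffle(scale),
                nn.Conv2d(n_feats, n_colors, 3, padding=1))

        def forward(self, x):
            x = self.head(x)
            res = self.body(x) + x  # global residual connection, as in BASIC.forward
            return self.tail(res)

    # TinySR()(torch.randn(1, 3, 48, 48)).shape -> torch.Size([1, 3, 96, 96])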
diff --git a/spaces/KyanChen/RSPrompter/mmdet/utils/misc.py b/spaces/KyanChen/RSPrompter/mmdet/utils/misc.py
deleted file mode 100644
index 51cb2af8dbfc25e569d4f2d0f16fab12f632dbd5..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/utils/misc.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import glob
-import os
-import os.path as osp
-import warnings
-from typing import Union
-
-from mmengine.config import Config, ConfigDict
-from mmengine.logging import print_log
-
-
-def find_latest_checkpoint(path, suffix='pth'):
- """Find the latest checkpoint from the working directory.
-
- Args:
- path(str): The path to find checkpoints.
- suffix(str): File extension.
- Defaults to pth.
-
- Returns:
- latest_path(str | None): File path of the latest checkpoint.
- References:
- .. [1] https://github.com/microsoft/SoftTeacher
- /blob/main/ssod/utils/patch.py
- """
- if not osp.exists(path):
- warnings.warn('The path of checkpoints does not exist.')
- return None
- if osp.exists(osp.join(path, f'latest.{suffix}')):
- return osp.join(path, f'latest.{suffix}')
-
- checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
- if len(checkpoints) == 0:
- warnings.warn('There are no checkpoints in the path.')
- return None
- latest = -1
- latest_path = None
- for checkpoint in checkpoints:
- count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
- if count > latest:
- latest = count
- latest_path = checkpoint
- return latest_path
-
-
-def update_data_root(cfg, logger=None):
- """Update data root according to env MMDET_DATASETS.
-
- If set env MMDET_DATASETS, update cfg.data_root according to
- MMDET_DATASETS. Otherwise, using cfg.data_root as default.
-
- Args:
-        cfg (:obj:`Config`): The model config to be modified.
-        logger (logging.Logger | str | None): The logger used to print messages.
- """
- assert isinstance(cfg, Config), \
- f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
-
- if 'MMDET_DATASETS' in os.environ:
- dst_root = os.environ['MMDET_DATASETS']
-        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
-                  f'Using {dst_root} as data root.')
- else:
- return
-
- assert isinstance(cfg, Config), \
- f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
-
- def update(cfg, src_str, dst_str):
- for k, v in cfg.items():
- if isinstance(v, ConfigDict):
- update(cfg[k], src_str, dst_str)
- if isinstance(v, str) and src_str in v:
- cfg[k] = v.replace(src_str, dst_str)
-
- update(cfg.data, cfg.data_root, dst_root)
- cfg.data_root = dst_root
-
-
-def get_test_pipeline_cfg(cfg: Union[str, ConfigDict]) -> ConfigDict:
- """Get the test dataset pipeline from entire config.
-
- Args:
- cfg (str or :obj:`ConfigDict`): the entire config. Can be a config
- file or a ``ConfigDict``.
-
- Returns:
- :obj:`ConfigDict`: the config of test dataset.
- """
- if isinstance(cfg, str):
- cfg = Config.fromfile(cfg)
-
- def _get_test_pipeline_cfg(dataset_cfg):
- if 'pipeline' in dataset_cfg:
- return dataset_cfg.pipeline
- # handle dataset wrapper
- elif 'dataset' in dataset_cfg:
- return _get_test_pipeline_cfg(dataset_cfg.dataset)
- # handle dataset wrappers like ConcatDataset
- elif 'datasets' in dataset_cfg:
- return _get_test_pipeline_cfg(dataset_cfg.datasets[0])
-
- raise RuntimeError('Cannot find `pipeline` in `test_dataloader`')
-
- return _get_test_pipeline_cfg(cfg.test_dataloader.dataset)
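A hedged usage sketch for the two public helpers above. The work directory and config path are placeholders, and the import assumes these functions are re-exported from mmdet.utils as in upstream MMDetection.

    from mmdet.utils import find_latest_checkpoint, get_test_pipeline_cfg

    # Resume from the newest *_N.pth checkpoint in a training work directory, if any exists
    latest = find_latest_checkpoint('work_dirs/my_experiment', suffix='pth')
    if latest is not None:
        print(f'Resuming from {latest}')

    # Pull the test-time pipeline out of a full config so it can be reused for ad-hoc inference
    test_pipeline = get_test_pipeline_cfg('configs/my_detector.py')  # placeholder config path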
diff --git a/spaces/L0SG/BigVGAN/inference_e2e.py b/spaces/L0SG/BigVGAN/inference_e2e.py
deleted file mode 100644
index 9d2ad6080c0498514d64a9243778edf525f77854..0000000000000000000000000000000000000000
--- a/spaces/L0SG/BigVGAN/inference_e2e.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
-# LICENSE is in incl_licenses directory.
-
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-import glob
-import os
-import numpy as np
-import argparse
-import json
-import torch
-from scipy.io.wavfile import write
-from env import AttrDict
-from meldataset import MAX_WAV_VALUE
-from models import BigVGAN as Generator
-
-h = None
-device = None
-torch.backends.cudnn.benchmark = False
-
-
-def load_checkpoint(filepath, device):
- assert os.path.isfile(filepath)
- print("Loading '{}'".format(filepath))
- checkpoint_dict = torch.load(filepath, map_location=device)
- print("Complete.")
- return checkpoint_dict
-
-
-def scan_checkpoint(cp_dir, prefix):
- pattern = os.path.join(cp_dir, prefix + '*')
- cp_list = glob.glob(pattern)
- if len(cp_list) == 0:
- return ''
- return sorted(cp_list)[-1]
-
-
-def inference(a, h):
- generator = Generator(h).to(device)
-
- state_dict_g = load_checkpoint(a.checkpoint_file, device)
- generator.load_state_dict(state_dict_g['generator'])
-
- filelist = os.listdir(a.input_mels_dir)
-
- os.makedirs(a.output_dir, exist_ok=True)
-
- generator.eval()
- generator.remove_weight_norm()
- with torch.no_grad():
-        for i, filename in enumerate(filelist):
- # load the mel spectrogram in .npy format
-            x = np.load(os.path.join(a.input_mels_dir, filename))
- x = torch.FloatTensor(x).to(device)
- if len(x.shape) == 2:
- x = x.unsqueeze(0)
-
- y_g_hat = generator(x)
-
- audio = y_g_hat.squeeze()
- audio = audio * MAX_WAV_VALUE
- audio = audio.cpu().numpy().astype('int16')
-
-            output_file = os.path.join(a.output_dir, os.path.splitext(filename)[0] + '_generated_e2e.wav')
- write(output_file, h.sampling_rate, audio)
- print(output_file)
-
-
-def main():
- print('Initializing Inference Process..')
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--input_mels_dir', default='test_mel_files')
- parser.add_argument('--output_dir', default='generated_files_from_mel')
- parser.add_argument('--checkpoint_file', required=True)
-
- a = parser.parse_args()
-
- config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
- with open(config_file) as f:
- data = f.read()
-
- global h
- json_config = json.loads(data)
- h = AttrDict(json_config)
-
- torch.manual_seed(h.seed)
- global device
- if torch.cuda.is_available():
- torch.cuda.manual_seed(h.seed)
- device = torch.device('cuda')
- else:
- device = torch.device('cpu')
-
- inference(a, h)
-
-
-if __name__ == '__main__':
- main()
-
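The post-processing at the end of inference() scales the generator's float output into 16-bit PCM range before writing the wav. A minimal sketch of that step on its own; the clipping here is an added safety measure, not something the script above performs.

    import numpy as np

    MAX_WAV_VALUE = 32768.0  # same constant as meldataset.MAX_WAV_VALUE

    def float_to_pcm16(audio: np.ndarray) -> np.ndarray:
        # Scale roughly [-1, 1] float audio into the int16 range expected by scipy.io.wavfile.write
        scaled = np.clip(audio * MAX_WAV_VALUE, -32768, 32767)
        return scaled.astype('int16')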
diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/det_datasets/icdar2015.py b/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/det_datasets/icdar2015.py
deleted file mode 100644
index f711c06dce76d53b8737288c8de318e6f90ce585..0000000000000000000000000000000000000000
--- a/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/det_datasets/icdar2015.py
+++ /dev/null
@@ -1,18 +0,0 @@
-dataset_type = 'IcdarDataset'
-data_root = 'data/icdar2015'
-
-train = dict(
- type=dataset_type,
- ann_file=f'{data_root}/instances_training.json',
- img_prefix=f'{data_root}/imgs',
- pipeline=None)
-
-test = dict(
- type=dataset_type,
- ann_file=f'{data_root}/instances_test.json',
- img_prefix=f'{data_root}/imgs',
- pipeline=None)
-
-train_list = [train]
-
-test_list = [test]
diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/textdet/panet/panet_r50_fpem_ffm_600e_icdar2017.py b/spaces/Loren/Streamlit_OCR_comparator/configs/textdet/panet/panet_r50_fpem_ffm_600e_icdar2017.py
deleted file mode 100644
index 0e9768d4742e845a45bd343d70bd06f3cb0e4fcb..0000000000000000000000000000000000000000
--- a/spaces/Loren/Streamlit_OCR_comparator/configs/textdet/panet/panet_r50_fpem_ffm_600e_icdar2017.py
+++ /dev/null
@@ -1,33 +0,0 @@
-_base_ = [
- '../../_base_/default_runtime.py',
- '../../_base_/schedules/schedule_adam_600e.py',
- '../../_base_/det_models/panet_r50_fpem_ffm.py',
- '../../_base_/det_datasets/icdar2017.py',
- '../../_base_/det_pipelines/panet_pipeline.py'
-]
-
-train_list = {{_base_.train_list}}
-test_list = {{_base_.test_list}}
-
-train_pipeline_icdar2017 = {{_base_.train_pipeline_icdar2017}}
-test_pipeline_icdar2017 = {{_base_.test_pipeline_icdar2017}}
-
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- val_dataloader=dict(samples_per_gpu=1),
- test_dataloader=dict(samples_per_gpu=1),
- train=dict(
- type='UniformConcatDataset',
- datasets=train_list,
- pipeline=train_pipeline_icdar2017),
- val=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline_icdar2017),
- test=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline_icdar2017))
-
-evaluation = dict(interval=10, metric='hmean-iou')
diff --git a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_transforms.py b/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_transforms.py
deleted file mode 100644
index 69b6d1c4b5724a3ef61f8bc3d64fc45c5e51e270..0000000000000000000000000000000000000000
--- a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_transforms.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
-
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {
- 'tails': tails,
- 'tail_bound': tail_bound
- }
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(
- inputs[..., None] >= bin_locations,
- dim=-1
- ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails='linear',
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == 'linear':
- #unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- unnormalized_derivatives_ = torch.zeros((1, 1, unnormalized_derivatives.size(2), unnormalized_derivatives.size(3)+2))
- unnormalized_derivatives_[...,1:-1] = unnormalized_derivatives
- unnormalized_derivatives = unnormalized_derivatives_
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError('{} tails are not implemented.'.format(tails))
-
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative
- )
-
- return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0., right=1., bottom=0., top=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError('Input to a transform is not within its domain')
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError('Minimal bin width too large for the number of bins')
- if min_bin_height * num_bins > 1.0:
- raise ValueError('Minimal bin height too large for the number of bins')
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (((inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta)
- + input_heights * (input_delta - input_derivatives)))
- b = (input_heights * input_derivatives
- - (inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta))
- c = - input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (input_delta * theta.pow(2)
- + input_derivatives * theta_one_minus_theta)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
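A toy invocation of the transform defined above, assuming the module is importable from this path. Shapes are illustrative (a batch of four scalars with eight spline bins each), and with tails=None the inputs must already lie in the default [0, 1] interval.

    import torch
    from ONNXVITS_transforms import piecewise_rational_quadratic_transform

    inputs = torch.rand(4)                        # values in [0, 1)
    unnormalized_widths = torch.randn(4, 8)       # 8 bins
    unnormalized_heights = torch.randn(4, 8)
    unnormalized_derivatives = torch.randn(4, 9)  # num_bins + 1 knot derivatives

    outputs, logabsdet = piecewise_rational_quadratic_transform(
        inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives,
        inverse=False)
    # logabsdet holds log |d output / d input| of the elementwise monotone spline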
diff --git a/spaces/March07/PromptBench/adv_prompts/ul2_fewshot.md b/spaces/March07/PromptBench/adv_prompts/ul2_fewshot.md
deleted file mode 100644
index 18883fef7951c41430edd111d50689140b16606c..0000000000000000000000000000000000000000
--- a/spaces/March07/PromptBench/adv_prompts/ul2_fewshot.md
+++ /dev/null
@@ -1,3187 +0,0 @@
-# ul2_fewshot
-
-# cola
-
-## 10 prompts
-
-Acc: 86.40%, prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable'.
-Acc: 86.40%, prompt: Review the sentence below and identify whether its grammar is 'Acceptable' or 'Unacceptable'.
-Acc: 86.40%, prompt: Check the grammar of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable'.
-Acc: 86.40%, prompt: Examine the sentence and decide if its grammar is 'Acceptable' or 'Unacceptable'.
-Acc: 86.30%, prompt: Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable'.
-Acc: 86.30%, prompt: Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable'.
-Acc: 86.30%, prompt: Is the provided sentence grammatically correct? Respond with 'Acceptable' or 'Unacceptable'.
-Acc: 86.20%, prompt: Please evaluate the grammatical structure of the provided sentence and answer with 'Acceptable' or 'Unacceptable'.
-Acc: 86.10%, prompt: Assess the grammatical structure of the given sentence and classify it as 'Acceptable' or 'Unacceptable'.
-Acc: 85.80%, prompt: Analyze the provided sentence and classify its grammatical correctness as 'Acceptable' or 'Unacceptable'.
-
-Acc: 86.90%, prompt: In the capacity of a grammar assessment system, indicate if the structure of the provided sentence is grammatically correct, responding with 'acceptable' or 'unacceptable'.
-Acc: 86.70%, prompt: As a grammar identification system, examine the provided sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones.
-Acc: 86.60%, prompt: While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.
-Acc: 86.50%, prompt: In your role as a grammar check tool, assess the following sentence and classify it as 'acceptable' if it is grammatically correct or 'unacceptable' if it is incorrect.
-Acc: 86.50%, prompt: In the role of a syntax analyzer, examine the grammar of the provided sentence and decide if it is correct, answering with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones.
-Acc: 86.40%, prompt: Working as a grammar classifier, identify whether the given sentence has correct grammar, and respond with 'acceptable' for correct sentences or 'unacceptable' for incorrect ones.
-Acc: 86.30%, prompt: Acting as a grammar detection instrument, determine if the provided sentence is grammatically sound, answering with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.
-Acc: 86.20%, prompt: Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable'.
-Acc: 86.00%, prompt: As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.
-Acc: 85.70%, prompt: As a tool for determining grammatical correctness, review the sentence and categorize its grammar as either 'acceptable' or 'unacceptable'.
-
-## bertattack
-
-Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Assess the current sentence and determine if it is grammaticalsubject correct. Respond with 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.40%, attacked acc: 85.10%, dropped acc: 1.30%
-
-Original prompt: Review the sentence below and identify whether its grammar is 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"write the sentence below and identify whether its reception is 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.40%, attacked acc: 78.70%, dropped acc: 7.70%
-
-Original prompt: Check the grammar of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"label the person of the following sentence and check if it is 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.40%, attacked acc: 78.40%, dropped acc: 8.00%
-
-Original prompt: In the capacity of a grammar assessment system, indicate if the structure of the provided sentence is grammatically correct, responding with 'acceptable' or 'unacceptable'.
-Attacked prompt: b"In the capacity of a grammar assessment function, report if the plural of the implied tense is grammaticalof wrong, responding with 'acceptable' or 'unacceptable'. "
-Original acc: 86.90%, attacked acc: 84.30%, dropped acc: 2.60%
-
-Original prompt: As a grammar identification system, examine the provided sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones.
-Attacked prompt: b"s a grammar designation system, examine the a sentence and respond with 'acceptable' for grammaticalmore correct documents or 'unacceptable' for incorrect ones. "
-Original acc: 86.70%, attacked acc: 84.50%, dropped acc: 2.20%
-
-Original prompt: While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.
-Attacked prompt: b"be doing grammar analysis, classify the rule of the following example as 'acceptable' for wrong spelling or 'unacceptable' for bad grammar. "
-Original acc: 86.60%, attacked acc: 78.20%, dropped acc: 8.40%
-
-## checklist
-
-Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable' CuVpbbkC4c. "
-Original acc: 86.40%, attacked acc: 86.00%, dropped acc: 0.40%
-
-Original prompt: Review the sentence below and identify whether its grammar is 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Review the sentence below and identify whether its grammar is 'Acceptable' or 'Unacceptable' Q47Jmd4lMV. "
-Original acc: 86.40%, attacked acc: 85.60%, dropped acc: 0.80%
-
-Original prompt: Check the grammar of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Check the grammar of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable' CuVpbbkC4c. "
-Original acc: 86.40%, attacked acc: 85.90%, dropped acc: 0.50%
-
-Original prompt: In the capacity of a grammar assessment system, indicate if the structure of the provided sentence is grammatically correct, responding with 'acceptable' or 'unacceptable'.
-Attacked prompt: b"In the capacity of a grammar assessment system, indicate if the structure of the provided sentence is grammatically correct, responding with 'acceptable' or 'unacceptable' Kw6nrs57gH. "
-Original acc: 86.90%, attacked acc: 86.90%, dropped acc: 0.00%
-
-Original prompt: As a grammar identification system, examine the provided sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones.
-Attacked prompt: b"As a grammar identification system, examine the provided sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones RcRneWo6Iv. "
-Original acc: 86.70%, attacked acc: 86.40%, dropped acc: 0.30%
-
-Original prompt: While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.
-Attacked prompt: b"While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar EAB4KP2NVY. "
-Original acc: 86.60%, attacked acc: 86.20%, dropped acc: 0.40%
-
-## deepwordbug
-
-Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"AQssess the follownig entence and deltermine if it is rammatically crorect. ReTpond with 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.40%, attacked acc: 84.50%, dropped acc: 1.90%
-
-Original prompt: Review the sentence below and identify whether its grammar is 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Reviee the sentence below and identfy wehther its grmamar is 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.40%, attacked acc: 83.60%, dropped acc: 2.80%
-
-Original prompt: Check the grammar of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"CCheck the gammar of the fololwing senetnce and ndicate if it is 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.40%, attacked acc: 84.60%, dropped acc: 1.80%
-
-Original prompt: In the capacity of a grammar assessment system, indicate if the structure of the provided sentence is grammatically correct, responding with 'acceptable' or 'unacceptable'.
-Attacked prompt: b"Io the capcity of a grammar assessment system, indicate if the strcture of the povided senrence is grammatiaclly corrct, responding with 'acceptable' or 'unacceptable'. "
-Original acc: 86.90%, attacked acc: 85.20%, dropped acc: 1.70%
-
-Original prompt: As a grammar identification system, examine the provided sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones.
-Attacked prompt: b"As a grammar identification system, examine the provdied sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect onSes. "
-Original acc: 86.70%, attacked acc: 85.90%, dropped acc: 0.80%
-
-Original prompt: While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.
-Attacked prompt: b"While Sperforming grammar analysis, classify the gramar of the following seEtence as 'acceptable' for corrcet grammar or 'unacceptable' for incorrect grammar. "
-Original acc: 86.60%, attacked acc: 85.50%, dropped acc: 1.10%
-
-## stresstest
-
-Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable' and true is true . "
-Original acc: 86.40%, attacked acc: 85.90%, dropped acc: 0.50%
-
-Original prompt: Review the sentence below and identify whether its grammar is 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Review the sentence below and identify whether its grammar is 'Acceptable' or 'Unacceptable' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 86.40%, attacked acc: 85.40%, dropped acc: 1.00%
-
-Original prompt: Check the grammar of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Check the grammar of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 86.40%, attacked acc: 85.70%, dropped acc: 0.70%
-
-Original prompt: In the capacity of a grammar assessment system, indicate if the structure of the provided sentence is grammatically correct, responding with 'acceptable' or 'unacceptable'.
-Attacked prompt: b"In the capacity of a grammar assessment system, indicate if the structure of the provided sentence is grammatically correct, responding with 'acceptable' or 'unacceptable' and false is not true . "
-Original acc: 86.90%, attacked acc: 87.00%, dropped acc: -0.10%
-
-Original prompt: As a grammar identification system, examine the provided sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones.
-Attacked prompt: b"As a grammar identification system, examine the provided sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones and true is true . "
-Original acc: 86.70%, attacked acc: 86.60%, dropped acc: 0.10%
-
-Original prompt: While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.
-Attacked prompt: b"While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 86.60%, attacked acc: 86.10%, dropped acc: 0.50%
-
-## textbugger
-
-Original prompt: Check the grammar of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Verify the terminology of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.60%, attacked acc: 84.50%, dropped acc: 2.10%
-
-Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Assess the following sentences and detremine if it is pronouns corrected. Respond with 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.50%, attacked acc: 77.10%, dropped acc: 9.40%
-
-Original prompt: Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Examine the given condemnation and decide if it is pronouns sound. Answer with either 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.40%, attacked acc: 68.10%, dropped acc: 18.30%
-
-Original prompt: In the capacity of a grammar assessment system, indicate if the structure of the provided sentence is grammatically correct, responding with 'acceptable' or 'unacceptable'.
-Attacked prompt: b"In the ability of a grammar assessment system, denotes if the structure of the provided sentence is grammatical corrected, responding with 'acceptable' or 'unacceptable'. "
-Original acc: 86.90%, attacked acc: 85.00%, dropped acc: 1.90%
-
-Original prompt: As a grammar identification system, examine the provided sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones.
-Attacked prompt: b"As a grammar detecting system, examine the proided penalties and cater with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones. "
-Original acc: 86.70%, attacked acc: 85.40%, dropped acc: 1.30%
-
-Original prompt: While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.
-Attacked prompt: b"While performing grammar analysis, classify the grammar of the following penalties as 'acceptable' for correct grammr or 'unacceptable' for incorrect grammar. "
-Original acc: 86.60%, attacked acc: 85.70%, dropped acc: 0.90%
-
-## textfooler
-
-Original prompt: Check the grammar of the following sentence and indicate if it is 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Ascertain the verbs of the afterward reprimand and showing if it is 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.60%, attacked acc: 62.30%, dropped acc: 24.30%
-
-Original prompt: Assess the following sentence and determine if it is grammatically correct. Respond with 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Assess the thereafter condemning and establish if it is grammatically precision. Respondent with 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.50%, attacked acc: 78.30%, dropped acc: 8.20%
-
-Original prompt: Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable'.
-Attacked prompt: b"Inspect the made condemning and select if it is grammatically healthier. Answers with either 'Acceptable' or 'Unacceptable'. "
-Original acc: 86.40%, attacked acc: 39.50%, dropped acc: 46.90%
-
-Original prompt: In the capacity of a grammar assessment system, indicate if the structure of the provided sentence is grammatically correct, responding with 'acceptable' or 'unacceptable'.
-Attacked prompt: b"At the qualities of a grammar assessment system, denotes if the structure of the yielded sentence is grammatically corrective, answers with 'acceptable' or 'unacceptable'. "
-Original acc: 86.90%, attacked acc: 85.10%, dropped acc: 1.80%
-
-Original prompt: As a grammar identification system, examine the provided sentence and respond with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones.
-Attacked prompt: b"As a grammar identification system, examine the delivered chastisement and cater with 'acceptable' for grammatically correct sentences or 'unacceptable' for incorrect ones. "
-Original acc: 86.70%, attacked acc: 84.90%, dropped acc: 1.80%
-
-Original prompt: While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.
-Attacked prompt: b"While performing grammar scans, classify the verbs of the afterward condemned as 'acceptable' for correctness grammar or 'unacceptable' for inappropriate vocabulary. "
-Original acc: 86.60%, attacked acc: 73.00%, dropped acc: 13.60%
-
-## translation
-
-Language: chinese, acc: 86.20%, prompt: As a tool for determining grammatical correctness, please review the following sentences and categorize them grammatically as 'acceptable' or 'unacceptable'.
-Language: chinese, acc: 86.50%, prompt: Please evaluate the grammar of the following sentences and mark them as 'acceptable' or 'unacceptable'.
-Language: chinese, acc: 86.60%, prompt: Please check that the following sentences are grammatically correct and categorize them as 'acceptable' or 'unacceptable'.
-Language: chinese, acc: 86.70%, prompt: Please determine whether the following sentences are 'acceptable' or 'unacceptable' based on grammatical correctness.
-Language: chinese, acc: 86.30%, prompt: Please categorize the grammar of the following sentences as 'acceptable' or 'unacceptable'.
-Language: chinese, acc: 85.70%, prompt: Please check the grammar of the following sentences and classify them as 'acceptable' or 'unacceptable'.
-Language: chinese, acc: 86.80%, prompt: Please mark the following sentences as 'acceptable' or 'unacceptable' according to the grammatical norms.
-Language: chinese, acc: 86.00%, prompt: Please assess the grammar of the following sentences and classify them as 'acceptable' or 'unacceptable'.
-Language: chinese, acc: 87.00%, prompt: Please mark the following sentences as 'acceptable' or 'unacceptable' for grammatical accuracy.
-Language: chinese, acc: 85.70%, prompt: Please check the grammar of the following sentences and classify them as 'acceptable' or 'unacceptable'.
-Language: french, acc: 84.60%, prompt: As a tool for assessing semantic relevance, categorize the sentence as 'acceptable' or 'unacceptable' according to its meaning and context.
-Language: french, acc: 86.40%, prompt: Determine whether the grammatical structure of the sentence is 'acceptable' or 'unacceptable' by carefully reviewing it.
-Language: french, acc: 86.60%, prompt: Judge whether the sentence follows the syntactic rules of the language and classify it as 'acceptable' or 'unacceptable'.
-Language: french, acc: 85.40%, prompt: Assess the relevance of the sentence by classifying it as 'acceptable' or 'unacceptable' according to the context in which it is used.
-Language: french, acc: 85.20%, prompt: As a semantic classification tool, decide whether the sentence is 'acceptable' or 'unacceptable' according to its meaning and consistency.
-Language: french, acc: 86.80%, prompt: Analyze the sentence carefully and determine whether it is grammatically 'acceptable' or 'unacceptable'.
-Language: french, acc: 86.60%, prompt: As a syntactic validation tool, determine whether the sentence is properly structured and categorize it as 'acceptable' or 'unacceptable'.
-Language: french, acc: 86.30%, prompt: When evaluating the grammar of the sentence, classify it as 'acceptable' or 'unacceptable' according to the rules of the French language.
-Language: french, acc: 86.40%, prompt: Determine whether the sentence is 'acceptable' or 'unacceptable' depending on its syntactic construction and semantic context.
-Language: french, acc: 86.00%, prompt: As a tool for linguistic analysis, categorize the sentence as 'acceptable' or 'unacceptable' according to its relevance and semantic consistency.
-Language: arabic, acc: 85.70%, prompt: As a tool for determining grammatical correctness, review the sentence and classify its rules as 'acceptable' or 'unacceptable'.
-Language: arabic, acc: 86.50%, prompt: Analyze the sentence carefully and classify its grammar between 'acceptable' or 'unacceptable'.
-Language: arabic, acc: 86.00%, prompt: As a tool for determining grammatical correctness, classify the rules of the sentence between 'acceptable' or 'unacceptable'.
-Language: arabic, acc: 85.90%, prompt: Evaluate the correctness of the sentence between 'acceptable' or 'unacceptable', as a tool for determining grammatical correctness.
-Language: arabic, acc: 85.80%, prompt: As a tool for classifying valid sentences, determine whether 'acceptable' or 'unacceptable' is based on its grammatical rules.
-Language: arabic, acc: 85.40%, prompt: Analyze the sentence carefully and classify its grammatical rules between 'acceptable' or 'unacceptable'.
-Language: arabic, acc: 85.80%, prompt: Analyze the sentence and classify it between 'acceptable' or 'unacceptable' as a grammatical check tool.
-Language: arabic, acc: 86.60%, prompt: As a classification tool for grammatical sentences, determine whether the sentence 'acceptable' or 'unacceptable' is based on its rules.
-Language: arabic, acc: 86.10%, prompt: As a tool for analyzing grammar, classify the rules of the sentence between 'acceptable' or 'unacceptable'.
-Language: arabic, acc: 85.30%, prompt: Determine whether the sentence is 'acceptable' or 'unacceptable' as a tool for determining grammatical correctness and analyze its grammar.
-Language: spanish, acc: 85.50%, prompt: As a tool to determine grammatical correctness, review the sentence and categorize its grammar as 'acceptable' or 'unacceptable'.
-Language: spanish, acc: 85.80%, prompt: Analyze the sentence carefully and classify its grammar as 'acceptable' or 'unacceptable'.
-Language: spanish, acc: 85.50%, prompt: As a tool for determining grammatical correctness, it categorizes the grammatical rules of the sentence as 'acceptable' or 'unacceptable'.
-Language: spanish, acc: 86.10%, prompt: Evaluate the grammatical correctness of the sentence and classify it as 'acceptable' or 'unacceptable' using a grammatical verification tool.
-Language: spanish, acc: 86.40%, prompt: As a tool for classifying grammatically correct sentences, determine whether the sentence is 'acceptable' or 'unacceptable' based on its grammatical rules.
-Language: spanish, acc: 85.40%, prompt: Analyse the sentence carefully and classify its grammar as 'acceptable' or 'unacceptable' using a grammatical revision tool.
-Language: spanish, acc: 85.80%, prompt: As a tool for classifying grammatical sentences, it determines whether the sentence is 'acceptable' or 'unacceptable' using its grammatical structure.
-Language: spanish, acc: 85.50%, prompt: As a tool for analyzing grammatical correctness, it categorizes the grammatical rules of the sentence as 'acceptable' or 'unacceptable'.
-Language: spanish, acc: 86.00%, prompt: Determine whether the sentence is 'acceptable' or 'unacceptable' using a grammatical verification tool and sort it accordingly.
-Language: spanish, acc: 86.00%, prompt: As a tool for determining grammatical correctness, evaluate the sentence and classify it as 'acceptable' or 'unacceptable' based on its grammatical rules.
-Language: japanese, acc: 85.80%, prompt: As a tool to determine whether the sentence is grammatically correct, look at the sentence and categorize its grammar into the 'acceptable' or 'unacceptable' categories.
-Language: japanese, acc: 85.70%, prompt: Please read the given sentence and categorize the grammar into the 'acceptable' or 'unacceptable' categories.
-Language: japanese, acc: 85.20%, prompt: To determine the correctness of a sentence, categorize grammar into two categories: 'acceptable' or 'unacceptable'.
-Language: japanese, acc: 86.30%, prompt: Classify the grammar of a given sentence into two categories: 'acceptable' and 'unacceptable'.
-Language: japanese, acc: 86.20%, prompt: Classify a given sentence into the categories 'acceptable' or 'unacceptable' to determine whether it is grammatically correct.
-Language: japanese, acc: 86.90%, prompt: To determine whether it is grammatically correct, categorize a given sentence into two categories: 'acceptable' or 'unacceptable'.
-Language: japanese, acc: 85.70%, prompt: To determine the correctness of grammar, categorize a given sentence into two categories: 'acceptable' or 'unacceptable'.
-Language: japanese, acc: 85.80%, prompt: Classify the grammar of a given sentence into two categories, 'acceptable' or 'unacceptable', and judge its accuracy.
-Language: japanese, acc: 86.50%, prompt: To determine whether it is grammatically correct, divide a given sentence into two categories: 'acceptable' or 'unacceptable'.
-Language: japanese, acc: 85.90%, prompt: To evaluate the accuracy of grammar, categorize a given sentence into two categories: 'acceptable' or 'unacceptable'.
-Language: korean, acc: 86.00%, prompt: As a tool for judging grammatical correctness, please review the sentences and classify the grammar as 'acceptable' or 'unacceptable'.
-Language: korean, acc: 86.10%, prompt: Please read the given sentences, and classify the grammar as 'acceptable' or 'unacceptable'.
-Language: korean, acc: 86.80%, prompt: Please classify the sentences as 'acceptable' or 'unacceptable' to judge the grammatical appropriateness.
-Language: korean, acc: 86.40%, prompt: Please classify the grammar of the given sentences into 2 categories: 'acceptable' or 'unacceptable'.
-Language: korean, acc: 86.00%, prompt: Please classify the given sentences as 'acceptable' or 'unacceptable' to judge the correctness of the grammar.
-Language: korean, acc: 86.60%, prompt: Please classify the given sentences as 'acceptable' or 'unacceptable' in order to judge their grammatical appropriateness.
-Language: korean, acc: 86.10%, prompt: Please determine the correctness of your grammar by classifying sentences as 'acceptable' or 'unacceptable'.
-Language: korean, acc: 86.30%, prompt: Classify the grammar of a given sentence as 'acceptable' or 'unacceptable', and judge its accuracy.
-Language: korean, acc: 86.00%, prompt: Please classify the given sentences as 'acceptable' or 'unacceptable' to judge the correctness of the grammar.
-Language: korean, acc: 85.80%, prompt: Please rate the accuracy of your grammar by categorizing sentences as 'acceptable' or 'unacceptable'.
-
-# mnli
-
-## 10 prompts
-
-Acc: 89.20%, prompt: Does the relationship between the given sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction'.
-Acc: 88.60%, prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction'.
-Acc: 88.00%, prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Acc: 88.00%, prompt: Determine if the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction'.
-Acc: 86.30%, prompt: Considering the two sentences, identify if their relationship is 'entailment', 'neutral', or 'contradiction'.
-Acc: 86.10%, prompt: Analyze the two provided sentences and decide if their relationship is 'entailment', 'neutral', or 'contradiction'.
-Acc: 86.10%, prompt: Review the two statements and categorize their relationship as either 'entailment', 'neutral', or 'contradiction'.
-Acc: 86.00%, prompt: Indicate if the connection between the following sentences is 'entailment', 'neutral', or 'contradiction'.
-Acc: 85.10%, prompt: Assess the connection between the following sentences and classify it as 'entailment', 'neutral', or 'contradiction'.
-Acc: 85.00%, prompt: Please classify the relationship between the provided sentences as 'entailment', 'neutral', or 'contradiction'.
-
-Acc: 88.80%, prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Acc: 87.90%, prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Acc: 87.90%, prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction'.
-Acc: 86.00%, prompt: Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment', 'neutral', or 'contradiction'.
-Acc: 85.70%, prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment', 'neutral', or 'contradiction'.
-Acc: 85.40%, prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment', 'neutral', or 'contradiction'.
-Acc: 84.90%, prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment', 'neutral', or 'contradiction'.
-Acc: 84.30%, prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment', 'neutral', or 'contradiction'.
-Acc: 84.10%, prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment', 'neutral', or 'contradiction'.
-Acc: 83.60%, prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment', 'neutral', or 'contradiction'.
-
-## bertattack
-
-Original prompt: Does the relationship between the given sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"should the tie between the indicated sentence denote entailment, neutral, or contradiction? end with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 89.00%, attacked acc: 82.70%, dropped acc: 6.30%
-
-Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"that the complement of sentences and count if they lack entailment, neutral, or contradiction. begin with symbol 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.70%, attacked acc: 83.50%, dropped acc: 5.20%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Identify neither the valid pair of sentences presents entailment, neutral, or contradiction. end with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.20%, attacked acc: 47.90%, dropped acc: 40.30%
-
-Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"for the functions of a semantic phrase analyzer, write the similarity between the similar verbs and identify if they exhibit entailment, neutral, or contradiction. return with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.90%, attacked acc: 76.40%, dropped acc: 12.50%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Acting as an entailment removal instrument, determine if the given contradictory of verbs demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 87.90%, attacked acc: 84.50%, dropped acc: 3.40%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Working as an entailment class*, check neither the assigned singular of plural produces entailment, neutral, or contradiction. contrast with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 87.80%, attacked acc: 68.70%, dropped acc: 19.10%
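-
-For most of the rows in these attack sections, the "dropped acc" figure is simply the original accuracy minus the accuracy under attack (a few rows deviate slightly where the attacked prompt is unchanged). A minimal sketch of that bookkeeping; the `AttackResult` class is illustrative and not part of any evaluation harness used here:
-
-```python
-from dataclasses import dataclass
-
-@dataclass
-class AttackResult:
-    original_acc: float  # accuracy of the clean prompt, in percent
-    attacked_acc: float  # accuracy of the adversarially perturbed prompt, in percent
-
-    @property
-    def dropped_acc(self) -> float:
-        # e.g. the first bertattack entry above: 89.00 - 82.70 = 6.30
-        return round(self.original_acc - self.attacked_acc, 2)
-
-print(AttackResult(original_acc=89.00, attacked_acc=82.70).dropped_acc)  # 6.3
-```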
-
-## checklist
-
-Original prompt: Does the relationship between the given sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Does the relationship between the given sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction' 9vGXgnbWB8. "
-Original acc: 89.20%, attacked acc: 89.00%, dropped acc: 0.20%
-
-Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction' XStsD2pUzu. "
-Original acc: 88.60%, attacked acc: 88.40%, dropped acc: 0.20%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' d6ZQ3u0GBQ. "
-Original acc: 88.00%, attacked acc: 88.20%, dropped acc: -0.20%
-
-Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' RcRneWo6Iv. "
-Original acc: 88.80%, attacked acc: 88.50%, dropped acc: 0.30%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' d6ZQ3u0GBQ. "
-Original acc: 87.90%, attacked acc: 88.00%, dropped acc: -0.10%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction' 9vGXgnbWB8. "
-Original acc: 87.90%, attacked acc: 87.90%, dropped acc: 0.00%
-
-## deepwordbug
-
-Original prompt: Does the relationship between the given sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Does the relationshi between the giveGn sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 89.20%, attacked acc: 88.30%, dropped acc: 0.90%
-
-Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.60%, attacked acc: 88.60%, dropped acc: 0.60%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.00%, attacked acc: 88.00%, dropped acc: 0.60%
-
-Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"In the roe of a semantic elationship analyzer, exmine the connection between the givn sentences and decdie if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.80%, attacked acc: 87.80%, dropped acc: 1.00%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 87.90%, attacked acc: 87.90%, dropped acc: 0.90%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Wroking as an entailment classifier, identfiy whther the gven pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 87.90%, attacked acc: 86.40%, dropped acc: 1.50%
-
-## stresstest
-
-Original prompt: Does the relationship between the given sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Does the relationship between the given sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction' and true is true . "
-Original acc: 89.20%, attacked acc: 89.40%, dropped acc: -0.20%
-
-Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 88.60%, attacked acc: 88.90%, dropped acc: -0.30%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 88.00%, attacked acc: 88.60%, dropped acc: -0.60%
-
-Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' and true is true . "
-Original acc: 88.80%, attacked acc: 88.80%, dropped acc: 0.00%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction' and false is not true . "
-Original acc: 87.90%, attacked acc: 88.60%, dropped acc: -0.70%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction' and true is true . "
-Original acc: 87.90%, attacked acc: 88.00%, dropped acc: -0.10%
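-
-The checklist and stresstest perturbations above leave the prompt wording intact and only append a fixed suffix, either a random string such as "9vGXgnbWB8" or a tautology such as "and true is true", which is consistent with the very small accuracy drops. A rough sketch of such suffix attacks; the function names and the exact punctuation handling are illustrative assumptions, not the attack implementations used for these results:
-
-```python
-def checklist_attack(prompt: str, token: str = "9vGXgnbWB8") -> str:
-    """Append an irrelevant random string, as in the checklist rows above."""
-    return prompt.rstrip(". ") + f" {token}."
-
-def stresstest_attack(prompt: str, repeats: int = 1) -> str:
-    """Append the tautology 'and true is true', as in the stresstest rows above."""
-    return prompt.rstrip(". ") + " " + " ".join(["and true is true"] * repeats) + " ."
-```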
-
-## textbugger
-
-Original prompt: Does the relationship between the given sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Does the relation between the awarded sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 89.10%, attacked acc: 88.10%, dropped acc: 1.00%
-
-Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Examined the doublet of sentence\xd1\x95 and dete\xe2\xb2\x85mine if they exibit entailment, neutral, or contradiction. Asnwer with neither 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.70%, attacked acc: 84.30%, dropped acc: 4.40%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Identify both the given pair of sentences demonstrate\xd1\x95 entailment, neutral, or contradiction. Answr with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.10%, attacked acc: 86.50%, dropped acc: 1.60%
-
-Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"In the role of a semantic relation anal\xd1\x83zer, examine the connection between the given sentences and deci\xd4\x81e if they exhibit entailment, neutral, or contradiction. Anwer with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.80%, attacked acc: 87.50%, dropped acc: 1.30%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 87.90%, attacked acc: 87.90%, dropped acc: 0.90%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Working as an entailment classifier, identify both the g\xd1\x96ven pair of se\xd5\xb8tences displaying entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 87.90%, attacked acc: 85.70%, dropped acc: 2.20%
-
-## textfooler
-
-Original prompt: Does the relationship between the given sentences represent entailment, neutral, or contradiction? Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Did the tying between the made judgement represent entailment, neutral, or contradiction? Referee with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 89.10%, attacked acc: 85.60%, dropped acc: 3.50%
-
-Original prompt: Examine the pair of sentences and determine if they exhibit entailment, neutral, or contradiction. Answer with either 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Examine the pair of phrases and establish if they exhibit entailment, neutral, or contradiction. Answered with either 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.70%, attacked acc: 87.70%, dropped acc: 1.00%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Identify whether the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.10%, attacked acc: 88.10%, dropped acc: 0.60%
-
-Original prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Within the feature of a semantic link analyzer, exam the connection between the given sentence and defining if they exhibit entailment, neutral, or contradiction. Address with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 88.80%, attacked acc: 86.50%, dropped acc: 2.30%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment, neutral, or contradiction. Answer with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 87.90%, attacked acc: 87.90%, dropped acc: 0.90%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment, neutral, or contradiction. Respond with 'entailment', 'neutral', or 'contradiction'.
-Attacked prompt: b"Conjunction as an entailment classifier, detects whether the given matched of sentences demonstrates entailment, neutral, or contradiction. Reply with 'entailment', 'neutral', or 'contradiction'. "
-Original acc: 87.90%, attacked acc: 86.10%, dropped acc: 1.80%
-
-## translation
-
-Language: chinese, acc: 84.40%, prompt: As an implication analysis tool, evaluate the relationships between given sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: chinese, acc: 84.50%, prompt: Analyze the relationships between the given sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: chinese, acc: 85.20%, prompt: Evaluate the logical relationship between the following sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: chinese, acc: 84.70%, prompt: In your role as an implication analysis tool, judge the relationships of the following sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: chinese, acc: 86.20%, prompt: Please judge the relationship between the following sentences as 'entailment', 'neutral', or 'contradiction'.
-Language: chinese, acc: 84.30%, prompt: From the given sentences, evaluate the relationship between them and classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: chinese, acc: 85.60%, prompt: Please classify the following sentences as 'entailment', 'neutral', or 'contradiction' according to their relationships.
-Language: chinese, acc: 85.20%, prompt: Evaluate the logical relationship between the following sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: chinese, acc: 84.70%, prompt: As an implication analysis tool, categorize the relationships between given sentences as 'entailment', 'neutral', or 'contradiction'.
-Language: chinese, acc: 85.40%, prompt: Please classify the following sentences as 'entailment', 'neutral', or 'contradiction' according to their logical relationship.
-Language: french, acc: 84.60%, prompt: As a tool for analyzing the consequence relationship, evaluate the relationship between the given sentences and classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: french, acc: 85.00%, prompt: Evaluate the relationship between the given sentences and classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: french, acc: 86.90%, prompt: Determine whether the following sentences are related to 'entailment', 'neutral', or 'contradiction'.
-Language: french, acc: 84.70%, prompt: In your role as a consequence analysis tool, evaluate the relationship between the given sentences and classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: french, acc: 85.10%, prompt: Classify the relationship between the following sentences as 'entailment', 'neutral', or 'contradiction'.
-Language: french, acc: 84.60%, prompt: As a consequence analysis tool, evaluate the relationship between the given sentences and classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: french, acc: 86.10%, prompt: Analyze the relationship between the given sentences and determine whether it is of 'entailment', 'neutral', or 'contradiction'.
-Language: french, acc: 85.60%, prompt: Evaluate the relationship between the following sentences and classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: french, acc: 84.60%, prompt: As a tool for analyzing the consequence relationship, classify the following sentences as 'entailment', 'neutral', or 'contradiction'.
-Language: french, acc: 86.40%, prompt: Determine whether the given sentences are related to 'entailment', 'neutral', or 'contradiction'.
-Language: arabic, acc: 84.90%, prompt: Based on your role as a reasoning analyst, analyze the relationship between the given sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: arabic, acc: 84.50%, prompt: Evaluate the relationship between given sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: arabic, acc: 87.10%, prompt: Determine if the following sentences are 'entailment', 'neutral', or 'contradiction'.
-Language: arabic, acc: 84.60%, prompt: In your role as a tool of reasoning analysis, investigate the relationship between sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: arabic, acc: 85.10%, prompt: Classify the relationship between the following sentences as 'entailment', 'neutral', or 'contradiction'.
-Language: arabic, acc: 84.20%, prompt: In your role as a tool of reasoning analysis, evaluate the relationship between the given sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: arabic, acc: 85.70%, prompt: Analyze the relationship between the given sentences and determine if they are 'entailment', 'neutral', or 'contradiction'.
-Language: arabic, acc: 85.60%, prompt: Evaluate the relationship between the following sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: arabic, acc: 85.20%, prompt: In your role as a tool of reasoning analysis, classify the following sentences as 'entailment', 'neutral', or 'contradiction'.
-Language: arabic, acc: 86.50%, prompt: Determine if the sentences given are 'entailment', 'neutral', or 'contradiction'.
-Language: spanish, acc: 84.20%, prompt: In your role as an implication analysis tool, evaluate the relationship between the given phrases and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: spanish, acc: 84.40%, prompt: Determine whether there is 'entailment', 'neutral', or 'contradiction' between the sentences given, using this text analysis tool,
-Language: spanish, acc: 84.40%, prompt: Analyze the relationship between the two sentences and classify it as 'entailment', 'neutral', or 'contradiction' using this text classification tool,
-Language: spanish, acc: 84.90%, prompt: Using this implication analysis tool, decide whether the sentences given are related by 'entailment', 'neutral', or 'contradiction'.
-Language: spanish, acc: 84.30%, prompt: Classify the relationship between the given phrases as 'entailment', 'neutral', or 'contradiction' using this text analysis tool,
-Language: spanish, acc: 83.50%, prompt: Evaluate whether there is 'entailment', 'neutral', or 'contradiction' between the sentences provided using this text classification tool,
-Language: spanish, acc: 85.00%, prompt: Using this implication analysis tool, decide whether the two sentences are related by 'entailment', 'neutral', or 'contradiction'.
-Language: spanish, acc: 84.80%, prompt: Determine whether the given phrases are related by 'entailment', 'neutral', or 'contradiction' using this text analysis tool,
-Language: spanish, acc: 84.60%, prompt: Analyze the relationship between the two sentences and classify it as 'entailment', 'neutral', or 'contradiction' using this text analysis tool,
-Language: spanish, acc: 84.50%, prompt: Using this text classification tool, classify the relationship between the given phrases as 'entailment', 'neutral', or 'contradiction'.
-Language: japanese, acc: 84.40%, prompt: In your role as an implication analysis tool, evaluate the relationship of a given sentence and classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: japanese, acc: 84.20%, prompt: Use the implication analysis tool as your role to evaluate the relationship of a given sentence and classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: japanese, acc: 84.60%, prompt: Use this text classification tool to categorize relationships in a given text as 'entailment', 'neutral', or 'contradiction'.
-Language: japanese, acc: 83.80%, prompt: Use the implication analysis tool as your role and classify the relationship of a given sentence as 'entailment', 'neutral', or 'contradiction'.
-Language: japanese, acc: 84.50%, prompt: Evaluate the relationship of a given sentence and use this text classification tool to classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: japanese, acc: 84.40%, prompt: Evaluate the relationship of a given sentence and use this text classification tool to accurately classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: japanese, acc: 83.40%, prompt: Use the implication analysis tool as your role and use this text classification tool to classify the relationship of a given sentence as 'entailment', 'neutral', or 'contradiction'.
-Language: japanese, acc: 84.20%, prompt: Use this text classification tool to evaluate the relationship of a given sentence and classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: japanese, acc: 84.10%, prompt: Use the implication analysis tool as your role, evaluate the relationship of a given sentence, and use this text classification tool to classify it as 'entailment', 'neutral', or 'contradiction'.
-Language: japanese, acc: 83.90%, prompt: Use the implication analysis tool as your role and categorize the relationship of a given sentence strictly as 'entailment', 'neutral', or 'contradiction' using this text classification tool.
-Language: korean, acc: 84.80%, prompt: Analyze the relationships between given sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: korean, acc: 84.60%, prompt: In the text categorization task, identify the relationship between given sentences as one of 'entailment', 'neutral', or 'contradiction'.
-Language: korean, acc: 84.50%, prompt: Perform the role of analyzing the relationship between sentences and classifying them as 'entailment', 'neutral', or 'contradiction'.
-Language: korean, acc: 84.70%, prompt: Evaluate the relationship between two given sentences, and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: korean, acc: 84.20%, prompt: In the text categorization task, perform the role of classifying relationships between given sentences as 'entailment', 'neutral', or 'contradiction'.
-Language: korean, acc: 84.20%, prompt: Judge the associations between sentences, and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: korean, acc: 84.60%, prompt: Analyze the relationship between two given sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: korean, acc: 84.20%, prompt: In the task of text classification, identify the relationships between given sentences as 'entailment', 'neutral', or 'contradiction'.
-Language: korean, acc: 84.60%, prompt: Act as an instrument to evaluate the relationships between sentences, and classify them as 'entailment', 'neutral', or 'contradiction'.
-Language: korean, acc: 84.10%, prompt: Analyze the associations of two given sentences and classify them as 'entailment', 'neutral', or 'contradiction'.
-
-# mrpc
-
-## 10 prompts
-
-Acc: 87.01%, prompt: Can the given sentences be considered semantically identical? Please reply with 'equivalent' or 'not_equivalent'.
-Acc: 86.76%, prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'.
-Acc: 86.27%, prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'.
-Acc: 86.27%, prompt: Please analyze the provided sentences and indicate if their meanings are the same by choosing 'equivalent' or 'not_equivalent'.
-Acc: 86.03%, prompt: Are the meanings of the following pair of sentences the same? Answer with 'equivalent' or 'not_equivalent'.
-Acc: 85.78%, prompt: Evaluate whether the two provided sentences convey the same meaning by answering 'equivalent' or 'not_equivalent'.
-Acc: 85.54%, prompt: Do the meanings of these two statements align? Indicate your answer with 'equivalent' or 'not_equivalent'.
-Acc: 85.29%, prompt: Examine the pair of sentences and decide if their meanings are identical by answering with 'equivalent' or 'not_equivalent'.
-Acc: 83.82%, prompt: Determine if the meanings of the following sentences are semantically equivalent by responding with 'equivalent' or 'not_equivalent'.
-Acc: 83.58%, prompt: Assess if the two given sentences have equivalent meanings by selecting 'equivalent' or 'not_equivalent'.
-
-Acc: 88.97%, prompt: As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent'.
-Acc: 87.50%, prompt: In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent'.
-Acc: 87.50%, prompt: As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'.
-Acc: 87.25%, prompt: In your capacity as a semantic comparison tool, examine the given sentences and decide if their meanings are identical, responding with 'equivalent' or 'not_equivalent'.
-Acc: 86.76%, prompt: In the role of a textual comparison specialist, examine the given sentences and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.
-Acc: 86.27%, prompt: In your capacity as a semantic assessment tool, evaluate the provided sentences and classify their meanings as 'equivalent' or 'not_equivalent'.
-Acc: 86.03%, prompt: As a language comparison expert, examine the given pair of sentences and decide if their meanings align, answering with 'equivalent' or 'not_equivalent'.
-Acc: 86.03%, prompt: In the role of a sentence comparison analyst, assess the provided sentences and indicate if they convey the same meaning by selecting 'equivalent' or 'not_equivalent'.
-Acc: 85.78%, prompt: As a linguistic comparator, review the following pair of sentences and determine their semantic equivalence by choosing 'equivalent' or 'not_equivalent'.
-Acc: 85.29%, prompt: As a semantic comparison expert, evaluate the given pair of sentences and determine if they are 'equivalent' or 'not_equivalent'.
-
-## bertattack
-
-Original prompt: Can the given sentences be considered semantically identical? Please reply with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"can the related sentences be both semantically inferior? should answer with 'equivalent' or 'not_equivalent'. "
-Original acc: 87.01%, attacked acc: 64.46%, dropped acc: 22.55%
-
-Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"like the following sentences and determine if they lack the same plural meaning by responding with 'equivalent' or 'not_equivalent'. "
-Original acc: 86.76%, attacked acc: 39.71%, dropped acc: 47.06%
-
-Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"don these two sentences have the same associated context? Respond with 'equivalent' or 'not_equivalent'. "
-Original acc: 86.27%, attacked acc: 84.31%, dropped acc: 1.96%
-
-Original prompt: As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a sentence compare evaluators, view the derived variables and indicate if their meanings are 'equivalent' or 'not_equivalent'. "
-Original acc: 88.97%, attacked acc: 81.37%, dropped acc: 7.60%
-
-Original prompt: In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"In your capacity as a language analytic, evaluate the two sentences and see their difference as 'equivalent' or 'not_equivalent'. "
-Original acc: 87.50%, attacked acc: 82.11%, dropped acc: 5.39%
-
-Original prompt: As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a case distinction evaluator, analyze the following combining of grammatical and determine if they are semantically 'equivalent' or 'not_equivalent'. "
-Original acc: 87.50%, attacked acc: 81.37%, dropped acc: 6.13%
-
-## checklist
-
-Original prompt: Can the given sentences be considered semantically identical? Please reply with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Can the given sentences be considered semantically identical? Please reply with 'equivalent' or 'not_equivalent' zexKUTWJYG. "
-Original acc: 87.01%, attacked acc: 86.52%, dropped acc: 0.49%
-
-Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent' BWmsLx8xOA. "
-Original acc: 86.76%, attacked acc: 86.52%, dropped acc: 0.25%
-
-Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent' 6a4Yn3RGVc. "
-Original acc: 86.27%, attacked acc: 85.78%, dropped acc: 0.49%
-
-Original prompt: As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent' XStsD2pUzu. "
-Original acc: 88.97%, attacked acc: 88.73%, dropped acc: 0.25%
-
-Original prompt: In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent' 9vGXgnbWB8. "
-Original acc: 87.50%, attacked acc: 87.50%, dropped acc: 0.00%
-
-Original prompt: As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent' ofw9fEkN5R. "
-Original acc: 87.50%, attacked acc: 87.25%, dropped acc: 0.25%
-
-## deepwordbug
-
-Original prompt: Can the given sentences be considered semantically identical? Please reply with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Can the given sentences be considered semantically idrntical? Please reply with 'equivalent' or 'not_equivalent'. "
-Original acc: 87.01%, attacked acc: 84.31%, dropped acc: 2.70%
-
-Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Compare the following senteces and determine if they sharSe the same semantic meaniCg by responding with 'equivalent' or 'not_equivalent'. "
-Original acc: 86.76%, attacked acc: 85.29%, dropped acc: 1.47%
-
-Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Do these two seRtences have the same underlying mFeaning? Respond with 'equivalent' or 'not_equivalent'. "
-Original acc: 86.27%, attacked acc: 85.29%, dropped acc: 0.98%
-
-Original prompt: As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a sentence siimlarity evaluator, anawyze the providBed sentences and indicate if their meanCings are 'equivalent' or 'not_equivalent'. "
-Original acc: 88.97%, attacked acc: 84.80%, dropped acc: 4.17%
-
-Original prompt: In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent'. "
-Original acc: 87.50%, attacked acc: 87.50%, dropped acc: 1.47%
-
-Original prompt: As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a extual simialrity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'. "
-Original acc: 87.50%, attacked acc: 84.56%, dropped acc: 2.94%
-
-## stresstest
-
-Original prompt: Can the given sentences be considered semantically identical? Please reply with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Can the given sentences be considered semantically identical? Please reply with 'equivalent' or 'not_equivalent' and false is not true . "
-Original acc: 87.01%, attacked acc: 86.52%, dropped acc: 0.49%
-
-Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent' and false is not true . "
-Original acc: 86.76%, attacked acc: 85.78%, dropped acc: 0.98%
-
-Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent' and false is not true . "
-Original acc: 86.27%, attacked acc: 85.78%, dropped acc: 0.49%
-
-Original prompt: As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 88.97%, attacked acc: 87.75%, dropped acc: 1.23%
-
-Original prompt: In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent' and false is not true . "
-Original acc: 87.50%, attacked acc: 87.25%, dropped acc: 0.25%
-
-Original prompt: As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent' and false is not true . "
-Original acc: 87.50%, attacked acc: 87.01%, dropped acc: 0.49%
-
-## textbugger
-
-Original prompt: Can the given sentences be considered semantically identical? Please reply with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Can the given sentences be deemed semantically idntical? Please reply with 'equivalent' or 'not_equivalent'. "
-Original acc: 87.01%, attacked acc: 83.82%, dropped acc: 3.19%
-
-Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Compare the later sentences and determine if they share the same semnatic meannig by responding with 'equivalent' or 'not_equivalent'. "
-Original acc: 86.76%, attacked acc: 84.80%, dropped acc: 1.96%
-
-Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Do these two verdicts have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'. "
-Original acc: 86.27%, attacked acc: 85.05%, dropped acc: 1.23%
-
-Original prompt: As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Since a sentence sim ilarity evaluator, analyze the gave sentences and indicate if their meanings are 'equivalent' or 'not_equivalent'. "
-Original acc: 88.97%, attacked acc: 85.54%, dropped acc: 3.43%
-
-Original prompt: In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"In your capacity as a language \xc9\x91nalyst, assess the following sentenes and classify their metaphor as 'equivalent' or 'not_equivalent'. "
-Original acc: 87.50%, attacked acc: 83.58%, dropped acc: 3.92%
-
-Original prompt: As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Since a t\xd0\xb5xtual portrait evaluator, analyze the following pai\xe2\xb2\x85 of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'. "
-Original acc: 87.50%, attacked acc: 84.31%, dropped acc: 3.19%
-
-## textfooler
-
-Original prompt: Can the given sentences be considered semantically identical? Please reply with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Can the given sentences be dealt semantically equivalent? Prayed reply with 'equivalent' or 'not_equivalent'. "
-Original acc: 87.01%, attacked acc: 84.31%, dropped acc: 2.70%
-
-Original prompt: Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Compare the later judgements and defines if they exchange the same semantic sensation by answered with 'equivalent' or 'not_equivalent'. "
-Original acc: 86.52%, attacked acc: 83.58%, dropped acc: 2.94%
-
-Original prompt: Do these two sentences have the same underlying meaning? Respond with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Do these two judging am the same underlying meaning? Answer with 'equivalent' or 'not_equivalent'. "
-Original acc: 86.27%, attacked acc: 84.56%, dropped acc: 1.72%
-
-Original prompt: As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a sentence metaphor evaluator, analyze the brought priors and demonstrating if their meanings are 'equivalent' or 'not_equivalent'. "
-Original acc: 89.22%, attacked acc: 85.29%, dropped acc: 3.92%
-
-Original prompt: As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'. "
-Original acc: 87.75%, attacked acc: 87.75%, dropped acc: 1.47%
-
-Original prompt: In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"In your capacity as a language analyst, assess the following sentence and classify their portrait as 'equivalent' or 'not_equivalent'. "
-Original acc: 87.50%, attacked acc: 85.29%, dropped acc: 2.21%
-
-## translation
-
-Language: chinese, acc: 85.54%, prompt: As an expert in semantic comparison, evaluate the two given sentences to determine whether they are 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 85.54%, prompt: From the point of view of semantic comparison, determine whether this pair of sentences is 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 85.29%, prompt: Using the semantic comparison method, determine whether the following two statements are 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 85.05%, prompt: For the following two sentences, determine whether they are 'equivalent' or 'not_equivalent' based on semantic comparison.
-Language: chinese, acc: 85.29%, prompt: As an expert in semantic comparison, please evaluate the following two sentences and determine if they are 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 86.03%, prompt: Using semantic comparison techniques, determine whether the following two sentences are 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 85.29%, prompt: Please determine whether the following two sentences are 'equivalent' or 'not_equivalent' according to the standard of semantic comparison.
-Language: chinese, acc: 85.05%, prompt: As an expert in the field of semantic comparison, please evaluate the following two sentences and determine whether they are 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 85.54%, prompt: Using semantic comparison, determine whether the following two sentences are 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 85.05%, prompt: Determine whether the following two sentences are 'equivalent' or 'not_equivalent' based on semantic comparison.
-Language: french, acc: 84.80%, prompt: As an expert in semantic comparison, evaluate the following pair of sentences and determine whether they are 'equivalent' or 'not_equivalent'.
-Language: french, acc: 84.80%, prompt: Can you determine whether the following two sentences are 'equivalent' or 'not_equivalent' as a semantic comparison expert?
-Language: french, acc: 85.29%, prompt: Using your expertise in semantic comparison, determine whether the following two sentences are 'equivalent' or 'not_equivalent'.
-Language: french, acc: 87.50%, prompt: As a semantic comparison specialist, assess the similarity between the following two sentences and determine whether they are 'equivalent' or 'not_equivalent'.
-Language: french, acc: 84.80%, prompt: Are you able to determine whether the following two sentences are 'equivalent' or 'not_equivalent' as an expert in semantic comparison?
-Language: french, acc: 85.29%, prompt: As a semantic comparison professional, evaluate the following pair of sentences and indicate whether they are 'equivalent' or 'not_equivalent'.
-Language: french, acc: 85.54%, prompt: Can you determine whether the following two sentences have an 'equivalent' or 'not_equivalent' meaning as an expert in semantic comparison?
-Language: french, acc: 87.75%, prompt: As an expert in semantic comparison, assess the similarity between the following two sentences and determine whether they are 'equivalent' or 'not_equivalent'.
-Language: french, acc: 85.29%, prompt: Using your expertise in semantic comparison, determine whether the following two sentences are 'equivalent' or 'not_equivalent' in terms of meaning.
-Language: french, acc: 87.75%, prompt: As a semantic comparison professional, assess the similarity between the following two sentences and indicate whether they are 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 85.05%, prompt: As an expert in semantic comparison, evaluate the two given sentences and determine whether they are 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 86.76%, prompt: Based on my experience in semantic analysis, classify the following two sentences as 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 85.29%, prompt: As an expert in semantic comparison, analyze the following two sentences and classify them as 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 85.29%, prompt: Your task as an expert in semantic comparison is to evaluate the following two sentences and determine whether they are 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 86.03%, prompt: As a semantic comparison specialist, analyze the two given statements and place them into one of the following categories: 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 85.78%, prompt: Based on my experience in semantic analysis, classify the following two sentences between 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 84.80%, prompt: Your role as a semantic comparison specialist requires analyzing the two given sentences and determining whether they are 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 86.76%, prompt: As an experienced semantic analyst, classify the following two sentences as 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 85.54%, prompt: Your job as a semantic analyst is to evaluate the following two sentences as 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 85.78%, prompt: As a semantic analyst, determine whether the given sentences are 'equivalent' or 'not_equivalent' based on their relationship.
-Language: spanish, acc: 84.80%, prompt: As an expert in semantic comparison, evaluate the pair of sentences provided and determine whether they are 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 86.76%, prompt: Based on my experience in semantic analysis, classify the following two sentences as 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 85.54%, prompt: As an expert in semantic comparison, analyze the two sentences given and classify them as 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 84.80%, prompt: Your task as a semantic comparison specialist is to evaluate the following two sentences and determine whether they are 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 85.78%, prompt: As an expert in semantic analysis, classify the following two sentences as 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 85.29%, prompt: Based on your experience of semantic comparison, classify the next two sentences as 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 86.27%, prompt: As a specialist in semantic analysis, you are given the task of analysing the two sentences given and classifying them as 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 85.05%, prompt: As an expert in semantic comparison, classify the following two sentences into 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 86.27%, prompt: As a specialist in semantic analysis, evaluate the following two sentences and classify them as 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 85.78%, prompt: Your task as an expert in semantic comparison is to analyze the two sentences provided and determine whether they are 'equivalent' or 'not_equivalent' based on their semantic relationship.
-Language: japanese, acc: 84.80%, prompt: Evaluate whether a given pair of sentences is 'equivalent' or 'not_equivalent', depending on the context.
-Language: japanese, acc: 85.29%, prompt: Use a semantic comparison to determine whether a given pair of sentences is 'equivalent' or 'not_equivalent'.
-Language: japanese, acc: 85.78%, prompt: Evaluate a given pair of sentences as 'equivalent' or 'not_equivalent' by determining whether they have the same semantic meaning.
-Language: japanese, acc: 85.78%, prompt: Determine whether a given pair of sentences is synonymous and evaluate whether they are 'equivalent' or 'not_equivalent'.
-Language: japanese, acc: 85.54%, prompt: Determine whether a given pair of sentences is 'equivalent' or 'not_equivalent', and whether they are semantically identical.
-Language: japanese, acc: 85.29%, prompt: Determine whether a given pair of sentences has the same meaning and evaluate whether they are 'equivalent' or 'not_equivalent'.
-Language: japanese, acc: 85.78%, prompt: Evaluate whether a given pair of sentences is 'equivalent' or 'not_equivalent' by determining whether they are semantically identical.
-Language: japanese, acc: 85.05%, prompt: Judge whether a given pair of sentences is equal and evaluate whether they are 'equivalent' or 'not_equivalent'.
-Language: japanese, acc: 85.54%, prompt: Determine whether a given pair of sentences are semantically equal and evaluate whether they are 'equivalent' or 'not_equivalent'.
-Language: japanese, acc: 84.07%, prompt: Whether a given pair of sentences is 'equivalent' or 'not_equivalent' depends on the context.
-Language: korean, acc: 85.29%, prompt: As a sentence comparator, evaluate the two sentences given to determine 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 86.03%, prompt: Compare two sentences to determine 'equivalent' or 'not_equivalent'. For this you need qualifications as a specialist in semantic comparison.
-Language: korean, acc: 85.29%, prompt: It takes your knowledge as an expert in semantic comparison to determine whether two sentences are 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 85.05%, prompt: As a specialist in semantic comparison, evaluate whether two given sentences are 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 85.54%, prompt: Analyze two sentences to determine 'equivalent' or 'not_equivalent'. For that you need the knowledge of a semantic comparison expert.
-Language: korean, acc: 85.78%, prompt: As an expert in semantic comparison, decide whether two sentences are 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 85.05%, prompt: It takes the knowledge of an expert in semantic comparison to compare two sentences to judge 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 84.80%, prompt: Experience as an expert in semantic comparison is required to determine whether two given sentences are 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 85.29%, prompt: As an expert in semantic comparison, determine whether two sentences are 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 85.78%, prompt: Analyze two sentences to determine 'equivalent' or 'not_equivalent'. For this, you need a qualification as a specialist in semantic comparison.
-
-# qnli
-
-## 10 prompts
-
-Acc: 94.50%, prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'.
-Acc: 94.00%, prompt: Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment'.
-Acc: 94.00%, prompt: Review the given context and question, and decide if the context contains enough information to support the answer by selecting 'entailment' or 'not_entailment'.
-Acc: 93.90%, prompt: Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment'.
-Acc: 93.70%, prompt: Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment'.
-Acc: 93.50%, prompt: Given the question and context provided, determine if the answer can be inferred by choosing 'entailment' or 'not_entailment'.
-Acc: 93.50%, prompt: Examine the context and question, and determine if the context logically implies the answer by selecting 'entailment' or 'not_entailment'.
-Acc: 93.20%, prompt: Please assess if the answer to the question can be derived from the given context by selecting 'entailment' or 'not_entailment'.
-Acc: 93.00%, prompt: Consider the context and question, and indicate if the answer can be logically deduced from the context by responding with 'entailment' or 'not_entailment'.
-Acc: 93.00%, prompt: Assess if the answer to the question can be logically concluded from the provided context by choosing 'entailment' or 'not_entailment'.
-
-Acc: 93.90%, prompt: As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.
-Acc: 93.70%, prompt: In your role as a semantic evaluator, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'.
-Acc: 93.70%, prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'.
-Acc: 93.50%, prompt: In the capacity of a language evaluator, examine if the given context justifies the answer to the question and indicate your assessment with 'entailment' or 'not_entailment'.
-Acc: 93.40%, prompt: As a language expert, assess if the given context entails the answer to the question and respond with 'entailment' or 'not_entailment'.
-Acc: 93.30%, prompt: As a textual analyst, examine if the given context logically implies the answer to the question and indicate your decision with 'entailment' or 'not_entailment'.
-Acc: 93.30%, prompt: In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'.
-Acc: 93.20%, prompt: In the capacity of a language specialist, decide if the context presented contains enough information to infer the answer to the question and respond with 'entailment' or 'not_entailment'.
-Acc: 93.00%, prompt: As a textual inference expert, analyze if the answer to the question can be deduced from the provided context and select 'entailment' or 'not_entailment'.
-Acc: 92.90%, prompt: As a linguistic consultant, decide if the answer to the question is logically supported by the provided context and respond with 'entailment' or 'not_entailment'.
-
-## bertattack
-
-Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Based on the provided context and knowledge, then if the form permits the answer by responses with 'entailment' or 'not_entailment'. "
-Original acc: 94.50%, attacked acc: 92.20%, dropped acc: 2.30%
-
-Original prompt: Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment'.
-Attacked prompt: b"Analyze the context and question, and indicate if the dictionary ennotes the context by avoiding 'entailment' or 'not_entailment'. "
-Original acc: 94.00%, attacked acc: 88.80%, dropped acc: 5.20%
-
-Original prompt: Review the given context and question, and decide if the context contains enough information to support the answer by selecting 'entailment' or 'not_entailment'.
-Attacked prompt: b"review the requested sentence and question, and do if the context contains enough information to further the asking by selecting 'entailment' or 'not_entailment'. "
-Original acc: 94.00%, attacked acc: 92.60%, dropped acc: 1.40%
-
-Original prompt: As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.
-Attacked prompt: b"are a semantic analysis, evaluate assuming the provided context supports the answers to the answer and reject 'entailment' or 'not_entailment'. "
-Original acc: 93.90%, attacked acc: 90.20%, dropped acc: 3.70%
-
-Original prompt: In your role as a semantic evaluator, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"if your performance as a semantic evaual-, assess if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'. "
-Original acc: 93.60%, attacked acc: 92.70%, dropped acc: 0.90%
-
-Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"with a semantic presentation, evaluation why the provided contexts supports the answer to the given answer and questions with 'entailment' or 'not_entailment'. "
-Original acc: 93.60%, attacked acc: 83.50%, dropped acc: 10.10%
-
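-Each attack record in this and the following subsections lists the accuracy of the original prompt, the accuracy under the attacked prompt, and their difference as the dropped accuracy (a negative value means the perturbed prompt happened to score slightly higher). A minimal sketch of that bookkeeping is given below; the helper name `dropped_acc` is illustrative and not taken from the original evaluation code.
-
-```python
-# Illustrative helper (not the original evaluation script): given the clean and
-# attacked accuracies in percent, report the drop exactly as in the records above.
-def dropped_acc(original_acc: float, attacked_acc: float) -> float:
-    """Accuracy lost to the attack, in percentage points; negative if the attack helped."""
-    return round(original_acc - attacked_acc, 2)
-
-# Example taken from the first bertattack record above.
-assert dropped_acc(94.50, 92.20) == 2.30
-```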
-## checklist
-
-Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment' EAB4KP2NVY. "
-Original acc: 94.50%, attacked acc: 94.20%, dropped acc: 0.30%
-
-Original prompt: Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment'.
-Attacked prompt: b"Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment' lf8wBa2yEm. "
-Original acc: 94.00%, attacked acc: 93.40%, dropped acc: 0.60%
-
-Original prompt: Review the given context and question, and decide if the context contains enough information to support the answer by selecting 'entailment' or 'not_entailment'.
-Attacked prompt: b"Review the given context and question, and decide if the context contains enough information to support the answer by selecting 'entailment' or 'not_entailment' SFPCqpiClT. "
-Original acc: 94.00%, attacked acc: 93.70%, dropped acc: 0.30%
-
-Original prompt: As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment' AMsRIKZniY. "
-Original acc: 93.90%, attacked acc: 93.60%, dropped acc: 0.30%
-
-Original prompt: In your role as a semantic evaluator, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"In your role as a semantic evaluator, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment' zq0DcZ5dnI. "
-Original acc: 93.70%, attacked acc: 93.50%, dropped acc: 0.20%
-
-Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment' 9vGXgnbWB8. "
-Original acc: 93.70%, attacked acc: 93.60%, dropped acc: 0.10%
-
-## deepwordbug
-
-Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Based on the provided context and questJon, decide if the informaton supforts the answer by responding with 'entailment' or 'not_entailment'. "
-Original acc: 94.50%, attacked acc: 93.90%, dropped acc: 0.60%
-
-Original prompt: Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment'.
-Attacked prompt: b"Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment'. "
-Original acc: 94.00%, attacked acc: 94.00%, dropped acc: 0.50%
-
-Original prompt: Review the given context and question, and decide if the context contains enough information to support the answer by selecting 'entailment' or 'not_entailment'.
-Attacked prompt: b"Review the given cotext and question, and Pdecide if the context contains enough information to sGupport the answe by sefecting 'entailment' or 'not_entailment'. "
-Original acc: 94.00%, attacked acc: 93.40%, dropped acc: 0.60%
-
-Original prompt: As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a seantic researcher, evaluate hether the provided context suports the answer to the question and ahoose 'entailment' or 'not_entailment'. "
-Original acc: 93.90%, attacked acc: 93.10%, dropped acc: 0.80%
-
-Original prompt: In your role as a semantic evaluator, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"In your role as a Pemantic evaluaztor, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'. "
-Original acc: 93.70%, attacked acc: 93.20%, dropped acc: 0.50%
-
-Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a semantic interpreter, assless wether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'. "
-Original acc: 93.70%, attacked acc: 92.90%, dropped acc: 0.80%
-
-## stresstest
-
-Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment' and false is not true . "
-Original acc: 94.50%, attacked acc: 94.60%, dropped acc: -0.10%
-
-Original prompt: Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment'.
-Attacked prompt: b"Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 94.00%, attacked acc: 93.90%, dropped acc: 0.10%
-
-Original prompt: Review the given context and question, and decide if the context contains enough information to support the answer by selecting 'entailment' or 'not_entailment'.
-Attacked prompt: b"Review the given context and question, and decide if the context contains enough information to support the answer by selecting 'entailment' or 'not_entailment' and false is not true . "
-Original acc: 94.00%, attacked acc: 94.30%, dropped acc: -0.30%
-
-Original prompt: As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment' and true is true . "
-Original acc: 93.90%, attacked acc: 94.00%, dropped acc: -0.10%
-
-Original prompt: In your role as a semantic evaluator, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"In your role as a semantic evaluator, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 93.70%, attacked acc: 94.10%, dropped acc: -0.40%
-
-Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment' and true is true . "
-Original acc: 93.70%, attacked acc: 94.10%, dropped acc: -0.40%
-
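-The checklist rows earlier and the stresstest rows above share one construction: the instruction itself is left untouched and a fixed distractor string is appended at the end (a random-looking token for checklist, a tautology such as "and true is true" for stresstest), which is why the accuracy drops stay small. The sketch below reproduces the checklist formatting shown above; the helper name `append_suffix` is an illustrative assumption rather than the original attack code, and the stresstest rows differ only in trailing whitespace.
-
-```python
-# Illustrative sketch (not the original attack implementation): append a fixed
-# distractor suffix before the trailing period, as in the checklist rows above.
-def append_suffix(prompt: str, suffix: str) -> str:
-    """Return the prompt with the distractor suffix appended, keeping the instruction intact."""
-    return prompt.rstrip(". ") + f" {suffix}. "
-
-# Example taken from the second checklist record above.
-original = ("Analyze the context and question, and indicate if the context entails "
-            "the answer by choosing 'entailment' or 'not_entailment'.")
-assert append_suffix(original, "lf8wBa2yEm").endswith("'not_entailment' lf8wBa2yEm. ")
-```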
-## textbugger
-
-Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Based on the provided context and que\xd1\x95tion, decide if the details supporting the answer by responding with 'entailment' or 'not_entailment'. "
-Original acc: 94.50%, attacked acc: 93.80%, dropped acc: 0.70%
-
-Original prompt: Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment'.
-Attacked prompt: b"Analyze the context and question, and indicate if the context implies the answer by choosing 'entailment' or 'not_entailment'. "
-Original acc: 94.00%, attacked acc: 93.30%, dropped acc: 0.70%
-
-Original prompt: Review the given context and question, and decide if the context contains enough information to support the answer by selecting 'entailment' or 'not_entailment'.
-Attacked prompt: b"Review the given co\xd5\xb8text and question, and dcide if the context contains enough particulars to support the answe\xe2\xb2\x85 by selecting 'entailment' or 'not_entailment'. "
-Original acc: 94.00%, attacked acc: 92.90%, dropped acc: 1.10%
-
-Original prompt: As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a semantic investigator, evalu\xc9\x91te both the p\xe2\xb2\x85ovided context support the answer to the question and choose 'entailment' or 'not_entailment'. "
-Original acc: 93.90%, attacked acc: 93.10%, dropped acc: 0.80%
-
-Original prompt: In your role as a semantic evaluator, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"During your ro le as a semantic ealuator, deter mine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'. "
-Original acc: 93.70%, attacked acc: 92.70%, dropped acc: 1.00%
-
-Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Iike a semantic interpretation, evaluated lf the pr\xd0\xbevided context supports the answer to the given question and answers with 'entailment' or 'not_entailment'. "
-Original acc: 93.70%, attacked acc: 92.30%, dropped acc: 1.40%
-
-## textfooler
-
-Original prompt: Based on the provided context and question, decide if the information supports the answer by responding with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Based on the provided context and question, decide if the particulars brackets the answered by responding with 'entailment' or 'not_entailment'. "
-Original acc: 94.60%, attacked acc: 93.30%, dropped acc: 1.30%
-
-Original prompt: Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment'.
-Attacked prompt: b"Explore the context and question, and suggest if the context involves the rebuttal by selected 'entailment' or 'not_entailment'. "
-Original acc: 94.00%, attacked acc: 54.20%, dropped acc: 39.80%
-
-Original prompt: Review the given context and question, and decide if the context contains enough information to support the answer by selecting 'entailment' or 'not_entailment'.
-Attacked prompt: b"Scrutinized the given backdrop and theme, and choices if the context composed enough indications to backing the answer by choosing 'entailment' or 'not_entailment'. "
-Original acc: 94.00%, attacked acc: 92.30%, dropped acc: 1.70%
-
-Original prompt: As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.
-Attacked prompt: b"Because a semantic inquisitors, appraise whether the provided context alleges the rebuttal to the issuing and selector 'entailment' or 'not_entailment'. "
-Original acc: 93.90%, attacked acc: 61.50%, dropped acc: 32.40%
-
-Original prompt: In your role as a semantic evaluator, determine if the provided context justifies the answer to the question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"In your role as a semantic evaluator, determine if the presented context deserved the rebuttal to the questions and answering with 'entailment' or 'not_entailment'. "
-Original acc: 93.60%, attacked acc: 90.50%, dropped acc: 3.10%
-
-Original prompt: As a semantic interpreter, assess whether the provided context supports the answer to the given question and answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a semantic performer, appraised whether the provided framework supports the rebuttal to the supplied question and cope with 'entailment' or 'not_entailment'. "
-Original acc: 93.60%, attacked acc: 92.50%, dropped acc: 1.10%
-
-## translation
-
-Language: chinese, acc: 93.50%, prompt: Determine whether the answer is implied or not based on the context. Answer 'entailment' or 'not_entailment'.
-Language: chinese, acc: 94.30%, prompt: As a language expert, determine whether the text contains the answer and choose 'entailment' or 'not_entailment'.
-Language: chinese, acc: 93.70%, prompt: Determine whether the text implies an answer, and select 'entailment' or 'not_entailment'.
-Language: chinese, acc: 93.10%, prompt: Given the text and the question, whether the answer is implied, select 'entailment' or 'not_entailment'.
-Language: chinese, acc: 94.20%, prompt: Determine whether the text contains the answer, depending on the context. Select 'entailment' or 'not_entailment'.
-Language: chinese, acc: 93.80%, prompt: As a language expert, determine whether a text can contain an answer, and say 'entailment' or 'not_entailment'.
-Language: chinese, acc: 93.60%, prompt: Please determine whether the text implies an answer. Answer 'entailment' or 'not_entailment'.
-Language: chinese, acc: 93.70%, prompt: Please select 'entailment' or 'not_entailment' based on the text and the question.
-Language: chinese, acc: 93.20%, prompt: Assess whether the answer is implied based on the context. Answer 'entailment' or 'not_entailment'.
-Language: chinese, acc: 94.20%, prompt: Please determine whether the text contains the answer and answer 'entailment' or 'not_entailment'.
-Language: french, acc: 93.50%, prompt: As a linguistic expert, assess whether the given context involves the answer to the question and answer with 'entailment' or 'not_entailment'.
-Language: french, acc: 94.10%, prompt: Determine whether the information provided in the context necessarily leads to the answer to the question asked and indicate 'entailment' or 'not_entailment'.
-Language: french, acc: 93.40%, prompt: Analyze the text to determine if the answer to the question is implied in the context and specify 'entailment' or 'not_entailment'.
-Language: french, acc: 93.50%, prompt: Based on the given context, decide whether the answer to the question is necessarily involved and mark 'entailment' or 'not_entailment'.
-Language: french, acc: 93.10%, prompt: Evaluate whether the answer to the question can be deduced from the given context and mark 'entailment' or 'not_entailment'.
-Language: french, acc: 94.70%, prompt: Discern whether the context provided directly involves the answer to the question and indicate 'entailment' or 'not_entailment'.
-Language: french, acc: 93.30%, prompt: Determine if the context contains enough information to involve the answer to the question and mark 'entailment' or 'not_entailment'.
-Language: french, acc: 93.60%, prompt: Assess whether the context provided necessarily leads to the answer to the question and answer with 'entailment' or 'not_entailment'.
-Language: french, acc: 93.10%, prompt: Analyze the text to determine if the answer to the question is involved in the context and indicate 'entailment' or 'not_entailment'.
-Language: french, acc: 93.20%, prompt: Based on the given context, decide whether the answer to the question is necessarily inferred and mark 'entailment' or 'not_entailment'.
-Language: arabic, acc: 93.50%, prompt: As a language expert, evaluate whether the given context calls for an answer and answer 'entailment' or 'not_entailment'.
-Language: arabic, acc: 93.50%, prompt: Judge the relationship between the text and the question and answer 'entailment' or 'not_entailment', depending on your language experience.
-Language: arabic, acc: 93.50%, prompt: Does the context given indicate the answer to the question? Evaluate and answer 'entailment' or 'not_entailment'.
-Language: arabic, acc: 94.00%, prompt: Based on your linguistic knowledge, does the text relate to the question? Answer 'entailment' or 'not_entailment'.
-Language: arabic, acc: 93.90%, prompt: As a language expert, determine how the text relates to the question and answer 'entailment' or 'not_entailment'.
-Language: arabic, acc: 93.30%, prompt: Does the text support the answer to the question? Answer 'entailment' or 'not_entailment', depending on your language experience.
-Language: arabic, acc: 92.90%, prompt: Check the text link to the question and answer 'entailment' or 'not_entailment', depending on your language skills.
-Language: arabic, acc: 93.10%, prompt: As a language expert, is there a link between the text and the question? Answer 'entailment' or 'not_entailment'.
-Language: arabic, acc: 94.20%, prompt: Based on your language experience, does context help to answer the question? Evaluate and answer 'entailment' or 'not_entailment'.
-Language: arabic, acc: 93.50%, prompt: Does the text give a clear answer to the question? Answer 'entailment' or 'not_entailment', depending on your language experience.
-Language: spanish, acc: 93.20%, prompt: As a language expert, evaluate whether the given context implies the answer to the question and answer with 'entailment' or 'not_entailment'.
-Language: spanish, acc: 93.60%, prompt: Determine whether the information given in the text necessarily implies the veracity of the hypothesis and answer 'entailment' or 'not_entailment'.
-Language: spanish, acc: 94.90%, prompt: Analyze whether the information presented in the paragraph leads to the conclusion of the question and label the answer as 'entailment' or 'not_entailment'.
-Language: spanish, acc: 94.20%, prompt: Indicate whether the information provided in the text is sufficient to conclude the statement and label the response as 'entailment' or 'not_entailment'.
-Language: spanish, acc: 94.10%, prompt: As an expert on the subject, judge whether the information provided in the text justifies the claim and classify the answer as 'entailment' or 'not_entailment'.
-Language: spanish, acc: 94.10%, prompt: Evaluate whether the information in the paragraph necessarily supports the conclusion of the hypothesis and respond 'entailment' or 'not_entailment'.
-Language: spanish, acc: 93.90%, prompt: Determine whether the information presented in the text logically implies the answer to the question and label the answer as 'entailment' or 'not_entailment'.
-Language: spanish, acc: 94.20%, prompt: Analyze whether the information provided in the paragraph necessarily leads to the veracity of the hypothesis and classify the response as 'entailment' or 'not_entailment'.
-Language: spanish, acc: 93.70%, prompt: As an expert on the subject, evaluate whether the information presented in the text supports the claim and respond 'entailment' or 'not_entailment'.
-Language: spanish, acc: 94.30%, prompt: Indicate whether the information provided in the paragraph necessarily implies the answer to the question and label the answer as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 93.70%, prompt: Rate whether the answer to the question is derived from the given context and answer with 'entailment' or 'not_entailment'.
-Language: japanese, acc: 93.20%, prompt: Please answer 'entailment' or 'not_entailment' for the given context and question.
-Language: japanese, acc: 93.20%, prompt: Decide whether the answer to the question is derived from the given context and answer 'entailment' or 'not_entailment'.
-Language: japanese, acc: 92.60%, prompt: Compare the question with the given context and give the answer 'entailment' or 'not_entailment'.
-Language: japanese, acc: 93.90%, prompt: Determine whether the given context contains the answer to the question and answer with 'entailment' or 'not_entailment'.
-Language: japanese, acc: 93.10%, prompt: Estimate the answer of the question from the context and give the answer 'entailment' or 'not_entailment'.
-Language: japanese, acc: 93.90%, prompt: Determinate whether the given context is relevant to the question and answer with 'entailment' or 'not_entailment'.
-Language: japanese, acc: 93.90%, prompt: Determine whether the given context is relevant to the question and answer with 'entailment' or 'not_entailment'.
-Language: japanese, acc: 93.60%, prompt: Determine whether the given context contains the answer to the question and answer 'entailment' or 'not_entailment'.
-Language: japanese, acc: 93.00%, prompt: Answer with 'entailment' or 'not_entailment', inferring from the given context.
-Language: korean, acc: 93.40%, prompt: Determine if a given sentence necessarily implies the meaning of another sentence and answer 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.50%, prompt: By understanding the relations between sentences, judge whether a given sentence necessarily refers to another sentence and answer with 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.80%, prompt: Evaluate whether a given text necessarily indicates the meaning of another text and respond with 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.40%, prompt: By understanding the relations between the sentences, determine whether a given sentence necessarily includes other sentences and answer with 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.40%, prompt: Judge whether a given content necessarily implies the meaning of another content and answer with 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.80%, prompt: Grasp the relations between sentences, determine if a given sentence necessarily contains the meaning of another sentence and respond with 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.90%, prompt: Evaluate whether a given text necessarily refers to another text and answer with 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.30%, prompt: By comparing the meaning of the sentences, determine if a given sentence necessarily implies another sentence and answer 'entailment' or 'not_entailment'.
-Language: korean, acc: 94.00%, prompt: Evaluate whether the contents given necessarily refer to other contents and answer with 'entailment' or 'not_entailment'.
-Language: korean, acc: 83.00%, prompt: By analyzing the relations between sentences, determine if a given sentence does not necessarily include other sentences and answer with 'entailment' or 'not_entailment'.
-
-# qqp
-
-## 10 prompts
-
-Acc: 88.80%, prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'.
-Acc: 88.70%, prompt: Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent'.
-Acc: 88.50%, prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent".
-Acc: 88.40%, prompt: Analyze if the given set of sentences have the same connotation by answering with 'equivalent' or 'not_equivalent'.
-Acc: 88.30%, prompt: Are the meanings of these two phrases the same? Reply with 'equivalent' or 'not_equivalent'.
-Acc: 88.20%, prompt: Do these two sentences convey the same meaning? Indicate with 'equivalent' or 'not_equivalent'.
-Acc: 88.10%, prompt: Assess whether the following statements are identical in meaning by answering 'equivalent' or 'not_equivalent'.
-Acc: 88.10%, prompt: Please indicate if the following pair of sentences share the same meaning by responding with 'equivalent' or 'not_equivalent'.
-Acc: 88.10%, prompt: Do the following expressions mean the same thing? Provide your answer as 'equivalent' or 'not_equivalent'.
-Acc: 88.00%, prompt: Evaluate whether these two phrases have identical meanings and respond with 'equivalent' or 'not_equivalent'.
-Acc: 87.90%, prompt: Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'.
-
-Acc: 90.00%, prompt: As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent'.
-Acc: 89.30%, prompt: While performing question comparison analysis, classify the similarity of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions.
-Acc: 89.10%, prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.
-Acc: 88.90%, prompt: As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions.
-Acc: 88.80%, prompt: In your role as a question comparison tool, assess the following pair of questions and classify them as 'equivalent' or 'not_equivalent'.
-Acc: 88.70%, prompt: In the capacity of a question assessment system, indicate if the meaning of the provided questions is the same, responding with 'equivalent' or 'not_equivalent'.
-Acc: 88.40%, prompt: As a question equivalence detection system, examine the provided questions and respond with 'equivalent' if they are the same in meaning, or 'not_equivalent' if they are different.
-Acc: 88.30%, prompt: Working as a question classifier, identify whether the given questions share the same meaning, and respond with 'equivalent' for equivalent questions or 'not_equivalent' for different ones.
-Acc: 88.20%, prompt: Acting as a question equivalence instrument, determine if the provided questions are equivalent in meaning, answering with 'equivalent' for similar questions or 'not_equivalent' for dissimilar ones.
-Acc: 88.00%, prompt: In the role of a question similarity analyzer, examine the meaning of the provided questions and decide if they are equivalent, answering with 'equivalent' for equivalent questions or 'not_equivalent' for different questions.
-
-## bertattack
-
-Original prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"ask if the certain pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'. "
-Original acc: 88.80%, attacked acc: 88.00%, dropped acc: 0.80%
-
-Original prompt: Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Examine the past expressions and give me if they are common in meaning by putting 'equivalent' or 'not_equivalent'. "
-Original acc: 88.70%, attacked acc: 82.90%, dropped acc: 5.80%
-
-Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent".
-Attacked prompt: b'were the following twice equations equivalent or not? Answer me with "equivalent" or "not_equivalent". '
-Original acc: 88.50%, attacked acc: 87.30%, dropped acc: 1.20%
-
-Original prompt: As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"according a tool for identifying concept equivalence, do the questions and tcsconsider their compared as neither 'equivalent' or 'not_equivalent'. "
-Original acc: 90.10%, attacked acc: 74.30%, dropped acc: 15.80%
-
-Original prompt: While performing question comparison analysis, classify the similarity of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions.
-Attacked prompt: b"While performing question comparison analysis, assign the difference of the following expressions as 'equivalent' for equivalent questions or 'not_equivalent' for combined measures. "
-Original acc: 89.30%, attacked acc: 86.00%, dropped acc: 3.30%
-
-Original prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Functioning as a question change evaluation tool, test the presented yes and tell if they share the same meaning, responding with 'equivalent' or 'not_equivalent'. "
-Original acc: 89.10%, attacked acc: 87.10%, dropped acc: 2.00%
-
-## checklist
-
-Original prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent' AMsRIKZniY. "
-Original acc: 88.80%, attacked acc: 88.90%, dropped acc: -0.10%
-
-Original prompt: Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent' LkF0FZxMZ4. "
-Original acc: 88.70%, attacked acc: 88.60%, dropped acc: 0.10%
-
-Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent".
-Attacked prompt: b'Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent Cev0s49fbe". '
-Original acc: 88.50%, attacked acc: 88.40%, dropped acc: 0.10%
-
-Original prompt: As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent' 6a4Yn3RGVc. "
-Original acc: 90.00%, attacked acc: 89.70%, dropped acc: 0.30%
-
-Original prompt: While performing question comparison analysis, classify the similarity of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions.
-Attacked prompt: b"While performing question comparison analysis, classify the similarity of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions ofOQXLxiW4. "
-Original acc: 89.30%, attacked acc: 89.20%, dropped acc: 0.10%
-
-Original prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent' KjPJJ2a7RB. "
-Original acc: 89.10%, attacked acc: 88.70%, dropped acc: 0.40%
-
-## deepwordbug
-
-Original prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"DeterminIe if the gievn pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'. "
-Original acc: 88.80%, attacked acc: 88.40%, dropped acc: 0.40%
-
-Original prompt: Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Examine the following expressiZns and tell me if they are aike in meaning by wusing 'equivalent' or 'not_equivalent'. "
-Original acc: 88.70%, attacked acc: 87.40%, dropped acc: 1.30%
-
-Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent".
-Attacked prompt: b'Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent". '
-Original acc: 88.50%, attacked acc: 88.50%, dropped acc: 0.20%
-
-Original prompt: As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a tool for determining question equivalence, rPeview the questions and categocrize their simiXlarity as either 'equivalent' or 'not_equivalent'. "
-Original acc: 90.00%, attacked acc: 88.00%, dropped acc: 2.00%
-
-Original prompt: While performing question comparison analysis, classify the similarity of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions.
-Attacked prompt: b"While performing Pquestion comparison analysig, classify the simioarity of the following queFtions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions. "
-Original acc: 89.30%, attacked acc: 87.60%, dropped acc: 1.70%
-
-Original prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Functioning as a queFstion imilarity evaluation too, analJyze the given questions and deicde if they hsare the same meaning, respnoding with 'equivalent' or 'not_equivalent'. "
-Original acc: 89.10%, attacked acc: 87.70%, dropped acc: 1.40%
-
-## stresstest
-
-Original prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent' and false is not true . "
-Original acc: 88.80%, attacked acc: 88.80%, dropped acc: 0.00%
-
-Original prompt: Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent' and false is not true . "
-Original acc: 88.70%, attacked acc: 88.40%, dropped acc: 0.30%
-
-Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent".
-Attacked prompt: b'Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent and true is true ". '
-Original acc: 88.50%, attacked acc: 88.00%, dropped acc: 0.50%
-
-Original prompt: As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 90.00%, attacked acc: 89.50%, dropped acc: 0.50%
-
-Original prompt: While performing question comparison analysis, classify the similarity of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions.
-Attacked prompt: b"While performing question comparison analysis, classify the similarity of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 89.30%, attacked acc: 89.30%, dropped acc: 0.00%
-
-Original prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent' and false is not true . "
-Original acc: 89.10%, attacked acc: 88.90%, dropped acc: 0.20%
-
-## textbugger
-
-Original prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Defining if the given paired of statements can be considered the same by responds with 'equivalent' or 'not_equivalent'. "
-Original acc: 88.80%, attacked acc: 87.60%, dropped acc: 1.20%
-
-Original prompt: Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Examine the following expressions and tell me if they are ali ke in meaning by using 'equivalent' or 'not_equivalent'. "
-Original acc: 88.70%, attacked acc: 87.60%, dropped acc: 1.10%
-
-Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent".
-Attacked prompt: b'Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent". '
-Original acc: 88.40%, attacked acc: 88.40%, dropped acc: 0.30%
-
-Original prompt: As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"As a tool for deciding que\xd1\x95tion equivalence, review the questions and classifications their portrait as neither 'equivalent' or 'not_equivalent'. "
-Original acc: 90.00%, attacked acc: 87.60%, dropped acc: 2.40%
-
-Original prompt: While performing question comparison analysis, classify the similarity of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions.
-Attacked prompt: b"Whilst performing question comparison analyses, classify the likeness of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions. "
-Original acc: 89.30%, attacked acc: 88.30%, dropped acc: 1.00%
-
-Original prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Functioning as a question portrait evaluation tool, aanlyze the given questio\xd5\xb8s and deci de if they share the same meaning, responding with 'equivalent' or 'not_equivalent'. "
-Original acc: 89.10%, attacked acc: 88.10%, dropped acc: 1.00%
-
-## textfooler
-
-Original prompt: Determine if the given pair of statements can be considered the same by responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Defined if the given pair of statements can is considered the same by responded with 'equivalent' or 'not_equivalent'. "
-Original acc: 88.80%, attacked acc: 87.70%, dropped acc: 1.10%
-
-Original prompt: Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Examine the afterward words and saying me if they are uniformly in connotation by using 'equivalent' or 'not_equivalent'. "
-Original acc: 88.70%, attacked acc: 85.90%, dropped acc: 2.80%
-
-Original prompt: Are the following two questions equivalent or not? Answer me with "equivalent" or "not_equivalent".
-Attacked prompt: b'Exists the farther two facets equivalent or not? Answer me with "equivalent" or "not_equivalent". '
-Original acc: 88.40%, attacked acc: 87.20%, dropped acc: 1.20%
-
-Original prompt: As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Because a tool for determining question equivalence, examination the queries and classifications their likeness as either 'equivalent' or 'not_equivalent'. "
-Original acc: 90.00%, attacked acc: 87.60%, dropped acc: 2.40%
-
-Original prompt: While performing question comparison analysis, classify the similarity of the following questions as 'equivalent' for equivalent questions or 'not_equivalent' for different questions.
-Attacked prompt: b"While performing question comparing analysis, classifications the likeness of the following queries as 'equivalent' for equivalent question or 'not_equivalent' for diverse item. "
-Original acc: 89.30%, attacked acc: 87.60%, dropped acc: 1.70%
-
-Original prompt: Functioning as a question similarity evaluation tool, analyze the given questions and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.
-Attacked prompt: b"Functional as a question resemblance evaluation mechanism, analytical the given questions and chose if they exchange the same meaning, responding with 'equivalent' or 'not_equivalent'. "
-Original acc: 89.10%, attacked acc: 87.60%, dropped acc: 1.50%
-
-## translation
-
-Language: chinese, acc: 89.70%, prompt: As a tool for determining the equivalence of problems, the problems are examined and their similarity is classified as 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 89.30%, prompt: Please compare the similarity of two sentences and put them into the category of 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 88.90%, prompt: Two sentences are classified as 'equivalent' or 'not_equivalent' for their similarity.
-Language: chinese, acc: 89.90%, prompt: You can determine how similar the questions are by comparing them and categorizing them as 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 89.50%, prompt: Using the method of contrast, the similarity of these problems is divided into two categories: 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 88.40%, prompt: By comparing these issues, you can classify them as 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 89.50%, prompt: To determine whether the questions are similar, put them into the category of 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 89.60%, prompt: Divide the similarity of these questions into 'equivalent' or 'not_equivalent' categories.
-Language: chinese, acc: 90.10%, prompt: Using the similarity assessment tool, these questions were classified as 'equivalent' or 'not_equivalent'.
-Language: chinese, acc: 89.60%, prompt: By analyzing the similarity of these problems, they are divided into categories of 'equivalent' or 'not_equivalent'.
-Language: french, acc: 89.50%, prompt: As a tool to determine the equivalence of questions, review the questions and rank their similarity as 'equivalent' or 'not_equivalent'.
-Language: french, acc: 89.30%, prompt: Please compare the similarity of two sentences and classify them as 'equivalent' or 'not_equivalent'.
-Language: french, acc: 90.00%, prompt: Based on the similarity of two sentences, classify them as 'equivalent' or 'not_equivalent'.
-Language: french, acc: 90.10%, prompt: You can determine the similarity between these questions by comparing them and classifying them as 'equivalent' or 'not_equivalent'.
-Language: french, acc: 89.90%, prompt: Use a comparative method to divide the similarity of these questions into two categories: 'equivalent' or 'not_equivalent'.
-Language: french, acc: 88.10%, prompt: By comparing these questions, you can classify them as 'equivalent' or 'not_equivalent'.
-Language: french, acc: 89.70%, prompt: Determine whether these questions are similar or not, and then classify them as 'equivalent' or 'not_equivalent'.
-Language: french, acc: 89.80%, prompt: Divide the similarity of these questions into two categories: 'equivalent' or 'not_equivalent'.
-Language: french, acc: 90.10%, prompt: Use a similarity assessment tool to classify these questions as 'equivalent' or 'not_equivalent'.
-Language: french, acc: 89.70%, prompt: By analyzing the similarity of these questions, you can divide them into two categories: 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 89.30%, prompt: As a tool for determining an equation of questions, review the questions and classify their similarity as either 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 89.80%, prompt: When using questions in the classification domain, please classify the similarity between the questions as 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 89.10%, prompt: To determine an equation of questions, you must review the questions and classify their similarity as 'equivalent' or 'not_equivalent'.
-Language: arabic, acc: 88.80%, prompt: Questions can be classified as 'equivalent' or 'not_equivalent' when used to identify classifications.
-Language: arabic, acc: 89.10%, prompt: Classification of question similarity as 'equivalent' or 'not_equivalent' is used as a tool to determine the classification of questions.
-Language: arabic, acc: 89.00%, prompt: Classify the similarity of the questions as 'equivalent' or 'not_equivalent' to determine the equation of the questions.
-Language: arabic, acc: 89.30%, prompt: Identifying the similarity of questions and classifying them as 'equivalent' or 'not_equivalent' is an important tool in determining the classification of questions.
-Language: arabic, acc: 89.00%, prompt: When classifying questions, their similarity can be classified as 'equivalent' or 'not_equivalent' to determine the correct classification.
-Language: arabic, acc: 89.50%, prompt: The similarity of questions should be classified as 'equivalent' or 'not_equivalent' when used to determine the equation of questions.
-Language: arabic, acc: 89.10%, prompt: Identifying the similarity of questions and classifying them as 'equivalent' or 'not_equivalent' helps to correctly classify questions.
-Language: spanish, acc: 90.00%, prompt: As a tool to determine the equivalence of questions, it reviews the questions and classifies their similarity as 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 89.30%, prompt: Evaluate the similarity between questions and classify them as 'equivalent' or 'not_equivalent' to determine their equivalence.
-Language: spanish, acc: 89.70%, prompt: Determine whether two questions are 'equivalent' or 'not_equivalent' based on similarity and characteristics.
-Language: spanish, acc: 89.10%, prompt: Classify the similarity between questions as 'equivalent' or 'not_equivalent' to determine their equivalence.
-Language: spanish, acc: 89.60%, prompt: Review the questions and rate them as 'equivalent' or 'not_equivalent' based on their similarity and content.
-Language: spanish, acc: 89.20%, prompt: As part of the classification task of questions, it determines their equivalence by categorizing their similarity as 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 89.50%, prompt: Analyze the similarity between questions and classify them as 'equivalent' or 'not_equivalent' to determine their equivalence.
-Language: spanish, acc: 90.10%, prompt: As a method of identifying the equivalence of questions, it categorizes their similarity as 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 89.30%, prompt: To determine the equivalence between questions, check their similarity and classify them as 'equivalent' or 'not_equivalent'.
-Language: spanish, acc: 89.90%, prompt: Classify the similarity between questions as 'equivalent' or 'not_equivalent' to determine whether they are equivalent or not.
-Language: japanese, acc: 89.40%, prompt: As a tool to determine the equivalence of the question, review the question and categorize its similarities into 'equivalent' or 'not_equivalent' categories.
-Language: japanese, acc: 88.60%, prompt: Work on text sorting tasks labeled 'equivalent' or 'not_equivalent'.
-Language: japanese, acc: 88.60%, prompt: For text classification tasks, use the labels 'equivalent' or 'not_equivalent' to determine the equivalence of statements.
-Language: japanese, acc: 88.50%, prompt: In the MRPC dataset, use the labels 'equivalent' or 'not_equivalent' to classify the equivalence of statements.
-Language: japanese, acc: 88.80%, prompt: As a tool for determining equivalence, check sentences and categorize them into 'equivalent' or 'not_equivalent' categories.
-Language: japanese, acc: 88.40%, prompt: Use the labels 'equivalent' or 'not_equivalent' to determine the equivalence of statements in text classification tasks.
-Language: japanese, acc: 88.90%, prompt: In the text classification task of the MRPC data set, classify the equivalence of statements with labels of 'equivalent' or 'not_equivalent'.
-Language: japanese, acc: 89.00%, prompt: As a tool to determine the equivalence of statements, categorize statements into 'equivalent' or 'not_equivalent' categories.
-Language: japanese, acc: 88.50%, prompt: In a text classification task, classify the equivalence of statements using labels of 'equivalent' or 'not_equivalent'.
-Language: japanese, acc: 88.80%, prompt: Do a text classification task to determine the equivalence of statements, labeled 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 88.30%, prompt: Classify two given sentences as 'equivalent' or 'not_equivalent' by discriminating whether they have the same meaning.
-Language: korean, acc: 89.80%, prompt: Determine sentence equivalence by judging the similarity of two sentences with 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 88.70%, prompt: Classify the similarity of sentences as 'equivalent' or 'not_equivalent' by judging whether two sentences have the same meaning.
-Language: korean, acc: 89.30%, prompt: Determine if two given sentences are equivalent to each other, and classify their similarity as 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 89.20%, prompt: Compare two given sentences to determine sentence equivalence, and classify their similarities as 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 87.70%, prompt: Classify sentence equivalence as 'equivalent' or 'not_equivalent' by judging whether two sentences have the same meaning to each other.
-Language: korean, acc: 89.30%, prompt: Determine if two sentences have the same meaning, and classify their similarities as 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 89.10%, prompt: Compare two given sentences to determine their equivalence, and classify their similarities as 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 89.30%, prompt: Review two sentences to evaluate sentence equivalence, and classify their similarities as 'equivalent' or 'not_equivalent'.
-Language: korean, acc: 87.80%, prompt: Judge whether two sentences have the same meaning to each other, and determine the sentence equivalence with 'equivalent' or 'not_equivalent'.
-
-# rte
-
-## 10 prompts
-
-Acc: 93.50%, prompt: Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.
-Acc: 93.50%, prompt: Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Acc: 93.50%, prompt: Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Acc: 93.14%, prompt: Examine the pair of sentences and determine if they exhibit entailment or not_entailment. Answer with either 'entailment' or 'not_entailment'.
-Acc: 93.14%, prompt: Review the two statements and categorize their relationship as either 'entailment' or 'not_entailment'.
-Acc: 92.78%, prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word.
-Acc: 92.78%, prompt: Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.
-Acc: 92.78%, prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'.
-Acc: 92.78%, prompt: Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment'.
-Acc: 92.78%, prompt: Considering the two sentences, identify if their relationship is 'entailment' or 'not_entailment'.
-Acc: 92.42%, prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.
-
-Acc: 93.86%, prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Acc: 93.50%, prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Acc: 93.50%, prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment'.
-Acc: 93.50%, prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment'.
-Acc: 93.14%, prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.
-Acc: 93.14%, prompt: Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment' or 'not_entailment'.
-Acc: 93.14%, prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment'.
-Acc: 93.14%, prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Acc: 92.78%, prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.
-Acc: 92.78%, prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment'.
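-
-Every 'Acc' figure in the two lists above comes from running the prompt over the task's examples and scoring the model's one-word answer against the gold label. The sketch below is a hedged reconstruction of that loop; `query_model` and the example format are hypothetical placeholders, not the code that produced these numbers.
-
-```python
-def prompt_accuracy(prompt: str, dataset, query_model) -> float:
-    """Percentage of examples labelled correctly when the prompt is prepended to each example."""
-    correct = 0
-    for example in dataset:
-        # Each example is assumed to look like {"text": ..., "label": "entailment" or "not_entailment"}.
-        prediction = query_model(f"{prompt}\n{example['text']}").strip().lower()
-        correct += prediction == example["label"]
-    return 100.0 * correct / len(dataset)
-```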
-
-## bertattack
-
-Original prompt: Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"a the relationship between the given sentences carry entailment or not_entailment? Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.78%, dropped acc: 0.72%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"a as the given pair of verb demonstrates entailment or not_entailment. question with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 91.34%, dropped acc: 2.17%
-
-Original prompt: Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"be if the given conjunction of and displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.06%, dropped acc: 1.44%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"acting as an entailment classindicator, identify neither the given pair of sentences displays entailment or not_entailment. reply with 'entailment' or 'not_entailment'. "
-Original acc: 93.86%, attacked acc: 65.34%, dropped acc: 28.52%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given complement of verbs provides entailment or not_entailment. sentence with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 89.53%, dropped acc: 3.97%
-
-Original prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment'.
-Attacked prompt: b"her a tool for determining entailment about, structure the two statements and categorize their connection as either 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 91.70%, dropped acc: 1.81%
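-
-The 'dropped acc' reported in each record above, and in the sections that follow, is simply the original accuracy minus the accuracy under the attacked prompt, in percentage points. A minimal sketch of that bookkeeping (the function name is illustrative):
-
-```python
-def accuracy_drop(original_acc: float, attacked_acc: float) -> float:
-    """Accuracy drop in percentage points caused by the attacked prompt."""
-    return round(original_acc - attacked_acc, 2)
-
-# First bertattack record above: accuracy_drop(93.50, 92.78) -> 0.72
-```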
-
-## checklist
-
-Original prompt: Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment' CuVpbbkC4c. "
-Original acc: 93.50%, attacked acc: 93.14%, dropped acc: 0.36%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment' d6ZQ3u0GBQ. "
-Original acc: 93.50%, attacked acc: 92.78%, dropped acc: 0.72%
-
-Original prompt: Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment' 5yWbBXztUY. "
-Original acc: 93.50%, attacked acc: 93.14%, dropped acc: 0.36%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment' CuVpbbkC4c. "
-Original acc: 93.86%, attacked acc: 93.50%, dropped acc: 0.36%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment' BWmsLx8xOA. "
-Original acc: 93.50%, attacked acc: 93.14%, dropped acc: 0.36%
-
-Original prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment' 5XeflW1ZJc. "
-Original acc: 93.50%, attacked acc: 93.14%, dropped acc: 0.36%
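-
-All of the checklist rows follow one pattern: the prompt is left intact and an irrelevant, randomly generated token (e.g. CuVpbbkC4c, d6ZQ3u0GBQ) is appended before the final period. A minimal sketch of that perturbation, assuming a ten-character alphanumeric token; this reconstructs the visible effect and is not the original attack code:
-
-```python
-import random
-import string
-
-def append_random_token(prompt: str, length: int = 10) -> str:
-    """Append an irrelevant alphanumeric token before the final period, as in the attacked prompts above."""
-    token = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
-    return prompt.rstrip(". ") + " " + token + ". "
-```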
-
-## deepwordbug
-
-Original prompt: Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Yoes the relationship between the given sentences repersent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 93.14%, dropped acc: 0.36%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Identify whDther the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 93.14%, dropped acc: 0.36%
-
-Original prompt: Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Determine if the given paGr of sentenecs displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.42%, dropped acc: 1.08%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Woeking as an entailment classifier, identify whether the given pair of sentences diplays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.86%, attacked acc: 92.78%, dropped acc: 1.08%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 93.50%, dropped acc: 0.36%
-
-Original prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment'.
-Attacked prompt: b"AL a tool for determining entailment relationhsips, review the two staJements and categorize their connection as either 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.42%, dropped acc: 1.08%
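-
-The deepwordbug rows introduce small character-level edits into a few words ('Yoes', 'whDther', 'sentenecs') while leaving the rest of the prompt untouched. A rough sketch of one such edit follows; the real attack also uses substitutions, insertions and deletions and picks the words to edit by importance scores, all of which are omitted here:
-
-```python
-import random
-
-def swap_adjacent_chars(word: str) -> str:
-    """Swap two adjacent inner characters, one of the edits visible in the attacked prompts above."""
-    if len(word) < 4:
-        return word
-    i = random.randrange(1, len(word) - 2)
-    return word[:i] + word[i + 1] + word[i] + word[i + 2:]
-
-def perturb_prompt(prompt: str, n_words: int = 2) -> str:
-    """Apply the edit to a few randomly chosen words (word choice in the real attack is not random)."""
-    words = prompt.split()
-    for i in random.sample(range(len(words)), min(n_words, len(words))):
-        words[i] = swap_adjacent_chars(words[i])
-    return " ".join(words)
-```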
-
-## stresstest
-
-Original prompt: Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 93.50%, attacked acc: 92.78%, dropped acc: 0.72%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 93.50%, attacked acc: 93.14%, dropped acc: 0.36%
-
-Original prompt: Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 93.50%, attacked acc: 93.14%, dropped acc: 0.36%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 93.86%, attacked acc: 92.78%, dropped acc: 1.08%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 93.50%, attacked acc: 93.14%, dropped acc: 0.36%
-
-Original prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 93.50%, attacked acc: 92.06%, dropped acc: 1.44%
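-
-Every stresstest row above appends the same tautological distractor, 'and true is true' repeated five times (other task sections in this file use variants such as 'and false is not true'). A minimal sketch of that suffix attack; the helper name is illustrative:
-
-```python
-def add_tautology(prompt: str, repeats: int = 5) -> str:
-    """Append the 'and true is true' distractor seen in the attacked prompts above."""
-    distractor = " ".join(["and true is true"] * repeats)
-    return prompt.rstrip(". ") + " " + distractor + " . "
-```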
-
-## textbugger
-
-Original prompt: Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Does the relationship between the afforded sentences re present entailment or not_entailment? Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.78%, dropped acc: 0.72%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Identifying whether the given pair of sentencing demonstrates entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.42%, dropped acc: 1.08%
-
-Original prompt: Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Determine if the given pair of penalties displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.78%, dropped acc: 0.72%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Working as an entailment classifier, identify both the given piar of sentences displaying entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.86%, attacked acc: 90.97%, dropped acc: 2.89%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of senetnces demonstrates entailment or not_entailment. Answe\xe2\xb2\x85 with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.42%, dropped acc: 1.08%
-
-Original prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment'.
-Attacked prompt: b"As a tool for deermining entailment relationships, review the secondly statements and categor\xd1\x96ze their connection as either 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.06%, dropped acc: 1.44%
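-
-The textbugger rows mix word-level swaps ('given' -> 'afforded'), space insertion ('re present') and homoglyph substitution, where a Latin letter is replaced by a visually similar Unicode character (the \xd1\x96 in 'categor\xd1\x96ze' above is a Cyrillic 'i'). A small sketch of the homoglyph edit; the mapping below is a tiny illustrative sample, not the attack's full table:
-
-```python
-# Cyrillic look-alikes for a few Latin letters (illustrative sample only).
-HOMOGLYPHS = {"i": "\u0456", "e": "\u0435", "o": "\u043e", "a": "\u0430"}
-
-def substitute_homoglyph(word: str) -> str:
-    """Replace the first mappable character with a visually similar Unicode look-alike."""
-    for idx, ch in enumerate(word):
-        if ch in HOMOGLYPHS:
-            return word[:idx] + HOMOGLYPHS[ch] + word[idx + 1:]
-    return word
-```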
-
-## textfooler
-
-Original prompt: Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Does the relationship between the given sentences depicted entailment or not_entailment? Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.78%, dropped acc: 0.72%
-
-Original prompt: Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Deciding whether the given pair of sentencing demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.42%, dropped acc: 1.08%
-
-Original prompt: Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Define if the given pair of chastisement displays entailment or not_entailment. Answered with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.42%, dropped acc: 1.08%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Working as an entailment classifier, identify whether the given torque of sanction displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 93.86%, attacked acc: 92.42%, dropped acc: 1.44%
-
-Original prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Acting as an entailment detection instrument, determine if the given pair of sentences denotes entailment or not_entailment. Answer with 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 92.78%, dropped acc: 0.72%
-
-Original prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment'.
-Attacked prompt: b"Because a device for determining entailment relationships, review the two words and categorize their connection as either 'entailment' or 'not_entailment'. "
-Original acc: 93.50%, attacked acc: 91.34%, dropped acc: 2.17%
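-
-The textfooler rows replace whole words with near-synonyms or related forms ('represent' -> 'depicted', 'Determine' -> 'Define', 'sentences' -> 'sentencing'). The toy sketch below only mimics that surface behaviour with a hand-written table drawn from the rows above; the real attack selects replacements from word embeddings and filters them by semantic similarity:
-
-```python
-# Hand-written substitutions taken from the attacked prompts above (punctuation handling omitted).
-SYNONYMS = {"represent": "depicted", "determine": "define", "identify": "deciding", "sentences": "sentencing"}
-
-def substitute_words(prompt: str) -> str:
-    """Swap each word for its table entry when one exists, leaving other words unchanged."""
-    return " ".join(SYNONYMS.get(word.lower(), word) for word in prompt.split())
-```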
-
-## translation
-
-Language: chinese, acc: 93.14%, prompt: In the light of an implication analysis tool, evaluate the relationship between the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 91.70%, prompt: From the perspective of an implication analysis tool, determine whether there is an implication relationship in the following sentences by classifying them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 91.34%, prompt: Please use an implication analysis tool to determine whether an implication relationship exists in the following sentences by classifying them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 93.14%, prompt: Please evaluate the relation of the following sentences as 'entailment' or 'not_entailment' for the purpose of determining implication relation.
-Language: chinese, acc: 91.70%, prompt: Please use the implication analysis tool to evaluate the relationships between the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 92.42%, prompt: For the purpose of determining implicative relations, analyze the relations of the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 92.06%, prompt: Please use the implication analysis tool to determine the relationship of the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 91.70%, prompt: Please use the implication judgment tool to assess the relevance of the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 92.78%, prompt: Please, with implication analysis as the main task, determine the relationships between the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 92.42%, prompt: Using the implication judgment as a criterion, analyze the relation of the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: french, acc: 92.06%, prompt: As an engagement analysis tool, evaluate the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.
-Language: french, acc: 92.42%, prompt: Determine whether the given sentences involve one another or not as an implication analysis tool. Classify them accordingly as 'entailment' or 'not_entailment'.
-Language: french, acc: 92.06%, prompt: Using implication analysis, evaluate whether the sentences provided have a logical relationship and categorize them as 'entailment' or 'not_entailment'.
-Language: french, acc: 93.50%, prompt: As an engagement assessment tool, determine whether the sentences provided have a logical relationship and classify them as 'entailment' or 'not_entailment'.
-Language: french, acc: 91.70%, prompt: As an implication classification tool, analyze the sentences provided to determine if there is a logical relationship and categorize them as 'entailment' or 'not_entailment'.
-Language: french, acc: 92.78%, prompt: Using implication analysis, determine whether the given sentences have a cause-effect relationship and categorize them as 'entailment' or 'not_entailment'.
-Language: french, acc: 93.14%, prompt: Evaluate the relationship between the given sentences using implication analysis and rank them accordingly as 'entailment' or 'not_entailment'.
-Language: french, acc: 92.42%, prompt: As an engagement detection tool, determine whether the given sentences have a logical relationship and categorize them as 'entailment' or 'not_entailment'.
-Language: french, acc: 92.78%, prompt: Using implication analysis, evaluate whether the sentences provided have a cause-effect relationship and rank them accordingly as 'entailment' or 'not_entailment'.
-Language: french, acc: 92.42%, prompt: Determine whether the given sentences have a cause-effect relationship as an engagement analysis tool and categorize them as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 92.42%, prompt: In your role as a tool for reasoning analysis, evaluate the relationship between given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 92.78%, prompt: Can you determine whether this sentence is inferred from the other sentence? Classify it as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 92.42%, prompt: Using the tool of reasoning analysis, analyze the relationship between given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 92.78%, prompt: Does this sentence represent a conclusion from the previous sentence? Classify it as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 91.70%, prompt: As a tool of reasoning analysis, evaluate the relationship of given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 92.78%, prompt: Can this sentence be inferred from the previous sentence? Classify it as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 92.42%, prompt: Using a tool to analyze a conclusion, analyze the relationship between the two sentences and classify them as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 92.78%, prompt: Is this a conclusion from the next sentence? Classify it as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 92.42%, prompt: As part of your task in analyzing a conclusion, evaluate the relationship between the two sentences and classify them as 'entailment' or 'not_entailment' based on their relationship.
-Language: arabic, acc: 92.78%, prompt: Are you following this sentence directly from the previous one? Classify it as 'entailment' or 'not_entailment'.
-Language: spanish, acc: 91.70%, prompt: In your role as an implication analysis tool, evaluate the relationship between the given phrases and classify them as 'entailment' or 'not_entailment'.
-Language: spanish, acc: 93.86%, prompt: Determine whether the second sentence necessarily implies the first and label the relation as 'entailment', or as 'not_entailment' if not.
-Language: spanish, acc: 92.78%, prompt: Classifies the relationship between these two sentences as 'entailment' if one necessarily implies the other, or as 'not_entailment' if not.
-Language: spanish, acc: 93.14%, prompt: Evaluates whether the information in the second sentence is implied in the first and labels the relationship as 'entailment', or as 'not_entailment' if there is no such implication.
-Language: spanish, acc: 92.78%, prompt: Given a couple of phrases, label their relationship as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication.
-Language: spanish, acc: 92.06%, prompt: Analyzes the relationship between the phrases and classifies them as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication.
-Language: spanish, acc: 93.14%, prompt: Given two sentences, determine whether the second sentence is a necessary consequence of the first and label the relation as 'entailment', or as 'not_entailment' if not.
-Language: spanish, acc: 93.14%, prompt: Evaluates whether the information presented in the second sentence is implicit in the first and labels the relationship as 'entailment', or as 'not_entailment' if there is no such implication.
-Language: spanish, acc: 93.14%, prompt: Classifies the relationship between the given phrases as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication.
-Language: spanish, acc: 92.42%, prompt: Determines whether the information provided in the second sentence is necessarily inferable from the first and labels the relationship as 'entailment', or as 'not_entailment' if not.
-Language: japanese, acc: 91.70%, prompt: Analyze the relationship of a given sentence and classify it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 92.06%, prompt: Evaluate the semantic relationship of the sentence and classify it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 92.78%, prompt: Please judge the relationship between the given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 90.97%, prompt: Examine the paraphrases of a given sentence and classify them 'entailment' or 'not_entailment'.
-Language: japanese, acc: 91.70%, prompt: Rate the similarity of a given sentence and categorize it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 91.34%, prompt: Determinate the semantic connections of a given sentence and classify it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 90.61%, prompt: Examine the semantic match of a given sentence and categorize it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 92.78%, prompt: Classify it as 'entailment' or 'not_entailment' based on the content of the sentence.
-Language: japanese, acc: 91.70%, prompt: Analyze the relationship of a given sentence and classify it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 91.34%, prompt: Judge the semantic connections of a given sentence and categorize it as 'entailment' or 'not_entailment'.
-Language: korean, acc: 92.42%, prompt: Evaluate the relationship between any two sentences given to you and classify you as 'entailment' or 'not_entailment'.
-Language: korean, acc: 92.42%, prompt: Analyze the semantic deductive relations between sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.14%, prompt: Evaluate the logical relevance between sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.14%, prompt: Evaluate the interaction of two given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.14%, prompt: Please check whether there is a semantic match between those two sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.14%, prompt: Compare information between given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.14%, prompt: Please analyse the correlation between those two sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 93.14%, prompt: Evaluate the different meanings between given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 92.78%, prompt: Compare the semantic structure of the two sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 92.42%, prompt: Evaluate the interactions between sentences and classify them as 'entailment' or 'not_entailment'.
-
-# sst2
-
-## 10 prompts
-
-Acc: 96.79%, prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'.
-Acc: 96.79%, prompt: Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative'.
-Acc: 96.79%, prompt: Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment.
-Acc: 96.79%, prompt: Review this statement and decide whether it has a 'positive' or 'negative' sentiment.
-Acc: 96.67%, prompt: Analyze the tone of this statement and respond with either 'positive' or 'negative'.
-Acc: 96.22%, prompt: Please identify the emotional tone of this passage: 'positive' or 'negative'?
-Acc: 96.10%, prompt: Assess the mood of the following quote and determine if it's 'positive' or 'negative'.
-Acc: 95.76%, prompt: Considering the given phrase, would you say it carries a 'positive' or 'negative' connotation?
-Acc: 95.18%, prompt: Given the context of this text, indicate if the emotion conveyed is 'positive' or 'negative'.
-Acc: 92.66%, prompt: After examining the following expression, label its emotion as either 'positive' or 'negative'.
-
-Acc: 96.90%, prompt: Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'.
-Acc: 96.56%, prompt: As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.
-Acc: 96.56%, prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement.
-Acc: 96.56%, prompt: Acting as a sentiment evaluator, identify if the given sentence is 'positive' or 'negative'.
-Acc: 96.56%, prompt: Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'.
-Acc: 96.44%, prompt: In the capacity of a sentiment classifier, decide whether the given quote is 'positive' or 'negative'.
-Acc: 96.44%, prompt: Serving as a sentiment evaluation model, determine if the given statement is 'positive' or 'negative'.
-Acc: 96.44%, prompt: Emulating a sentiment classification system, indicate whether the provided text is 'positive' or 'negative'.
-Acc: 95.87%, prompt: As an emotion detector, determine if the provided passage conveys a 'positive' or 'negative' sentiment.
-Acc: 95.07%, prompt: Taking on the role of an emotion classifier, specify if the provided phrase is 'positive' or 'negative'.
-
-## bertattack
-
-Original prompt: Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment.
-Attacked prompt: b"search the provided excerpt and play between 'positive' and 'negative' to describe its mood. "
-Original acc: 96.90%, attacked acc: 96.10%, dropped acc: 0.80%
-
-Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'.
-Attacked prompt: b"assess the sentiment of the given language and classify it as 'positive' or 'negative'. "
-Original acc: 96.79%, attacked acc: 95.64%, dropped acc: 1.15%
-
-Original prompt: Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative'.
-Attacked prompt: b"locate the associate emotion of this sentence, categorizing it as 'positive' or 'negative'. "
-Original acc: 96.79%, attacked acc: 92.20%, dropped acc: 4.59%
-
-Original prompt: Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'.
-Attacked prompt: b"Working as a emotion analyzer, please sort if the cross text is 'positive' or 'negative'. "
-Original acc: 96.67%, attacked acc: 95.64%, dropped acc: 1.03%
-
-Original prompt: As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.
-Attacked prompt: b"in a sentiment classifier, determine where the over words is 'positive' or 'negative'. "
-Original acc: 96.56%, attacked acc: 95.30%, dropped acc: 1.26%
-
-Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement.
-Attacked prompt: b"when the role of a sentiment analysis instrument, responding with 'positive' or 'negative' to contrast this statement. "
-Original acc: 96.56%, attacked acc: 95.87%, dropped acc: 0.69%
-
-## checklist
-
-Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'.
-Attacked prompt: b"Evaluate the sentiment of the given text and classify it as 'positive' or 'negative' Cev0s49fbe. "
-Original acc: 96.79%, attacked acc: 96.56%, dropped acc: 0.23%
-
-Original prompt: Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative'.
-Attacked prompt: b"Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative' MSAddJ4D2a. "
-Original acc: 96.79%, attacked acc: 96.67%, dropped acc: 0.11%
-
-Original prompt: Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment.
-Attacked prompt: b"Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment SvAp8RlOFn. "
-Original acc: 96.79%, attacked acc: 96.67%, dropped acc: 0.11%
-
-Original prompt: Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'.
-Attacked prompt: b"Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative' MSAddJ4D2a. "
-Original acc: 96.90%, attacked acc: 96.56%, dropped acc: 0.34%
-
-Original prompt: As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.
-Attacked prompt: b"As a sentiment classifier, determine whether the following text is 'positive' or 'negative' 6a4Yn3RGVc. "
-Original acc: 96.56%, attacked acc: 96.44%, dropped acc: 0.11%
-
-Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement.
-Attacked prompt: b"In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement 6a4Yn3RGVc. "
-Original acc: 96.56%, attacked acc: 96.67%, dropped acc: -0.11%
-
-## deepwordbug
-
-Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'.
-Attacked prompt: b"Evaluate the sentiment of the given tXxt and classify it as 'positive' or 'negative'. "
-Original acc: 96.79%, attacked acc: 96.33%, dropped acc: 0.46%
-
-Original prompt: Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative'.
-Attacked prompt: b"Determine the ovreall sentiment of this sentence, categorizing it as 'positive' or 'negative'. "
-Original acc: 96.79%, attacked acc: 96.67%, dropped acc: 0.11%
-
-Original prompt: Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment.
-Attacked prompt: b"Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment. "
-Original acc: 96.79%, attacked acc: 96.79%, dropped acc: 0.00%
-
-Original prompt: Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'.
-Attacked prompt: b"Working as a sentiment analyzer, Jlease findicate if the followign text is 'positive' or 'negative'. "
-Original acc: 96.90%, attacked acc: 96.22%, dropped acc: 0.69%
-
-Original prompt: As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.
-Attacked prompt: b"As a sentiment classifier, determine whether the followJing text is 'positive' or 'negative'. "
-Original acc: 96.56%, attacked acc: 96.10%, dropped acc: 0.46%
-
-Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement.
-Attacked prompt: b"In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement. "
-Original acc: 96.56%, attacked acc: 96.56%, dropped acc: 0.00%
-
-## stresstest
-
-Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'.
-Attacked prompt: b"Evaluate the sentiment of the given text and classify it as 'positive' or 'negative' and false is not true . "
-Original acc: 96.79%, attacked acc: 96.79%, dropped acc: 0.00%
-
-Original prompt: Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative'.
-Attacked prompt: b"Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative' and false is not true . "
-Original acc: 96.79%, attacked acc: 96.67%, dropped acc: 0.11%
-
-Original prompt: Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment.
-Attacked prompt: b"Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment and true is true . "
-Original acc: 96.79%, attacked acc: 96.67%, dropped acc: 0.11%
-
-Original prompt: Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'.
-Attacked prompt: b"Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative' and false is not true . "
-Original acc: 96.90%, attacked acc: 96.67%, dropped acc: 0.23%
-
-Original prompt: As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.
-Attacked prompt: b"As a sentiment classifier, determine whether the following text is 'positive' or 'negative' and false is not true . "
-Original acc: 96.56%, attacked acc: 96.33%, dropped acc: 0.23%
-
-Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement.
-Attacked prompt: b"In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement and true is true and true is true and true is true and true is true and true is true . "
-Original acc: 96.56%, attacked acc: 96.56%, dropped acc: 0.00%
-
-## textbugger
-
-Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'.
-Attacked prompt: b"Evalu ate the sentiment of the given text and classify it as 'positive' or 'negative'. "
-Original acc: 96.79%, attacked acc: 96.56%, dropped acc: 0.23%
-
-Original prompt: Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative'.
-Attacked prompt: b"Determine the general sentiment of this condemnation, categorizing it as 'positive' or 'negative'. "
-Original acc: 96.79%, attacked acc: 95.87%, dropped acc: 0.92%
-
-Original prompt: Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment.
-Attacked prompt: b"Maltese the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment. "
-Original acc: 96.79%, attacked acc: 96.67%, dropped acc: 0.11%
-
-Original prompt: Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'.
-Attacked prompt: b"Working as a mood analyzer, invites ind icate if the following text is 'positive' or 'negative'. "
-Original acc: 96.79%, attacked acc: 96.10%, dropped acc: 0.69%
-
-Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement.
-Attacked prompt: b"In the role of a emotion analysis tool, r\xd0\xb5spond with 'positive' or 'negative' to classify this statement. "
-Original acc: 96.67%, attacked acc: 96.10%, dropped acc: 0.57%
-
-Original prompt: As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.
-Attacked prompt: b"As a emotion classifier, determine whether the foll\xd0\xbewing wording is 'positive' or 'negative'. "
-Original acc: 96.56%, attacked acc: 95.18%, dropped acc: 1.38%
-
-## textfooler
-
-Original prompt: Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'.
-Attacked prompt: b"Measured the emotion of the handed text and classify it as 'positive' or 'negative'. "
-Original acc: 96.79%, attacked acc: 95.18%, dropped acc: 1.61%
-
-Original prompt: Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative'.
-Attacked prompt: b"Determine the plenary senses of this condemning, categorizing it as 'positive' or 'negative'. "
-Original acc: 96.79%, attacked acc: 93.46%, dropped acc: 3.33%
-
-Original prompt: Read the provided excerpt and choose between 'positive' and 'negative' to describe its sentiment.
-Attacked prompt: b"Lire the rendered excerpt and choose between 'positive' and 'negative' to identifies its emotion. "
-Original acc: 96.79%, attacked acc: 94.61%, dropped acc: 2.18%
-
-Original prompt: Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'.
-Attacked prompt: b"Working as a emotion analyzer, please demonstrates if the following texts is 'positive' or 'negative'. "
-Original acc: 96.79%, attacked acc: 96.10%, dropped acc: 0.69%
-
-Original prompt: In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement.
-Attacked prompt: b"In the role of a emotion analysis tool, respond with 'positive' or 'negative' to classify this languages. "
-Original acc: 96.67%, attacked acc: 95.64%, dropped acc: 1.03%
-
-Original prompt: As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.
-Attacked prompt: b"As a emotion classifier, ascertain whether the suite wording is 'positive' or 'negative'. "
-Original acc: 96.56%, attacked acc: 95.18%, dropped acc: 1.38%
-
-## translation
-
-Language: chinese, acc: 96.79%, prompt: Answer whether the statement is 'positive' or 'negative' based on sentiment analysis.
-Language: chinese, acc: 94.38%, prompt: As an emotion analysis tool, determine whether the emotion in the text is 'positive' or 'negative'.
-Language: chinese, acc: 96.44%, prompt: Categorize the statement as 'positive' or 'negative', based on its emotional bent.
-Language: chinese, acc: 96.44%, prompt: Please use sentiment analysis to classify the text as 'positive' or 'negative'.
-Language: chinese, acc: 94.38%, prompt: Please determine whether the emotion of the sentence is 'positive' or 'negative' and categorize it.
-Language: chinese, acc: 96.67%, prompt: Using sentiment analysis, classify the text as 'positive' or 'negative'.
-Language: chinese, acc: 94.38%, prompt: Please answer whether the emotion of the sentence is 'positive' or 'negative' and categorize it.
-Language: chinese, acc: 96.79%, prompt: Categorize the statement as 'positive' or 'negative' based on sentiment analysis.
-Language: chinese, acc: 96.67%, prompt: Please judge this text as 'positive' or 'negative' according to the criteria of sentiment analysis.
-Language: chinese, acc: 95.64%, prompt: Please classify this sentence as 'positive' or 'negative' according to the principles of emotion analysis.
-Language: french, acc: 96.67%, prompt: As a sentiment analysis tool, please answer with 'positive' or 'negative' to classify this statement.
-Language: french, acc: 96.33%, prompt: Determine whether this phrase is 'positive' or 'negative' as a sentiment classification tool.
-Language: french, acc: 96.79%, prompt: Identify the tone of this statement by choosing between 'positive' and 'negative' as a sentiment analysis tool.
-Language: french, acc: 96.79%, prompt: Use sentiment analysis to classify this statement as 'positive' or 'negative'.
-Language: french, acc: 96.56%, prompt: As a sentiment classification tool, please determine whether this statement is 'positive' or 'negative'.
-Language: french, acc: 96.67%, prompt: Classify this sentence as 'positive' or 'negative' using sentiment analysis.
-Language: french, acc: 96.79%, prompt: Choose between 'positive' or 'negative' to classify this statement as a sentiment analysis tool.
-Language: french, acc: 96.79%, prompt: Identify the sentiment expressed in this statement by selecting 'positive' or 'negative' as a sentiment classification tool.
-Language: french, acc: 96.79%, prompt: Determine whether this phrase is 'positive' or 'negative' using sentiment analysis as a classification tool.
-Language: french, acc: 96.79%, prompt: Use sentiment analysis to classify this statement as 'positive' or 'negative'.
-Language: arabic, acc: 96.33%, prompt: Under emotional analysis, answer 'positive' or 'negative' to classify this statement.
-Language: arabic, acc: 96.67%, prompt: Does this statement express a 'positive' or 'negative' reaction?
-Language: arabic, acc: 96.33%, prompt: Is that a 'positive' or a 'negative' phrase?
-Language: arabic, acc: 97.02%, prompt: What is the classification between 'positive' and 'negative'?
-Language: arabic, acc: 96.56%, prompt: Does this sentence express 'positive' or 'negative' feelings?
-Language: arabic, acc: 96.79%, prompt: In the context of textual analysis, what classification is this phrase between 'positive' and 'negative'?
-Language: arabic, acc: 96.79%, prompt: Could this be classified as 'positive' or 'negative'?
-Language: arabic, acc: 96.33%, prompt: In the context of emotional analysis, what classification is this statement between 'positive' and 'negative'?
-Language: arabic, acc: 96.79%, prompt: Can this be classified as 'positive' or 'negative'?
-Language: arabic, acc: 95.18%, prompt: Under the classification of emotions, is this sentence 'positive' or 'negative'?
-Language: spanish, acc: 96.67%, prompt: As a feeling analysis tool, classify this statement as 'positive' or 'negative'.
-Language: spanish, acc: 96.44%, prompt: Determine whether this statement has a 'positive' or 'negative' connotation.
-Language: spanish, acc: 96.90%, prompt: Indicate whether the following statement is 'positive' or 'negative'.
-Language: spanish, acc: 95.87%, prompt: Evaluate whether this text has a 'positive' or 'negative' emotional charge.
-Language: spanish, acc: 96.67%, prompt: According to your sentiment analysis, would you say this comment is 'positive' or 'negative'?
-Language: spanish, acc: 96.67%, prompt: In the context of sentiment analysis, label this sentence as 'positive' or 'negative'.
-Language: spanish, acc: 96.90%, prompt: Rate the following statement as 'positive' or 'negative', according to your sentiment analysis.
-Language: spanish, acc: 96.22%, prompt: How would you classify this text in terms of its emotional tone? 'positive' or 'negative'?
-Language: spanish, acc: 96.67%, prompt: As a tool for sentiment analysis, would you say this statement is 'positive' or 'negative'?
-Language: spanish, acc: 96.90%, prompt: Classify this statement as 'positive' or 'negative', please.
-Language: japanese, acc: 95.30%, prompt: Treat this sentence as an emotion analysis tool and categorize it as 'positive' and 'negative'.
-Language: japanese, acc: 96.79%, prompt: Use this article as a sentiment analysis tool to classify 'positive' and 'negative'.
-Language: japanese, acc: 95.53%, prompt: Use this sentence as an emotion analysis tool to determine whether it is 'positive' or 'negative'.
-Language: japanese, acc: 95.07%, prompt: Use this sentence as an emotion analysis tool to classify 'positive' and 'negative'.
-Language: japanese, acc: 96.56%, prompt: Use this sentence as a sentiment analysis tool and classify it as 'positive' or 'negative'.
-Language: japanese, acc: 96.67%, prompt: To classify this sentence as 'positive' or 'negative', evaluate it as a sentiment analysis tool.
-Language: japanese, acc: 95.76%, prompt: Treat this sentence as an emotion analysis tool to determine whether it is 'positive' or 'negative'.
-Language: japanese, acc: 96.56%, prompt: Use this sentence as a sentiment analysis tool to classify 'positive' and 'negative'.
-Language: japanese, acc: 95.30%, prompt: Analyze this sentence as an emotion analysis tool to classify whether it is 'positive' or 'negative'.
-Language: japanese, acc: 95.76%, prompt: Use this sentence as an emotional analysis tool to determine whether it is 'positive' or 'negative'.
-Language: korean, acc: 96.10%, prompt: As an emotional analysis tool, respond with 'positive' or 'negative' to classify these sentences.
-Language: korean, acc: 96.79%, prompt: Classify this sentence as 'positive' if you regard it as positive, 'negative' if you regard it as negative.
-Language: korean, acc: 95.30%, prompt: Please rate the emotion of this sentence and classify it as 'positive' or 'negative'.
-Language: korean, acc: 97.02%, prompt: Classify this sentence as 'positive' if you perceive it positively and 'negative' if you perceive it negatively.
-Language: korean, acc: 96.56%, prompt: If this is a sentence delivered using a positive expression, classify it as 'positive' and if this is a sentence delivered using a negative expression, classify it as 'negative'.
-Language: korean, acc: 96.67%, prompt: Respond with 'positive' or 'negative' by categorizing whether the sentence is positive or negative.
-Language: korean, acc: 94.38%, prompt: Please analyze the emotion in this sentence and classify it as 'positive' or 'negative'.
-Language: korean, acc: 96.56%, prompt: Classify this sentence as 'positive' if it contains a positive meaning, 'negative' if it contains a negative meaning.
-Language: korean, acc: 96.56%, prompt: Classify this sentence as 'positive' if it contains positive content, 'negative' if it contains negative content.
-Language: korean, acc: 96.56%, prompt: Classify what you're trying to convey in this sentence as 'positive' if it's positive, and 'negative' if it's negative.
-
-# wnli
-
-## 10 prompts
-
-Acc: 77.46%, prompt: Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.
-Acc: 77.46%, prompt: Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment'.
-Acc: 77.46%, prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: Are the following two sentences entailment or not_entailment? Answer me with "entailment" or "not_entailment", just one word.
-Acc: 76.06%, prompt: Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: Examine the pair of sentences and determine if they exhibit entailment or not_entailment. Answer with either 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: Analyze the two provided sentences and decide if their relationship is 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: Review the two statements and categorize their relationship as either 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: Considering the two sentences, identify if their relationship is 'entailment' or 'not_entailment'.
-
-Acc: 77.46%, prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: While performing entailment analysis, classify the relationship between the provided sentences as 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: In the capacity of an entailment assessment system, indicate if the link between the following sentences is 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment'.
-Acc: 76.06%, prompt: In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.
-
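-The accuracies above are presumably obtained by running each prompt over the WNLI evaluation pairs and matching the model's 'entailment'/'not_entailment' reply against the gold label; the 'dropped acc' figures in the attack sections below are then simply original accuracy minus attacked accuracy. A minimal sketch of such a scoring loop, where `query_model` and the pair format are assumptions rather than the actual harness:
-
-```python
-def score_prompt(prompt, pairs, query_model):
-    """Hypothetical scorer: `pairs` is a list of (sentence1, sentence2, label) tuples
-    with label in {'entailment', 'not_entailment'}; `query_model` is the LLM call."""
-    correct = 0
-    for s1, s2, label in pairs:
-        reply = query_model(f"{prompt}\nSentence 1: {s1}\nSentence 2: {s2}").lower()
-        # Check 'not_entailment' first, since 'entailment' is a substring of it.
-        pred = 'not_entailment' if 'not_entailment' in reply else 'entailment'
-        correct += int(pred == label)
-    return 100.0 * correct / len(pairs)
-```
-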
-## bertattack
-
-Original prompt: Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"a the links between the following sentences and classify it as 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 74.65%, dropped acc: 2.82%
-
-Original prompt: Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Please address the relationship between the provided sentences as 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.
-Attacked prompt: b"Indicate if the affiliation between the following sentences is 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Working as an entailment classauxiliary, identify but the verbal a of nouns displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 66.20%, dropped acc: 11.27%
-
-Original prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"In your role as an entailment analysis tool, assess the conversation between the verbal sentences and classify it as 'entailment' or 'not_entailment'. "
-Original acc: 76.06%, attacked acc: 70.42%, dropped acc: 5.63%
-
-Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"As an entailment verbal system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'. "
-Original acc: 76.06%, attacked acc: 74.65%, dropped acc: 1.41%
-
-## checklist
-
-Original prompt: Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment' 5yWbBXztUY. "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment' g0vBZf3tQC. "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.
-Attacked prompt: b"Indicate if the connection between the following sentences is 'entailment' or 'not_entailment' 5XeflW1ZJc. "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment' 5XeflW1ZJc. "
-Original acc: 77.46%, attacked acc: 77.46%, dropped acc: 0.00%
-
-Original prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment' 5XeflW1ZJc. "
-Original acc: 76.06%, attacked acc: 76.06%, dropped acc: 0.00%
-
-Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment' 5XeflW1ZJc. "
-Original acc: 76.06%, attacked acc: 76.06%, dropped acc: 0.00%
-
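-Every checklist perturbation above leaves the prompt intact and appends an irrelevant alphanumeric token (e.g. '5XeflW1ZJc'). A minimal sketch of that kind of suffix attack; the token generator here is illustrative, while the runs above appear to sample from a small fixed pool of such strings:
-
-```python
-import random
-import string
-
-def checklist_style_attack(prompt, length=10, seed=0):
-    # Append a random alphanumeric token to an otherwise unchanged prompt,
-    # mimicking the suffixes seen in the attacked prompts above.
-    rng = random.Random(seed)
-    token = ''.join(rng.choice(string.ascii_letters + string.digits) for _ in range(length))
-    base = prompt.rstrip(' .')
-    return f"{base} {token}."
-```
-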
-## deepwordbug
-
-Original prompt: Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Assuss the connection between the following sensences and classify it as 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 74.65%, dropped acc: 2.82%
-
-Original prompt: Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Please classify the relationNship between the provided sentences as 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 74.65%, dropped acc: 2.82%
-
-Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.
-Attacked prompt: b"Indicate if the conection between the following sentences is 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Working as an entailment classifeir, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'. "
-Original acc: 76.06%, attacked acc: 76.06%, dropped acc: 0.00%
-
-Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'. "
-Original acc: 76.06%, attacked acc: 76.06%, dropped acc: 0.00%
-
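-The deepwordbug perturbations above are small character-level edits (a swapped, dropped, inserted, or substituted letter inside individual words). A minimal sketch of one such edit; the actual attack additionally ranks which words to target, which is not reproduced here:
-
-```python
-import random
-
-def swap_adjacent_chars(word, rng):
-    # One DeepWordBug-style edit: swap two neighbouring interior characters of a word.
-    if len(word) < 4:
-        return word
-    i = rng.randrange(1, len(word) - 2)
-    return word[:i] + word[i + 1] + word[i] + word[i + 2:]
-
-def deepwordbug_style_attack(prompt, n_edits=2, seed=0):
-    rng = random.Random(seed)
-    words = prompt.split()
-    for idx in rng.sample(range(len(words)), k=min(n_edits, len(words))):
-        words[idx] = swap_adjacent_chars(words[idx], rng)
-    return ' '.join(words)
-```
-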
-## stresstest
-
-Original prompt: Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment' and false is not true . "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment' and false is not true . "
-Original acc: 77.46%, attacked acc: 74.65%, dropped acc: 2.82%
-
-Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.
-Attacked prompt: b"Indicate if the connection between the following sentences is 'entailment' or 'not_entailment' and false is not true . "
-Original acc: 77.46%, attacked acc: 74.65%, dropped acc: 2.82%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment' and false is not true . "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment' and false is not true . "
-Original acc: 76.06%, attacked acc: 74.65%, dropped acc: 1.41%
-
-Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment' and false is not true . "
-Original acc: 76.06%, attacked acc: 76.06%, dropped acc: 0.00%
-
-## textbugger
-
-Original prompt: Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Assss the connection between the following sentences and classify it as 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Please cassify the relationship between the provided sentences as 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.
-Attacked prompt: b"Indicate if the con nection between the following sentences is 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 76.06%, dropped acc: 1.41%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Working as an entailment cla ssifier, identify whether the given pair of punishments displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 74.65%, dropped acc: 2.82%
-
-Original prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"In your role as an entailment analysis tool, assess the relationship between the given punishments and categorize it as 'entailment' or 'not_entailment'. "
-Original acc: 76.06%, attacked acc: 71.83%, dropped acc: 4.23%
-
-Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'. "
-Original acc: 76.06%, attacked acc: 76.06%, dropped acc: 0.00%
-
-## textfooler
-
-Original prompt: Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Appraisal the attach between the suite condemns and classify it as 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 73.24%, dropped acc: 4.23%
-
-Original prompt: Please classify the relationship between the provided sentences as 'entailment' or 'not_entailment'.
-Attacked prompt: b"Please hierarchy the relationship between the provided punishment as 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 74.65%, dropped acc: 2.82%
-
-Original prompt: Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.
-Attacked prompt: b"Indicate if the login between the following sentences is 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 74.65%, dropped acc: 2.82%
-
-Original prompt: Working as an entailment classifier, identify whether the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"Working as an entailment classifier, identify whether the given couple of condemnation displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'. "
-Original acc: 77.46%, attacked acc: 74.65%, dropped acc: 2.82%
-
-Original prompt: In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.
-Attacked prompt: b"In your role as an entailment analysis tool, assess the reports between the given sentences and classify it as 'entailment' or 'not_entailment'. "
-Original acc: 76.06%, attacked acc: 73.24%, dropped acc: 2.82%
-
-Original prompt: As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.
-Attacked prompt: b"As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'. "
-Original acc: 76.06%, attacked acc: 76.06%, dropped acc: 0.00%
-
-## translation
-
-Language: chinese, acc: 77.46%, prompt: In the light of an implication analysis tool, evaluate the relationship between the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 76.06%, prompt: From the perspective of an implication analysis tool, determine whether there is an implication relationship in the following sentences by classifying them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 76.06%, prompt: Please use an implication analysis tool to determine whether an implication relationship exists in the following sentences by classifying them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 76.06%, prompt: Please evaluate the relation of the following sentences as 'entailment' or 'not_entailment' for the purpose of determining implication relation.
-Language: chinese, acc: 77.46%, prompt: Please use the implication analysis tool to evaluate the relationships between the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 74.65%, prompt: For the purpose of determining implicative relations, analyze the relations of the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 77.46%, prompt: Please use the implication analysis tool to determine the relationship of the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 76.06%, prompt: Please use the implication judgment tool to assess the relevance of the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 77.46%, prompt: Please, with implication analysis as the main task, determine the relationships between the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: chinese, acc: 76.06%, prompt: Using the implication judgment as a criterion, analyze the relation of the following sentences and classify them as 'entailment' or 'not_entailment'.
-Language: french, acc: 77.46%, prompt: As an engagement analysis tool, evaluate the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.
-Language: french, acc: 76.06%, prompt: Determine whether the given sentences involve one another or not as an implication analysis tool. Classify them accordingly as 'entailment' or 'not_entailment'.
-Language: french, acc: 76.06%, prompt: Using implication analysis, evaluate whether the sentences provided have a logical relationship and categorize them as 'entailment' or 'not_entailment'.
-Language: french, acc: 77.46%, prompt: As an engagement assessment tool, determine whether the sentences provided have a logical relationship and classify them as 'entailment' or 'not_entailment'.
-Language: french, acc: 76.06%, prompt: As an implication classification tool, analyze the sentences provided to determine if there is a logical relationship and categorize them as 'entailment' or 'not_entailment'.
-Language: french, acc: 74.65%, prompt: Using implication analysis, determine whether the given sentences have a cause-effect relationship and categorize them as 'entailment' or 'not_entailment'.
-Language: french, acc: 77.46%, prompt: Evaluate the relationship between the given sentences using implication analysis and rank them accordingly as 'entailment' or 'not_entailment'.
-Language: french, acc: 77.46%, prompt: As an engagement detection tool, determine whether the given sentences have a logical relationship and categorize them as 'entailment' or 'not_entailment'.
-Language: french, acc: 77.46%, prompt: Using implication analysis, evaluate whether the sentences provided have a cause-effect relationship and rank them accordingly as 'entailment' or 'not_entailment'.
-Language: french, acc: 74.65%, prompt: Determine whether the given sentences have a cause-effect relationship as an engagement analysis tool and categorize them as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 77.46%, prompt: In your role as a tool for reasoning analysis, evaluate the relationship between given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 77.46%, prompt: Can you determine whether this sentence is inferred from the other sentence? Classify it as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 77.46%, prompt: Using the tool of reasoning analysis, analyze the relationship between given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 77.46%, prompt: Does this sentence represent a conclusion from the previous sentence? Classify it as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 77.46%, prompt: As a tool of reasoning analysis, evaluate the relationship of given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 77.46%, prompt: Can this sentence be inferred from the previous sentence? Classify it as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 77.46%, prompt: Using a tool to analyze a conclusion, analyze the relationship between the two sentences and classify them as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 77.46%, prompt: Is this a conclusion from the next sentence? Classify it as 'entailment' or 'not_entailment'.
-Language: arabic, acc: 77.46%, prompt: As part of your task in analyzing a conclusion, evaluate the relationship between the two sentences and classify them as 'entailment' or 'not_entailment' based on their relationship.
-Language: arabic, acc: 77.46%, prompt: Are you following this sentence directly from the previous one? Classify it as 'entailment' or 'not_entailment'.
-Language: spanish, acc: 76.06%, prompt: In your role as an implication analysis tool, evaluate the relationship between the given phrases and classify them as 'entailment' or 'not_entailment'.
-Language: spanish, acc: 76.06%, prompt: Determine whether the second sentence necessarily implies the first and label the relation as 'entailment', or as 'not_entailment' if not.
-Language: spanish, acc: 76.06%, prompt: Classifies the relationship between these two sentences as 'entailment' if one necessarily implies the other, or as 'not_entailment' if not.
-Language: spanish, acc: 76.06%, prompt: Evaluates whether the information in the second sentence is implied in the first and labels the relationship as 'entailment', or as 'not_entailment' if there is no such implication.
-Language: spanish, acc: 74.65%, prompt: Given a couple of phrases, label their relationship as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication.
-Language: spanish, acc: 74.65%, prompt: Analyzes the relationship between the phrases and classifies them as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication.
-Language: spanish, acc: 74.65%, prompt: Given two sentences, determine whether the second sentence is a necessary consequence of the first and label the relation as 'entailment', or as 'not_entailment' if not.
-Language: spanish, acc: 74.65%, prompt: Evaluates whether the information presented in the second sentence is implicit in the first and labels the relationship as 'entailment', or as 'not_entailment' if there is no such implication.
-Language: spanish, acc: 74.65%, prompt: Classifies the relationship between the given phrases as 'entailment' if one necessarily implies the other, or as 'not_entailment' if there is no such implication.
-Language: spanish, acc: 77.46%, prompt: Determines whether the information provided in the second sentence is necessarily inferable from the first and labels the relationship as 'entailment', or as 'not_entailment' if not.
-Language: japanese, acc: 77.46%, prompt: Analyze the relationship of a given sentence and classify it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 76.06%, prompt: Evaluate the semantic relationship of the sentence and classify it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 77.46%, prompt: Please judge the relationship between the given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 80.28%, prompt: Examine the paraphrases of a given sentence and classify them 'entailment' or 'not_entailment'.
-Language: japanese, acc: 77.46%, prompt: Rate the similarity of a given sentence and categorize it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 76.06%, prompt: Determinate the semantic connections of a given sentence and classify it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 76.06%, prompt: Examine the semantic match of a given sentence and categorize it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 76.06%, prompt: Classify it as 'entailment' or 'not_entailment' based on the content of the sentence.
-Language: japanese, acc: 77.46%, prompt: Analyze the relationship of a given sentence and classify it as 'entailment' or 'not_entailment'.
-Language: japanese, acc: 76.06%, prompt: Judge the semantic connections of a given sentence and categorize it as 'entailment' or 'not_entailment'.
-Language: korean, acc: 77.46%, prompt: Evaluate the relationship between any two sentences given to you and classify you as 'entailment' or 'not_entailment'.
-Language: korean, acc: 76.06%, prompt: Analyze the semantic deductive relations between sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 76.06%, prompt: Evaluate the logical relevance between sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 76.06%, prompt: Evaluate the interaction of two given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 76.06%, prompt: Please check whether there is a semantic match between those two sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 76.06%, prompt: Compare information between given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 77.46%, prompt: Please analyse the correlation between those two sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 77.46%, prompt: Evaluate the different meanings between given sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 76.06%, prompt: Compare the semantic structure of the two sentences and classify them as 'entailment' or 'not_entailment'.
-Language: korean, acc: 77.46%, prompt: Evaluate the interactions between sentences and classify them as 'entailment' or 'not_entailment'.
-
-# mmlu
-
-## 10 prompts
-
-Acc: 54.08%, prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'.
-Acc: 53.90%, prompt: Below are multiple-choice question concerning {}. Indicate your response with 'A', 'B', 'C', or 'D'.
-Acc: 53.90%, prompt: Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D'.
-Acc: 53.90%, prompt: Evaluate the multiple-choice question about {} and select the most fitting response from 'A', 'B', 'C', or 'D'.
-Acc: 53.90%, prompt: Examine the following question based on {} and choose the correct response from 'A', 'B', 'C', or 'D'.
-Acc: 53.90%, prompt: Answer the subsequent multiple-choice question about {} by picking the right option among 'A', 'B', 'C', or 'D'.
-Acc: 53.55%, prompt: Answer the following multiple-choice question about {} by selecting the correct option: 'A', 'B', 'C', or 'D'.
-Acc: 53.37%, prompt: In relation to the multiple-choice question on {}, please provide the accurate answer by choosing 'A', 'B', 'C', or 'D'.
-Acc: 53.19%, prompt: For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D'.
-Acc: 53.01%, prompt: Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'.
-
-Acc: 54.08%, prompt: Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D'.
-Acc: 53.72%, prompt: As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'.
-Acc: 53.55%, prompt: Given your proficiency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D'.
-Acc: 53.55%, prompt: With your knowledge of {}, tackle the following multiple-choice question by choosing 'A', 'B', 'C', or 'D'.
-Acc: 53.55%, prompt: Utilizing your expertise in {}, answer the following multiple-choice question by picking 'A', 'B', 'C', or 'D'.
-Acc: 53.55%, prompt: With your understanding of {}, kindly answer the subsequent multiple-choice question by selecting 'A', 'B', 'C', or 'D'.
-Acc: 53.37%, prompt: As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'.
-Acc: 53.37%, prompt: As a knowledgeable individual in {}, provide your response to the multiple-choice question by choosing 'A', 'B', 'C', or 'D'.
-Acc: 53.37%, prompt: As a skilled person in the field of {}, please respond to the multiple-choice question by choosing 'A', 'B', 'C', or 'D'.
-Acc: 53.01%, prompt: Drawing upon your mastery of {}, please answer the multiple-choice question by selecting the correct option from 'A', 'B', 'C', or 'D'.
-
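-The '{}' placeholder in each MMLU prompt is filled with the question's subject before the choices are appended, and the reported accuracy is the fraction of questions whose 'A'/'B'/'C'/'D' letter is answered correctly. A minimal sketch of that templating and letter extraction, with the exact question layout assumed rather than taken from the harness:
-
-```python
-import re
-
-def build_query(template, subject, question, choices):
-    # choices: list of four strings, rendered as options A-D after the instruction.
-    header = template.format(subject)
-    options = '\n'.join(f"{letter}. {text}" for letter, text in zip('ABCD', choices))
-    return f"{header}\n{question}\n{options}\nAnswer:"
-
-def extract_choice(reply):
-    # Take the first standalone A/B/C/D letter in the model's reply, if any.
-    match = re.search(r'\b([ABCD])\b', reply.strip().upper())
-    return match.group(1) if match else None
-```
-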
-## bertattack
-
-Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"for the multiple-choice clue related to {}, please choose the most accurate text from 'A', 'B', 'C', or 'D'. "
-Original acc: 54.08%, attacked acc: 52.13%, dropped acc: 1.95%
-
-Original prompt: Below are multiple-choice question concerning {}. Indicate your response with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Below are multiple-guessing guess concerning {}. begin your response with 'A', 'B', 'C', or 'D'. "
-Original acc: 53.90%, attacked acc: 51.24%, dropped acc: 2.66%
-
-Original prompt: Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"an the following multiple-options question on {}, pick the better answer from the words 'A', 'B', 'C', or 'D'. "
-Original acc: 53.90%, attacked acc: 52.30%, dropped acc: 1.60%
-
-Original prompt: Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"considering your familiarity with {}, order to the following multiple-picked question by starting 'A', 'B', 'C', or 'D'."
-Original acc: 54.08%, attacked acc: 51.77%, dropped acc: 2.30%
-
-Original prompt: As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"on an amateur in {}, respond to the following multiple-choice clue by selecting 'A', 'B', 'C', or 'D'."
-Original acc: 53.72%, attacked acc: 52.66%, dropped acc: 1.06%
-
-Original prompt: Given your proficiency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"provided your degree in {}, please answer the subsequent multiple-name question with 'A', 'B', 'C', or 'D'."
-Original acc: 53.55%, attacked acc: 52.13%, dropped acc: 1.42%
-
-## checklist
-
-Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D' xikCjkMydH. "
-Original acc: 54.08%, attacked acc: 53.19%, dropped acc: 0.89%
-
-Original prompt: Below are multiple-choice question concerning {}. Indicate your response with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Below are multiple-choice question concerning {}. Indicate your response with 'A', 'B', 'C', or 'D' RcRneWo6Iv. "
-Original acc: 53.90%, attacked acc: 52.84%, dropped acc: 1.06%
-
-Original prompt: Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D' EAB4KP2NVY. "
-Original acc: 53.90%, attacked acc: 53.19%, dropped acc: 0.71%
-
-Original prompt: Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D' Q47Jmd4lMV."
-Original acc: 54.08%, attacked acc: 53.01%, dropped acc: 1.06%
-
-Original prompt: As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D' lf8wBa2yEm."
-Original acc: 53.72%, attacked acc: 53.01%, dropped acc: 0.71%
-
-Original prompt: Given your proficiency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Given your proficiency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D' IhuBIhoPGc."
-Original acc: 53.55%, attacked acc: 53.55%, dropped acc: 0.00%
-
-## deepwordbug
-
-Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Fro the multiple-choice queKstion related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'. "
-Original acc: 54.08%, attacked acc: 53.01%, dropped acc: 1.06%
-
-Original prompt: Below are multiple-choice question concerning {}. Indicate your response with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Belvw are multiple-chioce question conecrning {}. IndicaZe your response with 'A', 'B', 'C', or 'D'. "
-Original acc: 53.90%, attacked acc: 52.30%, dropped acc: 1.60%
-
-Original prompt: Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"RegOarding the foVlowing multiple-choice question on {}, pick the correct answer from the optoins 'A', 'B', 'C', or 'D'. "
-Original acc: 53.90%, attacked acc: 53.19%, dropped acc: 0.71%
-
-Original prompt: Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Considering your familiarity with {}, atKtend to the follownig multipleNchoice juestion by picking 'A', 'B', 'C', or 'D'."
-Original acc: 54.08%, attacked acc: 52.66%, dropped acc: 1.42%
-
-Original prompt: As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"As an experV in {}, respond to the following muliple-choice quesrion by selectnig 'A', 'B', 'C', or 'D'."
-Original acc: 53.72%, attacked acc: 52.84%, dropped acc: 0.89%
-
-Original prompt: Given your proficiency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Given your proifciency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D'."
-Original acc: 53.55%, attacked acc: 53.19%, dropped acc: 0.35%
-
-## stresstest
-
-Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D' and false is not true . "
-Original acc: 54.08%, attacked acc: 52.30%, dropped acc: 1.77%
-
-Original prompt: Below are multiple-choice question concerning {}. Indicate your response with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Below are multiple-choice question concerning {}. Indicate your response with 'A', 'B', 'C', or 'D' and false is not true . "
-Original acc: 53.90%, attacked acc: 51.77%, dropped acc: 2.13%
-
-Original prompt: Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D' and false is not true . "
-Original acc: 53.90%, attacked acc: 52.84%, dropped acc: 1.06%
-
-Original prompt: Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D' and false is not true ."
-Original acc: 54.08%, attacked acc: 51.42%, dropped acc: 2.66%
-
-Original prompt: As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D' and false is not true ."
-Original acc: 53.72%, attacked acc: 52.66%, dropped acc: 1.06%
-
-Original prompt: Given your proficiency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Given your proficiency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D' and false is not true ."
-Original acc: 53.55%, attacked acc: 52.30%, dropped acc: 1.24%
-
-## textbugger
-
-Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"For the multiple-\xcf\xb2hoice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'. "
-Original acc: 54.08%, attacked acc: 53.55%, dropped acc: 0.53%
-
-Original prompt: Below are multiple-choice question concerning {}. Indicate your response with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Bel\xd0\xbew are multipl-echoice qu\xd0\xb5stion concening {}. Indicate your response with 'A', 'B', 'C', or 'D'. "
-Original acc: 53.90%, attacked acc: 52.30%, dropped acc: 1.60%
-
-Original prompt: Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Regardin\xc9\xa1 the following multiple-choice question on {}, pick the correct answer from the optoins 'A', 'B', 'C', or 'D'. "
-Original acc: 53.90%, attacked acc: 53.19%, dropped acc: 0.71%
-
-Original prompt: Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Considering your familiarity with {}, attend to the following mutliple-choice matter by picking 'A', 'B', 'C', or 'D'."
-Original acc: 54.08%, attacked acc: 52.48%, dropped acc: 1.60%
-
-Original prompt: As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"As an whiz in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'."
-Original acc: 53.72%, attacked acc: 52.66%, dropped acc: 1.06%
-
-Original prompt: Given your proficiency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Given your proifciency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D'."
-Original acc: 53.55%, attacked acc: 53.19%, dropped acc: 0.35%
-
-## textfooler
-
-Original prompt: For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Within the multiple-choice item relevant to {}, please decides the most careful rebuttal from 'A', 'B', 'C', or 'D'. "
-Original acc: 54.08%, attacked acc: 24.82%, dropped acc: 29.26%
-
-Original prompt: Below are multiple-choice question concerning {}. Indicate your response with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Under are multiple-choice subjects implying {}. Indicate your interventions with 'A', 'B', 'C', or 'D'. "
-Original acc: 53.90%, attacked acc: 52.13%, dropped acc: 1.77%
-
-Original prompt: Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Relate the following multiple-choice things on {}, taking the correct answer from the alternating 'A', 'B', 'C', or 'D'. "
-Original acc: 53.90%, attacked acc: 52.30%, dropped acc: 1.60%
-
-Original prompt: Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Considering your familiarity with {}, attend to the following multiple-choice issues by picking 'A', 'B', 'C', or 'D'."
-Original acc: 54.08%, attacked acc: 52.84%, dropped acc: 1.24%
-
-Original prompt: As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"As an whiz in {}, respond to the following multiple-choice question by decided 'A', 'B', 'C', or 'D'."
-Original acc: 53.72%, attacked acc: 51.95%, dropped acc: 1.77%
-
-Original prompt: Given your proficiency in {}, please answer the subsequent multiple-choice question with 'A', 'B', 'C', or 'D'.
-Attacked prompt: b"Given your prerogatives in {}, please answers the subsequent multiple-choice subject with 'A', 'B', 'C', or 'D'."
-Original acc: 53.55%, attacked acc: 52.48%, dropped acc: 1.06%
-
-## translation
-
-Language: chinese, acc: 53.72%, prompt: For the following multiple choice question about {}, please select the correct answer from 'A', 'B', 'C' or 'D'.
-Language: chinese, acc: 53.72%, prompt: Please select the correct answer from 'A', 'B', 'C' or 'D' for the following multiple choice question for {}.
-Language: chinese, acc: 54.26%, prompt: Select the correct answer from 'A', 'B', 'C' or 'D' for the following multiple choice question {}.
-Language: chinese, acc: 53.90%, prompt: Please choose the correct answer from 'A', 'B', 'C' or 'D' according to the following multiple-choice questions about {}.
-Language: chinese, acc: 53.72%, prompt: Please select the correct answer from 'A', 'B', 'C' or 'D' for the {} multiple choice questions below.
-Language: chinese, acc: 53.72%, prompt: The following is A multiple choice question about {}. Please select the correct answer from 'A', 'B', 'C' or 'D'.
-Language: chinese, acc: 53.72%, prompt: Please select the correct answer from 'A', 'B', 'C' or 'D' for the following multiple choice question {}.
-Language: chinese, acc: 53.90%, prompt: Please choose the correct answer from 'A', 'B', 'C' or 'D' according to the following multiple-choice questions about {}.
-Language: chinese, acc: 53.90%, prompt: Please select the correct answer from 'A', 'B', 'C' or 'D' for the following multiple choice questions about {}.
-Language: chinese, acc: 53.90%, prompt: Please select the correct answer from 'A', 'B', 'C' or 'D' for the following multiple choice questions about {}.
-Language: french, acc: 54.26%, prompt: For the following multiple choice question on {}, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: french, acc: 53.90%, prompt: This is a multiple choice question about {}. Select the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: french, acc: 53.90%, prompt: In the context of the multiple-choice question on {}, identify the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: french, acc: 54.08%, prompt: About the following question on {}, determine the correct answer from the choices 'A', 'B', 'C' or 'D'.
-Language: french, acc: 53.72%, prompt: Carefully review the multiple-choice question regarding {}. Choose the correct answer from options 'A', 'B', 'C', or 'D'.
-Language: french, acc: 53.55%, prompt: For the multiple-choice question for {}, indicate the correct answer from options 'A', 'B', 'C', or 'D'.
-Language: french, acc: 54.08%, prompt: The next question is about {}. Select the correct answer from the choices 'A', 'B', 'C' or 'D'.
-Language: french, acc: 54.26%, prompt: As part of the multiple-choice question on {}, choose the appropriate answer from options 'A', 'B', 'C' or 'D'.
-Language: french, acc: 53.72%, prompt: Rate your understanding of the multiple-choice question on {}. Choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: french, acc: 53.90%, prompt: Analyze the following multiple-choice question on {}. Identify the correct answer among choices 'A', 'B', 'C' or 'D'.
-Language: arabic, acc: 53.72%, prompt: For the multiple choice question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: arabic, acc: 53.90%, prompt: For the following multiple-choice question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: arabic, acc: 54.08%, prompt: For the following multiple choice question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: arabic, acc: 53.72%, prompt: When it comes to the multiple-choice question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: arabic, acc: 53.72%, prompt: For the multiple-choice question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: arabic, acc: 53.72%, prompt: If the question for {} is multiple choice, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: arabic, acc: 54.08%, prompt: For the question regarding {}, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: arabic, acc: 53.72%, prompt: For the question about {}, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: arabic, acc: 53.37%, prompt: When it comes to the question regarding {}, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: arabic, acc: 54.08%, prompt: For the question regarding {}, choose the correct answer from options 'A', 'B', 'C' or 'D'.
-Language: spanish, acc: 53.72%, prompt: For the following multiple-choice question about {}, choose the correct answer from 'A', 'B', 'C', or 'D'.
-Language: spanish, acc: 53.72%, prompt: For the following multiple-choice question about {}, select the correct answer from 'A', 'B', 'C', or 'D'.
-Language: spanish, acc: 53.72%, prompt: For the following multiple-choice question about {}, choose the correct answer from 'A', 'B', 'C', or 'D'.
-Language: spanish, acc: 54.26%, prompt: Within the context of the following multiple-choice question about {}, choose the correct option from 'A', 'B', 'C', or 'D'.
-Language: spanish, acc: 53.90%, prompt: For the following multiple-choice statement about {}, select the correct answer from 'A', 'B', 'C', or 'D'.
-Language: spanish, acc: 54.08%, prompt: Considering the following multiple-choice question about {}, mark the correct answer with 'A', 'B', 'C', or 'D'.
-Language: spanish, acc: 53.90%, prompt: For the following multiple-choice question about {}, choose the correct alternative among 'A', 'B', 'C' or 'D'.
-Language: spanish, acc: 53.90%, prompt: For the following multiple-choice statement about {}, choose the correct option from alternatives 'A', 'B', 'C', or 'D'.
-Language: spanish, acc: 53.90%, prompt: Within the context of the following multiple-choice question about {}, select the correct answer from alternatives 'A', 'B', 'C', or 'D'.
-Language: spanish, acc: 54.26%, prompt: Considering the following multiple-choice statement about {}, mark the correct alternative with the options 'A', 'B', 'C' or 'D'.
-Language: japanese, acc: 54.43%, prompt: Choose the appropriate answer from options 'A', 'B', 'C', or 'D' for {} regarding the following question.
-Language: japanese, acc: 54.26%, prompt: Choose the correct answer from 'A', 'B', 'C', or 'D' for the following multiple-choice question about {}.
-Language: japanese, acc: 54.26%, prompt: For the following multiple-choice questions about {}, choose the correct answer from 'A', 'B', 'C', or 'D'.
-Language: japanese, acc: 53.90%, prompt: Choose the correct answer from options 'A', 'B', 'C', or 'D' for the following questions about {}.
-Language: japanese, acc: 52.84%, prompt: In the multiple choice questions below, choose the correct answer for {} from 'A', 'B', 'C', or 'D'.
-Language: japanese, acc: 54.08%, prompt: Choose the correct answer from the options 'A', 'B', 'C', or 'D' for the following questions about {}.
-Language: japanese, acc: 52.84%, prompt: In the multiple choice questions below, choose the correct answer for {} from 'A', 'B', 'C', or 'D'.
-Language: japanese, acc: 54.43%, prompt: Choose the correct answer from 'A', 'B', 'C', or 'D' for the following multiple choice questions about {}.
-Language: japanese, acc: 52.84%, prompt: In the multiple choice questions below, choose the correct answer for {} from 'A', 'B', 'C', or 'D'.
-Language: japanese, acc: 54.61%, prompt: Choose the correct answer from options 'A', 'B', 'C', or 'D' for {} regarding the following question.
-Language: korean, acc: 53.19%, prompt: For the multiple choice problem about, choose the correct answer for '{}' from 'A', 'B', 'C', or 'D'.
-Language: korean, acc: 53.37%, prompt: Choose the correct answer for '{}' from 'A', 'B', 'C', or 'D' in the multiple choice problem involving,
-Language: korean, acc: 53.19%, prompt: For the multiple choice problem below, choose the correct answer to '{}' from 'A', 'B', 'C', or 'D'.
-Language: korean, acc: 53.37%, prompt: In the following multiple-choice problem, choose the correct answer for '{}' from 'A', 'B', 'C', or 'D'.
-Language: korean, acc: 53.19%, prompt: For the following multiple choice problem, choose the correct answer for '{}' from 'A', 'B', 'C', or 'D'.
-Language: korean, acc: 54.43%, prompt: Solve multiple choice problems about: Which of 'A', 'B', 'C', or 'D' is the correct answer for '{}'.
-Language: korean, acc: 51.95%, prompt: Choose the correct answer to the multiple-choice question below. Is '{}' an 'A', 'B', 'C', or 'D'.
-Language: korean, acc: 53.72%, prompt: Solve the following multiple-choice problem. Choose the correct answer for '{}' from 'A', 'B', 'C', or 'D'.
-Language: korean, acc: 53.01%, prompt: Choose the correct answer to the following multiple choice problem: Is '{}' 'A', 'B', 'C', or 'D'.
-Language: korean, acc: 53.90%, prompt: Solve multiple-choice problems about: Please select 'A', 'B', 'C', or 'D' for the correct answer to '{}'.
-
-# squad_v2
-
-## 10 prompts
-
-Acc: 91.05%, prompt: Identify the most relevant answer from the context. If it's not possible to find an answer, state 'unanswerable'.
-Acc: 91.05%, prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.
-Acc: 91.05%, prompt: Search the context for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'.
-Acc: 90.55%, prompt: Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable'.
-Acc: 90.55%, prompt: Please extract the most appropriate answer from the context. If an answer is not present, indicate 'unanswerable'.
-Acc: 90.55%, prompt: Locate the most accurate answer within the context. If the context doesn't provide an answer, mention 'unanswerable'.
-Acc: 90.55%, prompt: Discover the best answer based on the context. If the context doesn't include an answer, say 'unanswerable'.
-Acc: 90.05%, prompt: Based on the given context, provide the best possible answer. If there's no answer available in the context, reply with 'unanswerable'.
-Acc: 90.05%, prompt: Using the context, determine the most suitable answer. If the context doesn't contain the answer, write 'unanswerable'.
-Acc: 89.88%, prompt: From the context, provide the most precise answer. If the answer is not in the context, respond with 'unanswerable'.
-
-Acc: 90.55%, prompt: Drawing upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable'.
-Acc: 90.55%, prompt: Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'.
-Acc: 90.05%, prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'.
-Acc: 90.05%, prompt: As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable'.
-Acc: 90.05%, prompt: As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'.
-Acc: 89.88%, prompt: As a well-informed specialist familiar with the context, provide an answer to the question. If the context doesn't contain an answer, reply with 'unanswerable'.
-Acc: 89.88%, prompt: Using your knowledge of the context, identify the best answer to the question. If the context doesn't provide an answer, write 'unanswerable'.
-Acc: 89.88%, prompt: With your extensive knowledge of the context, answer the question accurately. If the context doesn't contain the answer, reply with 'unanswerable'.
-Acc: 89.55%, prompt: As a subject matter expert, extract the correct answer from the context. If an answer is not present, indicate 'unanswerable'.
-Acc: 89.38%, prompt: Drawing on your expertise in the context, provide the most precise answer. If the answer is not in the context, respond with 'unanswerable'.
-
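-For squad_v2 the prompt is prepended to the passage and question, and questions without a gold answer count as correct when the model replies 'unanswerable'. A minimal sketch of that query construction and the no-answer check; the exact formatting and the scoring behind the reported percentages are assumptions here:
-
-```python
-def build_squad_query(prompt, context, question):
-    # Assumed layout: instruction, then passage, then the question.
-    return f"{prompt}\nContext: {context}\nQuestion: {question}\nAnswer:"
-
-def is_correct(reply, gold_answers):
-    # gold_answers is empty for unanswerable questions in SQuAD v2.
-    reply = reply.strip().lower()
-    if not gold_answers:
-        return 'unanswerable' in reply
-    return any(ans.lower() in reply for ans in gold_answers)
-```
-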
-## bertattack
-
-Original prompt: Identify the most relevant answer from the context. If it's not possible to find an answer, state 'unanswerable'.
-Attacked prompt: b"Identify the most irrelevant answer from the context. If it's not possible to find an statement, begin 'unanswerable'."
-Original acc: 91.05%, attacked acc: 44.50%, dropped acc: 46.55%
-
-Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"[UNK] derive the most fitting answer from the context. a there isn't an answer in the context, draw 'unanswerable'."
-Original acc: 91.05%, attacked acc: 88.99%, dropped acc: 2.06%
-
-Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'.
-Attacked prompt: b"Search the context for the most irrelevant value. If the answer cannot be q, respond by doing 'unanswerable'."
-Original acc: 91.05%, attacked acc: 44.50%, dropped acc: 46.55%
-
-Original prompt: Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"as well-versed in the context, please derive the most fitting answer. If there isn't an answer in the metre, use 'unanswerable'."
-Original acc: 90.55%, attacked acc: 89.38%, dropped acc: 1.17%
-
-Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'.
-Attacked prompt: b"and an authority on the context, designate the most inaccurate answer. find the context doesn't contain the answer, identify 'unanswerable'."
-Original acc: 90.05%, attacked acc: 44.50%, dropped acc: 45.55%
-
-Original prompt: As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable'.
-Attacked prompt: b"[UNK] an expert with a deep understanding of the context, write the wrong ask. If the context doesn't include an query, add 'unanswerable'."
-Original acc: 90.05%, attacked acc: 44.50%, dropped acc: 45.55%
-
-## checklist
-
-Original prompt: Identify the most relevant answer from the context. If it's not possible to find an answer, state 'unanswerable'.
-Attacked prompt: b"Identify the most relevant answer from the context. If it's not possible to find an answer, state 'unanswerable' 5yWbBXztUY."
-Original acc: 91.05%, attacked acc: 90.05%, dropped acc: 1.00%
-
-Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable' 5XeflW1ZJc."
-Original acc: 91.05%, attacked acc: 90.55%, dropped acc: 0.50%
-
-Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'.
-Attacked prompt: b"Search the context for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable' 9vGXgnbWB8."
-Original acc: 91.05%, attacked acc: 90.88%, dropped acc: 0.17%
-
-Original prompt: Drawing upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable'.
-Attacked prompt: b"Drawing upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable' SvAp8RlOFn."
-Original acc: 90.55%, attacked acc: 89.38%, dropped acc: 1.17%
-
-Original prompt: Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable' 6a4Yn3RGVc."
-Original acc: 90.55%, attacked acc: 89.55%, dropped acc: 1.00%
-
-Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'.
-Attacked prompt: b"As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable' 6a4Yn3RGVc."
-Original acc: 90.05%, attacked acc: 89.55%, dropped acc: 0.50%
-
-## deepwordbug
-
-Original prompt: Identify the most relevant answer from the context. If it's not possible to find an answer, state 'unanswerable'.
-Attacked prompt: b"Identify the most rDlevant answer from the context. If it's not opssible to find an answer, tate 'unanswerable'."
-Original acc: 91.05%, attacked acc: 89.38%, dropped acc: 1.67%
-
-Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"Please derive the most fitting answer from the contjext. If there isn't an ansBer in the conteet, se 'unanswerable'."
-Original acc: 91.05%, attacked acc: 89.49%, dropped acc: 1.56%
-
-Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'.
-Attacked prompt: b"Search the context for the most elevant answer. If the answer cannot be fund, respond by saying 'unanswerable'."
-Original acc: 91.05%, attacked acc: 90.05%, dropped acc: 1.00%
-
-Original prompt: Drawing upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable'.
-Attacked prompt: b"DrawiBg upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable'."
-Original acc: 90.55%, attacked acc: 89.55%, dropped acc: 1.00%
-
-Original prompt: Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"eBing well-versed in the contet, please drive the most fitting answer. If there isn't an answer in the context, Euse 'unanswerable'."
-Original acc: 90.55%, attacked acc: 89.32%, dropped acc: 1.23%
-
-Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'.
-Attacked prompt: b"As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'."
-Original acc: 90.05%, attacked acc: 90.05%, dropped acc: 0.00%
-
-## stresstest
-
-Original prompt: Identify the most relevant answer from the context. If it's not possible to find an answer, state 'unanswerable'.
-Attacked prompt: b"Identify the most relevant answer from the context. If it's not possible to find an answer, state 'unanswerable' and true is true ."
-Original acc: 91.05%, attacked acc: 89.05%, dropped acc: 2.00%
-
-Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable' and true is true and true is true and true is true and true is true and true is true ."
-Original acc: 91.05%, attacked acc: 90.99%, dropped acc: 0.06%
-
-Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'.
-Attacked prompt: b"Search the context for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable' and false is not true ."
-Original acc: 91.05%, attacked acc: 89.88%, dropped acc: 1.17%
-
-Original prompt: Drawing upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable'.
-Attacked prompt: b"Drawing upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable' and false is not true ."
-Original acc: 90.55%, attacked acc: 90.05%, dropped acc: 0.50%
-
-Original prompt: Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable' and true is true and true is true and true is true and true is true and true is true ."
-Original acc: 90.55%, attacked acc: 90.49%, dropped acc: 0.06%
-
-Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'.
-Attacked prompt: b"As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable' and false is not true ."
-Original acc: 90.05%, attacked acc: 89.55%, dropped acc: 0.50%
-
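-The stresstest rows follow the same append-only pattern, but with fixed tautological distractors: 'and true is true' (sometimes repeated five times) or 'and false is not true', inserted before the final period. A toy version under that assumption:
-
-```python
-def stresstest_attack(prompt: str, distractor: str = "and true is true", repeat: int = 1) -> str:
-    """Append a tautological distractor phrase, as seen in the attacked prompts above."""
-    suffix = " ".join([distractor] * repeat)
-    return prompt.rstrip(".") + f" {suffix} ."
-
-print(stresstest_attack("Search the context for the most relevant answer.",
-                        distractor="and false is not true"))
-```
-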
-## textbugger
-
-Original prompt: Identify the most relevant answer from the context. If it's not possible to find an answer, state 'unanswerable'.
-Attacked prompt: b"Identify the most reevant answer from the context. If it's not achievable to find an answer, state 'unanswerable'."
-Original acc: 91.05%, attacked acc: 89.38%, dropped acc: 1.67%
-
-Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"Invite derive the most fitting answer from the context. If there isn't an \xc9\x91nswer in the context, use 'unanswerable'."
-Original acc: 91.05%, attacked acc: 90.49%, dropped acc: 0.56%
-
-Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'.
-Attacked prompt: b"Researching the context for the most relevant answer. If the an\xd1\x95wer cannot be found, respond by saying 'unanswerable'."
-Original acc: 91.05%, attacked acc: 90.38%, dropped acc: 0.67%
-
-Original prompt: Drawing upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable'.
-Attacked prompt: b"Drawing afterward your expertise in the context, determine the most suitable answer. If an answer isn't available, countries 'unanswerable'."
-Original acc: 90.55%, attacked acc: 89.38%, dropped acc: 1.17%
-
-Original prompt: Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"Underway well-versed in the context, please subtract the most fitting answer. If there isn't an answer in the framework, use 'unanswerable'."
-Original acc: 90.55%, attacked acc: 80.92%, dropped acc: 9.63%
-
-Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'.
-Attacked prompt: b"As an authority on the context, locate the most precise answer. If the context doesn't contain the answr, mention 'unanswerable'."
-Original acc: 90.05%, attacked acc: 89.01%, dropped acc: 1.04%
-
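-The deepwordbug and textbugger rows both rely on character-level edits: deepwordbug introduces small typos (swapped, dropped, or inserted letters), while textbugger also substitutes visually similar Unicode look-alikes, which is why its attacked prompts contain escaped bytes such as \xd0\xb5 (Cyrillic 'е'). A simplified illustration of the two edit types (the real attacks pick their targets greedily using model feedback; this sketch picks them at random):
-
-```python
-import random
-
-# Latin letters mapped to visually similar non-Latin code points, as seen in the textbugger rows.
-HOMOGLYPHS = {"a": "\u0251", "e": "\u0435", "o": "\u043e", "s": "\u0455"}
-
-def swap_adjacent(word: str, rng: random.Random) -> str:
-    """DeepWordBug-style typo: swap two adjacent characters."""
-    if len(word) < 2:
-        return word
-    i = rng.randrange(len(word) - 1)
-    return word[:i] + word[i + 1] + word[i] + word[i + 2:]
-
-def homoglyph(word: str) -> str:
-    """TextBugger-style visual substitution: replace one letter with a look-alike."""
-    for ch, sub in HOMOGLYPHS.items():
-        if ch in word:
-            return word.replace(ch, sub, 1)
-    return word
-
-rng = random.Random(0)
-print(swap_adjacent("sentence", rng))  # one adjacent-character swap; position depends on the seed
-print(homoglyph("answer"))             # first 'a' replaced by the look-alike U+0251
-```
-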
-## textfooler
-
-Original prompt: Identify the most relevant answer from the context. If it's not possible to find an answer, state 'unanswerable'.
-Attacked prompt: b"Determine the most relevance rebuttal from the context. If it's not achievable to identify an answer, state 'unanswerable'."
-Original acc: 91.05%, attacked acc: 55.91%, dropped acc: 35.14%
-
-Original prompt: Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"Ask subtract the most fitting rebuttal from the context. If there isn't an rebuttal in the context, use 'unanswerable'."
-Original acc: 91.05%, attacked acc: 55.32%, dropped acc: 35.72%
-
-Original prompt: Search the context for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'.
-Attacked prompt: b"Examine the regard for the most relate answer. If the answer cannot become founded, respond by saying 'unanswerable'."
-Original acc: 91.05%, attacked acc: 89.38%, dropped acc: 1.67%
-
-Original prompt: Drawing upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable'.
-Attacked prompt: b"Drawing upon your acquaintances in the context, identify the most suitable rebuttal. If an answer isn't available, state 'unanswerable'."
-Original acc: 90.55%, attacked acc: 55.41%, dropped acc: 35.14%
-
-Original prompt: Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'.
-Attacked prompt: b"Being well-versed in the context, please subtract the most fitting answer. If there isn't an rebuttal in the background, use 'unanswerable'."
-Original acc: 90.55%, attacked acc: 59.74%, dropped acc: 30.81%
-
-Original prompt: As an authority on the context, locate the most accurate answer. If the context doesn't contain the answer, mention 'unanswerable'.
-Attacked prompt: b"As an authority on the context, locate the most detailed answer. If the context doesn't contain the answering, mention 'unanswerable'."
-Original acc: 90.05%, attacked acc: 88.28%, dropped acc: 1.77%
-
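-The textfooler rows replace whole words rather than characters ('answer' -> 'rebuttal', 'derive' -> 'subtract', and so on). The actual attack selects replacements with counter-fitted word embeddings and a sentence-similarity filter; the snippet below is only a hand-written, dictionary-based stand-in to show the shape of the transformation:
-
-```python
-# Toy word substitution in the spirit of the textfooler rows above; the mapping is copied
-# from those rows by hand, not produced by the embedding-based search the attack really uses.
-SUBSTITUTIONS = {"answer": "rebuttal", "derive": "subtract", "possible": "achievable"}
-
-def word_substitute(prompt: str) -> str:
-    return " ".join(SUBSTITUTIONS.get(word, word) for word in prompt.split())
-
-print(word_substitute("Please derive the most fitting answer from the context."))
-```
-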
-## translation
-
-Language: chinese, acc: 90.55%, prompt: Provide the most accurate answer from the context. If the answer is not in context, answer 'unanswerable'.
-Language: chinese, acc: 90.05%, prompt: Please give the most accurate answer based on the context. If you cannot find the answer, please answer 'unanswerable'.
-Language: chinese, acc: 90.05%, prompt: Provide the most accurate answer based on the context. If you cannot find the answer, please answer 'unanswerable'.
-Language: chinese, acc: 90.05%, prompt: Please provide the most accurate answer from the context. If you cannot find the answer, please answer 'unanswerable'.
-Language: chinese, acc: 90.05%, prompt: Give the most accurate answer based on the context. If you cannot find the answer, please answer 'unanswerable'.
-Language: chinese, acc: 90.55%, prompt: Please give the most accurate answer based on the context. If the answer is not in context, answer 'unanswerable'.
-Language: chinese, acc: 90.05%, prompt: Provide the most accurate answer from the context. If you cannot find the answer, please answer 'unanswerable'.
-Language: chinese, acc: 90.55%, prompt: Please give the most accurate answer based on the context. If the answer cannot be found, please answer 'unanswerable'.
-Language: chinese, acc: 90.05%, prompt: Provide the most accurate answer based on the context. If the answer cannot be found, please answer 'unanswerable'.
-Language: chinese, acc: 90.05%, prompt: Please provide the most accurate answer from the context. If the answer cannot be found, please answer 'unanswerable'.
-Language: french, acc: 90.55%, prompt: From the context, provide the most accurate answer. If the answer is not in context, answer with 'unanswerable'.
-Language: french, acc: 89.55%, prompt: From the context, give the most accurate answer. If the answer is not present in the context, answer with 'unanswerable'.
-Language: french, acc: 90.05%, prompt: Based on the context, provide the most accurate answer. If the answer is not in context, answer with 'unanswerable'.
-Language: french, acc: 90.05%, prompt: According to the context, give the most precise answer. If the answer is not present in the context, answer with 'unanswerable'.
-Language: french, acc: 89.55%, prompt: From the context, find the most accurate answer. If the answer is not in context, answer with 'unanswerable'.
-Language: french, acc: 90.05%, prompt: Based on the context, provide the most accurate answer. If the answer is not available in the context, answer with 'unanswerable'.
-Language: french, acc: 90.05%, prompt: According to the context, give the most precise answer. If the answer is not in the context, answer with 'unanswerable'.
-Language: french, acc: 89.55%, prompt: From the context, find the most accurate answer. If the answer is not present in the context, answer with 'unanswerable'.
-Language: french, acc: 89.55%, prompt: Based on the context, provide the most accurate answer. If the answer cannot be found in the context, answer with 'unanswerable'.
-Language: french, acc: 90.05%, prompt: According to the context, give the most precise answer. If the answer is not available in the context, answer with 'unanswerable'.
-Language: arabic, acc: 90.55%, prompt: From context, provide the most accurate answer. If not in context, please reply 'unanswerable',
-Language: arabic, acc: 91.05%, prompt: From context, what is the most likely outcome? If the answer is not in context, please reply 'unanswerable',
-Language: arabic, acc: 91.05%, prompt: From the given context, what is the key element that can be deduced? If the answer is not available in the context, please reply 'unanswerable',
-Language: arabic, acc: 90.88%, prompt: Based on the context given, what is the clear key idea? If the answer is not in context, please reply 'unanswerable',
-Language: arabic, acc: 90.48%, prompt: Based on the context, what is the most convincing explanation? If the answer is not available in the context, please reply 'unanswerable',
-Language: arabic, acc: 90.88%, prompt: Based on the context, what is the most likely outcome? If the answer is not available in the context, please reply 'unanswerable',
-Language: arabic, acc: 90.38%, prompt: Based on the context, which hypothesis is the most true? If the answer is not in context, please reply 'unanswerable',
-Language: arabic, acc: 90.05%, prompt: From context, what is the most apparent factor influencing? If the answer is not available in the context, please reply 'unanswerable',
-Language: arabic, acc: 90.05%, prompt: From context, provide the most accurate answer. If the answer is not in context, reply 'unanswerable',
-Language: arabic, acc: 89.55%, prompt: From context, determine the most accurate answer. If the answer is not available in context, answer 'unanswerable',
-Language: spanish, acc: 90.88%, prompt: Depending on the context, it provides the most precise answer. If the answer is not in context, answer with 'unanswerable'.
-Language: spanish, acc: 90.88%, prompt: Briefly describes the situation and provides the corresponding response. If the answer cannot be found, answer with 'unanswerable'.
-Language: spanish, acc: 90.88%, prompt: Given the information given, what is the most appropriate response? If the answer cannot be determined, answer with 'unanswerable'.
-Language: spanish, acc: 90.05%, prompt: Read the following text and give the most accurate answer. If you can't find the answer, answer with 'unanswerable'.
-Language: spanish, acc: 90.55%, prompt: Based on the description, what is the most accurate answer? If the answer is not found in the description, answer with 'unanswerable'.
-Language: spanish, acc: 91.05%, prompt: From the context provided, which response is the most appropriate? If the answer cannot be found, answer with 'unanswerable'.
-Language: spanish, acc: 89.55%, prompt: Analyze the following paragraph and provide the most accurate answer. If the answer is not in the paragraph, answer with 'unanswerable'.
-Language: spanish, acc: 91.05%, prompt: According to the information presented, what is the most precise answer? If the answer cannot be determined, answer with 'unanswerable'.
-Language: spanish, acc: 91.05%, prompt: After reading the excerpt, which do you think is the correct answer? If the answer cannot be discerned, answer with 'unanswerable'.
-Language: spanish, acc: 90.88%, prompt: Based on the context, it provides the most appropriate response. If the answer is not in context, answer with 'unanswerable'.
-Language: japanese, acc: 90.05%, prompt: Provide the most accurate answer from this context. If the answer isn't in the context, answer 'unanswerable'.
-Language: japanese, acc: 90.99%, prompt: Please provide the most appropriate answer based on the information specified in this sentence. If the answer is not in the text, answer 'unanswerable'.
-Language: japanese, acc: 90.55%, prompt: Please provide the most accurate answer based on the information guessed from this text. If the answer is not in the text, answer 'unanswerable'.
-Language: japanese, acc: 89.48%, prompt: Provide the most detailed answer based on the given context. If the answer is not in the context, answer 'unanswerable'.
-Language: japanese, acc: 89.55%, prompt: Consider the information derived from this context and provide the most accurate answer. If the answer is not in the context, answer 'unanswerable'.
-Language: japanese, acc: 90.55%, prompt: Based on this context, please provide the most appropriate answer. If the answer is not in the context, answer 'unanswerable'.
-Language: japanese, acc: 88.73%, prompt: Consider the information derived from the given text and provide the most detailed answer. If the answer is not in the text, please answer 'unanswerable'.
-Language: japanese, acc: 90.05%, prompt: Provide the most accurate answer based on the information given in this text. If the answer is not in the text, answer 'unanswerable'.
-Language: japanese, acc: 89.55%, prompt: Consider the information inferred from this context and provide the most appropriate answer. If the answer is not in the context, answer 'unanswerable'.
-Language: japanese, acc: 89.98%, prompt: Provide the most detailed answer based on this context. If the answer is not in the context, answer 'unanswerable'.
-Language: korean, acc: 90.05%, prompt: Give the most accurate answer in context. If the answer is not in context, respond with 'unanswerable'.
-Language: korean, acc: 90.88%, prompt: Please provide additional information about the facts mentioned in this sentence. If no information is available, respond with 'unanswerable'.
-Language: korean, acc: 90.88%, prompt: Please tell me what your question is about. If there is no context in which you can provide an answer, respond with 'unanswerable'.
-Language: korean, acc: 90.88%, prompt: Please explain the concept mentioned in the following sentence. If there is no information on the concept, please respond with 'unanswerable'.
-Language: korean, acc: 91.38%, prompt: Tell me what you're comparing to in this sentence. If nothing is compared, please respond with 'unanswerable'.
-Language: korean, acc: 90.88%, prompt: Please perform the actions required by the following context. If the task is not possible or if you are not clear what needs to be done, respond with 'unanswerable'.
-Language: korean, acc: 90.88%, prompt: Figure out what information this sentence contains. If no information is available, respond with 'unanswerable'.
-Language: korean, acc: 91.05%, prompt: Please give a solution to what kind of problem in the following sentence. If there is no solution, respond with 'unanswerable'.
-Language: korean, acc: 90.88%, prompt: Please give the cause of the incident mentioned in the context. If the cause is not clear, respond with 'unanswerable'.
-Language: korean, acc: 90.38%, prompt: Give expected results in the following sentences. If the result is unpredictable, respond with 'unanswerable'.
-
-# iwslt
-
-## 10 prompts
-
-Acc: 0.27%, prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact.
-Acc: 0.24%, prompt: Convert the following sentence from its original {} language to the target language {}.
-Acc: 0.24%, prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Acc: 0.24%, prompt: Using machine translation, convert the given sentence from {} into the {} language.
-Acc: 0.24%, prompt: Please perform a translation of the given sentence, converting it from {} to {}.
-Acc: 0.24%, prompt: Given the sentence below, perform a machine translation from {} to {}.
-Acc: 0.24%, prompt: Translate the subsequent text passage from its original {} language to the {} language.
-Acc: 0.24%, prompt: Translate the provided sentence from {} to {} while maintaining the original meaning and context.
-Acc: 0.23%, prompt: Translate the subsequent sentence from its source language {} into the desired language {}.
-Acc: 0.23%, prompt: Translate the following text from the source language {} to the target language {}.
-
-Acc: 0.27%, prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}.
-Acc: 0.27%, prompt: Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}.
-Acc: 0.27%, prompt: As a language specialist, accurately translate the provided sentence from {} to {}.
-Acc: 0.25%, prompt: As a qualified translator, convert the given sentence from its original {} language to the target language {}.
-Acc: 0.24%, prompt: Assuming the role of an expert language translator, translate the given text from {} to {}.
-Acc: 0.24%, prompt: As a skilled linguist, please convert the following sentence from {} to {}.
-Acc: 0.24%, prompt: Acting as a professional translator, translate the provided sentence from {} to {}.
-Acc: 0.24%, prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}.
-Acc: 0.23%, prompt: While acting as an experienced language expert, translate the following text from {} to {}.
-Acc: 0.23%, prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}.
-
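-Each of the translation prompts above is a template with two '{}' placeholders, filled with the source and target language names before evaluation. A minimal illustration of that step (the language pair here is only an example value, not necessarily one used in the benchmark):
-
-```python
-template = "Accurately translate the sentence from {} to {}, ensuring the meaning remains intact."
-print(template.format("English", "German"))  # placeholders filled with a source/target language pair
-```
-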
-## bertattack
-
-Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact.
-Attacked prompt: b'i translate the sentence from {} to {}, assumed the meaning remains intact. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: Convert the following sentence from its original {} language to the target language {}.
-Attacked prompt: b'Convert the following sentence from its original {} language to the target language {}. '
-Original acc: 0.24%, attacked acc: 0.24%, dropped acc: 0.03%
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'create a machine translation for the provided verb, setting it from {} to {}. '
-Original acc: 0.24%, attacked acc: 0.23%, dropped acc: 0.01%
-
-Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}.
-Attacked prompt: b'she on the role of a certified translation, and a translation of the given sentence from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}.
-Attacked prompt: b'Assuming the responsibilities of a professional syriac, translate the subsequent reading verse from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: As a language specialist, accurately translate the provided sentence from {} to {}.
-Attacked prompt: b'As a level specialist, just translate the provided sentence from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.03%
-
-## checklist
-
-Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact.
-Attacked prompt: b'Accurately translate the sentence from {} to {}, ensuring the meaning remains intact S6oUV5E54P. '
-Original acc: 0.27%, attacked acc: 0.24%, dropped acc: 0.03%
-
-Original prompt: Convert the following sentence from its original {} language to the target language {}.
-Attacked prompt: b'Convert the following sentence from its original {} language to the target language lWJoGGRdjv {}. '
-Original acc: 0.24%, attacked acc: 0.19%, dropped acc: 0.05%
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'Perform a machine translation for the provided sentence, changing it from {} to mOJ9nEwT4f {}. '
-Original acc: 0.24%, attacked acc: 0.21%, dropped acc: 0.03%
-
-Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}.
-Attacked prompt: b'Taking on the role of a certified translator, perform a translation of the given sentence from {} to njCuciQJpB {}. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}.
-Attacked prompt: b'Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to xikCjkMydH {}. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: As a language specialist, accurately translate the provided sentence from {} to {}.
-Attacked prompt: b'As a language specialist, accurately translate the provided sentence from {} to g0vBZf3tQC {}. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-## deepwordbug
-
-Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact.
-Attacked prompt: b'Accurately translate the seneence from {} to {}, enusring the emaning remaiOs intat. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: Convert the following sentence from its original {} language to the target language {}.
-Attacked prompt: b'Convert the following sentence from its original {} language to the target language {}. '
-Original acc: 0.24%, attacked acc: 0.24%, dropped acc: 0.03%
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'Preform a machine translatino for the provied senteXce, chanigng it from {} to {}. '
-Original acc: 0.24%, attacked acc: 0.19%, dropped acc: 0.05%
-
-Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}.
-Attacked prompt: b'Tking on the role of a Aertified tranolator, pefrorm a transAlation of the gUven sentence from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.21%, dropped acc: 0.06%
-
-Original prompt: Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}.
-Attacked prompt: b'Assuming the responsibiljities of a professional translaZor, translate the subsequnet Gext pSssage from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: As a language specialist, accurately translate the provided sentence from {} to {}.
-Attacked prompt: b'sA a languAage specialits, accurtely translate the prpvided senpence from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.21%, dropped acc: 0.05%
-
-## stresstest
-
-Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact.
-Attacked prompt: b'Accurately translate the sentence from {} to {}, ensuring the meaning remains intact and true is true and true is true and true is true and true is true and true is true . '
-Original acc: 0.27%, attacked acc: 0.24%, dropped acc: 0.03%
-
-Original prompt: Convert the following sentence from its original {} language to the target language {}.
-Attacked prompt: b'Convert the following sentence from its original {} language to the target language and true is true {}. '
-Original acc: 0.24%, attacked acc: 0.20%, dropped acc: 0.04%
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'Perform a machine translation for the provided sentence, changing it from {} to and true is true and true is true and true is true and true is true and true is true {}. '
-Original acc: 0.24%, attacked acc: 0.19%, dropped acc: 0.05%
-
-Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}.
-Attacked prompt: b'Taking on the role of a certified translator, perform a translation of the given sentence from {} to and true is true and true is true and true is true and true is true and true is true {}. '
-Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08%
-
-Original prompt: Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}.
-Attacked prompt: b'Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to and true is true and true is true and true is true and true is true and true is true {}. '
-Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08%
-
-Original prompt: As a language specialist, accurately translate the provided sentence from {} to {}.
-Attacked prompt: b'As a language specialist, accurately translate the provided sentence from {} to and true is true and true is true and true is true and true is true and true is true {}. '
-Original acc: 0.27%, attacked acc: 0.20%, dropped acc: 0.07%
-
-## textbugger
-
-Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact.
-Attacked prompt: b'Ac\xcf\xb2urately translate the sentence from {} to {}, ensuring the meaning remains unaffected. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: Convert the following sentence from its original {} language to the target language {}.
-Attacked prompt: b'Convert the following sentence from its original {} language to the target language {}. '
-Original acc: 0.24%, attacked acc: 0.24%, dropped acc: 0.03%
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'Perfo\xe2\xb2\x85m a machine translation for the provided sent\xd0\xb5nce, changing it from {} to {}. '
-Original acc: 0.24%, attacked acc: 0.23%, dropped acc: 0.01%
-
-Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}.
-Attacked prompt: b'Taking on the role of a certifying translators, pefrorm a translati\xd0\xben of the giv\xd0\xb5n sentence from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}.
-Attacked prompt: b'As suming the responsibilities of a professional tra\xd5\xb8slator, translate the subsequent text passage from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: As a language specialist, accurately translate the provided sentence from {} to {}.
-Attacked prompt: b'As a languag\xd0\xb5 speciali\xd1\x95t, accurately translate the provided sentence from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.24%, dropped acc: 0.03%
-
-## textfooler
-
-Original prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact.
-Attacked prompt: b'Rightly translate the convicts from {} to {}, insurance the signify sojourn unaffected. '
-Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08%
-
-Original prompt: Convert the following sentence from its original {} language to the target language {}.
-Attacked prompt: b'Convert the following sentence from its original {} language to the target language {}. '
-Original acc: 0.24%, attacked acc: 0.24%, dropped acc: 0.03%
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'Achieving a equipment translation for the provided chastisement, adaptations it from {} to {}. '
-Original acc: 0.24%, attacked acc: 0.19%, dropped acc: 0.05%
-
-Original prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}.
-Attacked prompt: b'Taking on the role of a certified translator, satisfy a converting of the given sentence from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.23%, dropped acc: 0.04%
-
-Original prompt: Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}.
-Attacked prompt: b'Adopt the indebted of a vocational artist, translate the subsequent laws transition from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08%
-
-Original prompt: As a language specialist, accurately translate the provided sentence from {} to {}.
-Attacked prompt: b'Oj a terminology scholar, rightly translate the afforded sorrows from {} to {}. '
-Original acc: 0.27%, attacked acc: 0.19%, dropped acc: 0.08%
-
-## translation
-
-Language: chinese, acc: 0.23%, prompt: Please translate the given sentence into {} to {}.
-Language: chinese, acc: 0.23%, prompt: Please translate the following sentences from {} to {}.
-Language: chinese, acc: 0.23%, prompt: Please convert the following sentences to {} and translate to {}.
-Language: chinese, acc: 0.24%, prompt: Please convert the given sentence from {} to {}.
-Language: chinese, acc: 0.22%, prompt: Please translate the next sentence from {} to {}.
-Language: chinese, acc: 0.23%, prompt: Please translate the following sentence from {} to {}.
-Language: chinese, acc: 0.24%, prompt: Please translate the sentences given into {} and convert them into {}.
-Language: chinese, acc: 0.24%, prompt: Please convert the sentences given to {} to {}.
-Language: chinese, acc: 0.24%, prompt: Please translate the following sentences into {} and convert them into {}.
-Language: chinese, acc: 0.23%, prompt: Please change the given sentence from {} to {}.
-Language: french, acc: 0.24%, prompt: Please translate the given sentence, converting it from {} to {}.
-Language: french, acc: 0.23%, prompt: Please translate the following sentence from {} to {}.
-Language: french, acc: 0.23%, prompt: Please turn the sentence below into {}, then translate it into {}.
-Language: french, acc: 0.24%, prompt: Please convert the given phrase from {} to {}.
-Language: french, acc: 0.23%, prompt: Please translate the following sentence from {} to {}.
-Language: french, acc: 0.23%, prompt: Please translate the sentence below from {} to {}.
-Language: french, acc: 0.24%, prompt: Please translate the given sentence to {}, then convert it to {}.
-Language: french, acc: 0.24%, prompt: Please make a translation of the supplied sentence, transforming it from {} to {}.
-Language: french, acc: 0.24%, prompt: Please translate the following sentence to {}, then convert it to {}.
-Language: french, acc: 0.24%, prompt: Please transform the given sentence from {} to {}.
-Language: arabic, acc: 0.24%, prompt: Please translate the given sentence, and convert it from {} to {},
-Language: arabic, acc: 0.23%, prompt: Please translate the following sentence from {} to {},
-Language: arabic, acc: 0.23%, prompt: Please convert the sentence below to {}, and then translate it to {},
-Language: arabic, acc: 0.23%, prompt: Please convert the given sentence from {} to {},
-Language: arabic, acc: 0.23%, prompt: Please translate the following sentence from {} to {},
-Language: arabic, acc: 0.23%, prompt: Please convert the sentence below from {} to {},
-Language: arabic, acc: 0.24%, prompt: Please translate the given sentence to {}, then convert it to {},
-Language: arabic, acc: 0.24%, prompt: Please translate the given sentence, and convert it from {} to {},
-Language: arabic, acc: 0.24%, prompt: Please translate to {}, then convert to {},
-Language: arabic, acc: 0.24%, prompt: Please convert the given sentence from {} to {}.
-Language: spanish, acc: 0.24%, prompt: Please make a translation of the provided phrase, converting it from {} to {}.
-Language: spanish, acc: 0.23%, prompt: Please translate the following sentence from {} to {}.
-Language: spanish, acc: 0.23%, prompt: Please convert the next sentence to {}, and then translate it to {}.
-Language: spanish, acc: 0.24%, prompt: Please make a translation of the given phrase, converting it from {} to {}.
-Language: spanish, acc: 0.23%, prompt: Please translate the following sentence from {} to {}.
-Language: spanish, acc: 0.23%, prompt: Please convert the following sentence from {} to {}.
-Language: spanish, acc: 0.24%, prompt: Please translate the sentence provided to {}, and then turn it to {}.
-Language: spanish, acc: 0.24%, prompt: Please make a translation of the following sentence, converting it from {} to {}.
-Language: spanish, acc: 0.23%, prompt: Please translate the next sentence to {}, and then turn it to {}.
-Language: spanish, acc: 0.24%, prompt: Please convert the given sentence from {} to {}.
-Language: japanese, acc: 0.23%, prompt: Please translate the given sentence from {} to {}.
-Language: japanese, acc: 0.23%, prompt: Please translate the following sentence from {} to {}.
-Language: japanese, acc: 0.23%, prompt: Please convert the following sentences into {} and translate them into {}.
-Language: japanese, acc: 0.24%, prompt: Please translate the given sentence by converting {} to {}.
-Language: japanese, acc: 0.23%, prompt: Please translate the following sentence from {} to {}.
-Language: japanese, acc: 0.24%, prompt: Please convert the following sentences from {} to {}.
-Language: japanese, acc: 0.23%, prompt: Translate the given sentence into {} and convert it to {}.
-Language: japanese, acc: 0.23%, prompt: Please translate the given sentence from {} to {}.
-Language: japanese, acc: 0.24%, prompt: Translate the following sentence into {} and convert it to {}.
-Language: japanese, acc: 0.24%, prompt: Convert the given statement from {} to {}.
-Language: korean, acc: 0.23%, prompt: Please translate the given sentence from {} to {}.
-Language: korean, acc: 0.23%, prompt: Please translate the following sentence from {} to {}.
-Language: korean, acc: 0.23%, prompt: Please translate the sentences below into {}, then {}.
-Language: korean, acc: 0.23%, prompt: Please translate the given sentences from {} to {}.
-Language: korean, acc: 0.23%, prompt: Please translate the following sentence from {} to {}.
-Language: korean, acc: 0.24%, prompt: Please convert the sentences below from {} to {}.
-Language: korean, acc: 0.23%, prompt: Please translate the given sentence into {}, then {}.
-Language: korean, acc: 0.23%, prompt: Please translate the given sentence from {} to {}.
-Language: korean, acc: 0.23%, prompt: Please translate the following sentences into {}, then {}.
-Language: korean, acc: 0.24%, prompt: Please convert the given sentence from {} to {}.
-
-# un_multi
-
-## 10 prompts
-
-Acc: 0.36%, prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Acc: 0.36%, prompt: Given the sentence below, perform a machine translation from {} to {}.
-Acc: 0.36%, prompt: Translate the subsequent sentence from its source language {} into the desired language {}.
-Acc: 0.36%, prompt: Translate the subsequent text passage from its original {} language to the {} language.
-Acc: 0.36%, prompt: Translate the provided sentence from {} to {} while maintaining the original meaning and context.
-Acc: 0.36%, prompt: Accurately translate the sentence from {} to {}, ensuring the meaning remains intact.
-Acc: 0.35%, prompt: Using machine translation, convert the given sentence from {} into the {} language.
-Acc: 0.35%, prompt: Translate the following text from the source language {} to the target language {}.
-Acc: 0.35%, prompt: Please perform a translation of the given sentence, converting it from {} to {}.
-Acc: 0.35%, prompt: Convert the following sentence from its original {} language to the target language {}.
-
-Acc: 0.36%, prompt: As a skilled linguist, please convert the following sentence from {} to {}.
-Acc: 0.36%, prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}.
-Acc: 0.36%, prompt: Acting as a professional translator, translate the provided sentence from {} to {}.
-Acc: 0.36%, prompt: As a language specialist, accurately translate the provided sentence from {} to {}.
-Acc: 0.36%, prompt: While acting as an experienced language expert, translate the following text from {} to {}.
-Acc: 0.36%, prompt: In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}.
-Acc: 0.36%, prompt: Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}.
-Acc: 0.36%, prompt: Assuming the role of an expert language translator, translate the given text from {} to {}.
-Acc: 0.35%, prompt: Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}.
-Acc: 0.35%, prompt: As a qualified translator, convert the given sentence from its original {} language to the target language {}.
-
-## bertattack
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'perform a machine translation for the displayed sentence, export it from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Given the sentence below, perform a machine translation from {} to {}.
-Attacked prompt: b'Given the sentence below, for a machine operator from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-Original prompt: Translate the subsequent sentence from its source language {} into the desired language {}.
-Attacked prompt: b'Translate the later sentence from its source rule {} into the desired language {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: As a skilled linguist, please convert the following sentence from {} to {}.
-Attacked prompt: b'd a skilled linguist, right example the following sentence from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}.
-Attacked prompt: b'if the capacity of a capable translator, leave changed the following sentence from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Acting as a professional translator, translate the provided sentence from {} to {}.
-Attacked prompt: b'his as a qualified translator, translate the provided sentence from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-## checklist
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'Perform a machine translation for the provided sentence, changing it from {} to EAB4KP2NVY {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Given the sentence below, perform a machine translation from {} to {}.
-Attacked prompt: b'Given the sentence below, perform a machine translation from {} to Kw6nrs57gH {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Translate the subsequent sentence from its source language {} into the desired language {}.
-Attacked prompt: b'Translate the subsequent sentence from its source language {} into the desired language 5XeflW1ZJc {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-Original prompt: As a skilled linguist, please convert the following sentence from {} to {}.
-Attacked prompt: b'As a skilled linguist, please convert the following sentence from {} to AMsRIKZniY {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}.
-Attacked prompt: b'In the capacity of a proficient translator, please change the following sentence from {} to vTAjHynoIG {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Acting as a professional translator, translate the provided sentence from {} to {}.
-Attacked prompt: b'Acting as a professional translator, translate the provided sentence from {} to vTAjHynoIG {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-## deepwordbug
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'PeVform a machine transAation for the provide setence, changlng it from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Given the sentence below, perform a machine translation from {} to {}.
-Attacked prompt: b'Gvien the sentence below, perform a machine translation from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-Original prompt: Translate the subsequent sentence from its source language {} into the desired language {}.
-Attacked prompt: b'Translate the subsMquent sentejce from its source lasguage {} into the desired glanguage {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-Original prompt: As a skilled linguist, please convert the following sentence from {} to {}.
-Attacked prompt: b'AK a skilled linguist, please onvert the following sentence from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}.
-Attacked prompt: b'n the capaciyt of a proficient translator, Pplease chagne the following sentence from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Acting as a professional translator, translate the provided sentence from {} to {}.
-Attacked prompt: b'cAting as a professional transaltor, translate the provided sentence from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-## stresstest
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'Perform a machine translation for the provided sentence, changing it from {} to and false is not true {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-Original prompt: Given the sentence below, perform a machine translation from {} to {}.
-Attacked prompt: b'Given the sentence below, perform a machine translation from {} to and true is true {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-Original prompt: Translate the subsequent sentence from its source language {} into the desired language {}.
-Attacked prompt: b'Translate the subsequent sentence from its source language {} into the desired language and false is not true {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-Original prompt: As a skilled linguist, please convert the following sentence from {} to {}.
-Attacked prompt: b'As a skilled linguist, please convert the following sentence from {} to and true is true {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}.
-Attacked prompt: b'In the capacity of a proficient translator, please change the following sentence from {} to and false is not true {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-Original prompt: Acting as a professional translator, translate the provided sentence from {} to {}.
-Attacked prompt: b'Acting as a professional translator, translate the provided sentence from {} to and true is true and true is true and true is true and true is true and true is true {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-## textbugger
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'Preform a machinery translation for the provid\xd0\xb5d sentence, changing it from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Given the sentence below, perform a machine translation from {} to {}.
-Attacked prompt: b'Given the sentence below, perform a equipment translators from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Translate the subsequent sentence from its source language {} into the desired language {}.
-Attacked prompt: b'Translate the subsequent condemnation from its source language {} into the desired language {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-Original prompt: As a skilled linguist, please convert the following sentence from {} to {}.
-Attacked prompt: b'As a skillful linguist, pelase conv\xd0\xb5rt the following sentence from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}.
-Attacked prompt: b'In the capacity of a proficient translator, please change the following snetence from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.00%
-
-Original prompt: Acting as a professional translator, translate the provided sentence from {} to {}.
-Attacked prompt: b'Acting as a professional tra\xd5\xb8slator, translate the \xd1\x80rovided sentence from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-## textfooler
-
-Original prompt: Perform a machine translation for the provided sentence, changing it from {} to {}.
-Attacked prompt: b'Perform a computer translate for the tabled sentence, changing it from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Given the sentence below, perform a machine translation from {} to {}.
-Attacked prompt: b'Given the sorrows below, fulfillment a device translation from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Translate the subsequent sentence from its source language {} into the desired language {}.
-Attacked prompt: b'Translate the forth sanction from its source dictionary {} into the desired language {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: As a skilled linguist, please convert the following sentence from {} to {}.
-Attacked prompt: b'As a skilled linguist, please translate the following punishment from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: In the capacity of a proficient translator, please change the following sentence from {} to {}.
-Attacked prompt: b'In the ability of a proficient translator, please variant the following pity from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-Original prompt: Acting as a professional translator, translate the provided sentence from {} to {}.
-Attacked prompt: b'Acting as a professional artists, translate the envisaged sentence from {} to {}. '
-Original acc: 0.36%, attacked acc: 0.35%, dropped acc: 0.01%
-
-## translation
-
-Language: chinese, acc: 0.36%, prompt: Please translate the given sentence into {} to {}.
-Language: chinese, acc: 0.36%, prompt: Please translate the following sentences from {} to {}.
-Language: chinese, acc: 0.35%, prompt: Please convert the following sentences to {} and translate to {}.
-Language: chinese, acc: 0.36%, prompt: Please convert the given sentence from {} to {}.
-Language: chinese, acc: 0.35%, prompt: Please translate the next sentence from {} to {}.
-Language: chinese, acc: 0.35%, prompt: Please translate the following sentence from {} to {}.
-Language: chinese, acc: 0.36%, prompt: Please translate the sentences given into {} and convert them into {}.
-Language: chinese, acc: 0.36%, prompt: Please convert the sentences given to {} to {}.
-Language: chinese, acc: 0.36%, prompt: Please translate the following sentences into {} and convert them into {}.
-Language: chinese, acc: 0.36%, prompt: Please change the given sentence from {} to {}.
-Language: french, acc: 0.36%, prompt: Please translate the given sentence, converting it from {} to {}.
-Language: french, acc: 0.35%, prompt: Please translate the following sentence from {} to {}.
-Language: french, acc: 0.36%, prompt: Please turn the sentence below into {}, then translate it into {}.
-Language: french, acc: 0.36%, prompt: Please convert the given phrase from {} to {}.
-Language: french, acc: 0.35%, prompt: Please translate the following sentence from {} to {}.
-Language: french, acc: 0.36%, prompt: Please translate the sentence below from {} to {}.
-Language: french, acc: 0.36%, prompt: Please translate the given sentence to {}, then convert it to {}.
-Language: french, acc: 0.35%, prompt: Please make a translation of the supplied sentence, transforming it from {} to {}.
-Language: french, acc: 0.36%, prompt: Please translate the following sentence to {}, then convert it to {}.
-Language: french, acc: 0.36%, prompt: Please transform the given sentence from {} to {}.
-Language: arabic, acc: 0.36%, prompt: Please translate the given sentence, and convert it from {} to {},
-Language: arabic, acc: 0.35%, prompt: Please translate the following sentence from {} to {},
-Language: arabic, acc: 0.35%, prompt: Please convert the sentence below to {}, and then translate it to {},
-Language: arabic, acc: 0.36%, prompt: Please convert the given sentence from {} to {},
-Language: arabic, acc: 0.35%, prompt: Please translate the following sentence from {} to {},
-Language: arabic, acc: 0.35%, prompt: Please convert the sentence below from {} to {},
-Language: arabic, acc: 0.36%, prompt: Please translate the given sentence to {}, then convert it to {},
-Language: arabic, acc: 0.36%, prompt: Please translate the given sentence, and convert it from {} to {},
-Language: arabic, acc: 0.36%, prompt: Please translate to {}, then convert to {},
-Language: arabic, acc: 0.36%, prompt: Please convert the given sentence from {} to {}.
-Language: spanish, acc: 0.36%, prompt: Please make a translation of the provided phrase, converting it from {} to {}.
-Language: spanish, acc: 0.35%, prompt: Please translate the following sentence from {} to {}.
-Language: spanish, acc: 0.35%, prompt: Please convert the next sentence to {}, and then translate it to {}.
-Language: spanish, acc: 0.36%, prompt: Please make a translation of the given phrase, converting it from {} to {}.
-Language: spanish, acc: 0.35%, prompt: Please translate the following sentence from {} to {}.
-Language: spanish, acc: 0.35%, prompt: Please convert the following sentence from {} to {}.
-Language: spanish, acc: 0.36%, prompt: Please translate the sentence provided to {}, and then turn it to {}.
-Language: spanish, acc: 0.36%, prompt: Please make a translation of the following sentence, converting it from {} to {}.
-Language: spanish, acc: 0.35%, prompt: Please translate the next sentence to {}, and then turn it to {}.
-Language: spanish, acc: 0.36%, prompt: Please convert the given sentence from {} to {}.
-Language: japanese, acc: 0.36%, prompt: Please translate the given sentence from {} to {}.
-Language: japanese, acc: 0.35%, prompt: Please translate the following sentence from {} to {}.
-Language: japanese, acc: 0.35%, prompt: Please convert the following sentences into {} and translate them into {}.
-Language: japanese, acc: 0.36%, prompt: Please translate the given sentence by converting {} to {}.
-Language: japanese, acc: 0.35%, prompt: Please translate the following sentence from {} to {}.
-Language: japanese, acc: 0.36%, prompt: Please convert the following sentences from {} to {}.
-Language: japanese, acc: 0.35%, prompt: Translate the given sentence into {} and convert it to {}.
-Language: japanese, acc: 0.36%, prompt: Please translate the given sentence from {} to {}.
-Language: japanese, acc: 0.36%, prompt: Translate the following sentence into {} and convert it to {}.
-Language: japanese, acc: 0.35%, prompt: Convert the given statement from {} to {}.
-Language: korean, acc: 0.36%, prompt: Please translate the given sentence from {} to {}.
-Language: korean, acc: 0.35%, prompt: Please translate the following sentence from {} to {}.
-Language: korean, acc: 0.35%, prompt: Please translate the sentences below into {}, then {}.
-Language: korean, acc: 0.36%, prompt: Please translate the given sentences from {} to {}.
-Language: korean, acc: 0.35%, prompt: Please translate the following sentence from {} to {}.
-Language: korean, acc: 0.36%, prompt: Please convert the sentences below from {} to {}.
-Language: korean, acc: 0.35%, prompt: Please translate the given sentence into {}, then {}.
-Language: korean, acc: 0.36%, prompt: Please translate the given sentence from {} to {}.
-Language: korean, acc: 0.35%, prompt: Please translate the following sentences into {}, then {}.
-Language: korean, acc: 0.36%, prompt: Please convert the given sentence from {} to {}.
-
-# math
-
-## 10 prompts
-
-Acc: 16.88%, prompt: Figure out the solution for the following mathematical task with {}.
-Acc: 16.25%, prompt: Solve the following math question about {}.
-Acc: 16.25%, prompt: Calculate the answer to the following math query about {}.
-Acc: 16.25%, prompt: Compute the result of this math task concerning {}.
-Acc: 16.25%, prompt: Resolve the following mathematical question associated with {}.
-Acc: 16.25%, prompt: Obtain the result for this math question regarding {}.
-Acc: 16.25%, prompt: Evaluate the following mathematical problem that includes {}.
-Acc: 15.62%, prompt: Determine the solution to this mathematical problem related to {}.
-Acc: 15.62%, prompt: Find the solution for this mathematical challenge with {}.
-Acc: 15.62%, prompt: Work out the answer to this math problem featuring {}.
-
-Acc: 16.88%, prompt: Pretend you are a math teacher and solve this mathematical question concerning {}.
-Acc: 16.25%, prompt: Act as a math expert and compute the result of the following mathematical task about {}.
-Acc: 16.25%, prompt: As a mathematics consultant, resolve the following problem associated with {}.
-Acc: 16.25%, prompt: In the shoes of a mathematics master, obtain the result for this math question that includes {}.
-Acc: 15.62%, prompt: Imagine being a math mentor and determine the solution to the following problem with {}.
-Acc: 15.62%, prompt: In the position of a mathematics coach, figure out the answer to this math challenge involving {}.
-Acc: 15.62%, prompt: As a math guru, evaluate the following mathematical task regarding {}.
-Acc: 15.00%, prompt: As a mathematics instructor, calculate the answer to the following problem related to {}.
-Acc: 15.00%, prompt: Taking on the role of a math specialist, work out the solution for this mathematical question featuring {}.
-Acc: 14.37%, prompt: Embodying a mathematics advisor, find the answer to the following math problem with {}.
-
-## bertattack
-
-Original prompt: Figure out the solution for the following mathematical task with {}.
-Attacked prompt: b'Figure out the solution for the following arithmetic task with {}. '
-Original acc: 16.88%, attacked acc: 13.75%, dropped acc: 3.12%
-
-Original prompt: Solve the following math question about {}.
-Attacked prompt: b'if the following math question about {}. '
-Original acc: 16.25%, attacked acc: 15.00%, dropped acc: 1.25%
-
-Original prompt: Calculate the answer to the following math query about {}.
-Attacked prompt: b'if the answer to the following math query about {}. '
-Original acc: 16.25%, attacked acc: 13.75%, dropped acc: 2.50%
-
-Original prompt: Pretend you are a math teacher and solve this mathematical question concerning {}.
-Attacked prompt: b'Pretend you are a math teacher and solve this correct table concerning {}. '
-Original acc: 16.88%, attacked acc: 14.37%, dropped acc: 2.50%
-
-Original prompt: Act as a math expert and compute the result of the following mathematical task about {}.
-Attacked prompt: b'Act as a math team and correct the result of the following mathematical task about {}. '
-Original acc: 16.25%, attacked acc: 12.50%, dropped acc: 3.75%
-
-Original prompt: As a mathematics consultant, resolve the following problem associated with {}.
-Attacked prompt: b'and a mathematics consultant, identify the following problem associated with {}. '
-Original acc: 16.25%, attacked acc: 14.37%, dropped acc: 1.88%
-
-## checklist
-
-Original prompt: Figure out the solution for the following mathematical task with {}.
-Attacked prompt: b'Figure out the solution for the following mathematical task with LkF0FZxMZ4 {}. '
-Original acc: 16.88%, attacked acc: 15.00%, dropped acc: 1.88%
-
-Original prompt: Solve the following math question about {}.
-Attacked prompt: b'Solve the following math question about Cev0s49fbe {}. '
-Original acc: 16.25%, attacked acc: 15.62%, dropped acc: 0.63%
-
-Original prompt: Calculate the answer to the following math query about {}.
-Attacked prompt: b'Calculate the answer to the following math query about 5XeflW1ZJc {}. '
-Original acc: 16.25%, attacked acc: 15.62%, dropped acc: 0.63%
-
-Original prompt: Pretend you are a math teacher and solve this mathematical question concerning {}.
-Attacked prompt: b'Pretend you are a math teacher and solve this mathematical question concerning 5XeflW1ZJc {}. '
-Original acc: 16.88%, attacked acc: 16.25%, dropped acc: 0.63%
-
-Original prompt: Act as a math expert and compute the result of the following mathematical task about {}.
-Attacked prompt: b'Act as a math expert and compute the result of the following mathematical task about 5XeflW1ZJc {}. '
-Original acc: 16.25%, attacked acc: 15.00%, dropped acc: 1.25%
-
-Original prompt: As a mathematics consultant, resolve the following problem associated with {}.
-Attacked prompt: b'As a mathematics consultant, resolve the following problem associated with EAB4KP2NVY {}. '
-Original acc: 16.25%, attacked acc: 16.25%, dropped acc: 0.00%
-
-## deepwordbug
-
-Original prompt: Figure out the solution for the following mathematical task with {}.
-Attacked prompt: b'Figure out the solution for the following maKthematical Utask with {}. '
-Original acc: 16.88%, attacked acc: 14.37%, dropped acc: 2.50%
-
-Original prompt: Solve the following math question about {}.
-Attacked prompt: b'Solve the following math question about {}. '
-Original acc: 16.25%, attacked acc: 16.25%, dropped acc: 0.63%
-
-Original prompt: Calculate the answer to the following math query about {}.
-Attacked prompt: b'Calculate the answer to the followng math query about {}. '
-Original acc: 16.25%, attacked acc: 15.62%, dropped acc: 0.63%
-
-Original prompt: Pretend you are a math teacher and solve this mathematical question concerning {}.
-Attacked prompt: b'Petend you are a math teacher and solve this mathematical euestion concCerning {}. '
-Original acc: 16.88%, attacked acc: 15.00%, dropped acc: 1.88%
-
-Original prompt: Act as a math expert and compute the result of the following mathematical task about {}.
-Attacked prompt: b'Act as a math expret and compute the result of the following mathematical task about {}. '
-Original acc: 16.25%, attacked acc: 15.00%, dropped acc: 1.25%
-
-Original prompt: As a mathematics consultant, resolve the following problem associated with {}.
-Attacked prompt: b'A a mathematics consultant, resolve the following problem associated with {}. '
-Original acc: 16.25%, attacked acc: 15.00%, dropped acc: 1.25%
-
-## stresstest
-
-Original prompt: Figure out the solution for the following mathematical task with {}.
-Attacked prompt: b'Figure out the solution for the following mathematical task with and false is not true {}. '
-Original acc: 16.88%, attacked acc: 13.75%, dropped acc: 3.12%
-
-Original prompt: Solve the following math question about {}.
-Attacked prompt: b'Solve the following math question about and false is not true {}. '
-Original acc: 16.25%, attacked acc: 12.50%, dropped acc: 3.75%
-
-Original prompt: Calculate the answer to the following math query about {}.
-Attacked prompt: b'Calculate the answer to the following math query about and false is not true {}. '
-Original acc: 16.25%, attacked acc: 13.75%, dropped acc: 2.50%
-
-Original prompt: Pretend you are a math teacher and solve this mathematical question concerning {}.
-Attacked prompt: b'Pretend you are a math teacher and solve this mathematical question concerning and false is not true {}. '
-Original acc: 16.88%, attacked acc: 12.50%, dropped acc: 4.38%
-
-Original prompt: Act as a math expert and compute the result of the following mathematical task about {}.
-Attacked prompt: b'Act as a math expert and compute the result of the following mathematical task about and false is not true {}. '
-Original acc: 16.25%, attacked acc: 12.50%, dropped acc: 3.75%
-
-Original prompt: As a mathematics consultant, resolve the following problem associated with {}.
-Attacked prompt: b'As a mathematics consultant, resolve the following problem associated with and false is not true {}. '
-Original acc: 16.25%, attacked acc: 13.12%, dropped acc: 3.12%
-
-## textbugger
-
-Original prompt: Figure out the solution for the following mathematical task with {}.
-Attacked prompt: b'Figure out the solution for the followin\xc9\xa1 mathematical task with {}. '
-Original acc: 16.88%, attacked acc: 15.00%, dropped acc: 1.88%
-
-Original prompt: Solve the following math question about {}.
-Attacked prompt: b'Sovle the following math question about {}. '
-Original acc: 16.25%, attacked acc: 15.00%, dropped acc: 1.25%
-
-Original prompt: Calculate the answer to the following math query about {}.
-Attacked prompt: b'Calculate the replying to the following math query about {}. '
-Original acc: 16.25%, attacked acc: 15.62%, dropped acc: 0.63%
-
-Original prompt: Pretend you are a math teacher and solve this mathematical question concerning {}.
-Attacked prompt: b'Pretend you are a math teacher and settle this matehmatical question on {}. '
-Original acc: 16.88%, attacked acc: 13.75%, dropped acc: 3.12%
-
-Original prompt: Act as a math expert and compute the result of the following mathematical task about {}.
-Attacked prompt: b'Act as a math expret and compute the result of the following mathematical task about {}. '
-Original acc: 16.25%, attacked acc: 15.00%, dropped acc: 1.25%
-
-Original prompt: As a mathematics consultant, resolve the following problem associated with {}.
-Attacked prompt: b'As a math consultant, resolve the following poblem associated with {}. '
-Original acc: 16.25%, attacked acc: 14.37%, dropped acc: 1.88%
-
-## textfooler
-
-Original prompt: Figure out the solution for the following mathematical task with {}.
-Attacked prompt: b'Figure out the address for the farther arithmetic task with {}. '
-Original acc: 16.88%, attacked acc: 14.37%, dropped acc: 2.50%
-
-Original prompt: Solve the following math question about {}.
-Attacked prompt: b'Solve the following math question about {}. '
-Original acc: 16.25%, attacked acc: 16.25%, dropped acc: 0.63%
-
-Original prompt: Calculate the answer to the following math query about {}.
-Attacked prompt: b'Estimate the rebuttal to the following math view about {}. '
-Original acc: 16.25%, attacked acc: 10.00%, dropped acc: 6.25%
-
-Original prompt: Pretend you are a math teacher and solve this mathematical question concerning {}.
-Attacked prompt: b'Pretend you are a math teacher and fix this mathematical question concerning {}. '
-Original acc: 16.88%, attacked acc: 15.00%, dropped acc: 1.88%
-
-Original prompt: Act as a math expert and compute the result of the following mathematical task about {}.
-Attacked prompt: b'Act as a math expert and estimate the conclusion of the following mathematical task about {}. '
-Original acc: 16.25%, attacked acc: 13.75%, dropped acc: 2.50%
-
-Original prompt: As a mathematics consultant, resolve the following problem associated with {}.
-Attacked prompt: b'As a math consultant, resolve the following difficulty associated with {}. '
-Original acc: 16.25%, attacked acc: 13.75%, dropped acc: 2.50%
-
-## translation
-
-Language: chinese, acc: 13.75%, prompt: Calculate the answer to a mathematical question about {}.
-Language: chinese, acc: 10.62%, prompt: Calculate the value of {}.
-Language: chinese, acc: 12.50%, prompt: Please calculate the result of {}.
-Language: chinese, acc: 14.37%, prompt: According to {}, solve the following mathematical problem.
-Language: chinese, acc: 11.88%, prompt: Do the math with {}.
-Language: chinese, acc: 13.75%, prompt: Solve mathematical problems involving {}.
-Language: chinese, acc: 13.12%, prompt: Please calculate the answer involving {}.
-Language: chinese, acc: 13.12%, prompt: Solve for the following values based on {}.
-Language: chinese, acc: 13.75%, prompt: Calculate the following mathematical tasks using {}.
-Language: chinese, acc: 12.50%, prompt: Calculate the answer to the {} related question.
-Language: french, acc: 13.75%, prompt: Calculate the answer to the following mathematical question concerning {}.
-Language: french, acc: 12.50%, prompt: Calculate the result of {}.
-Language: french, acc: 10.62%, prompt: Please calculate the value of {}.
-Language: french, acc: 14.37%, prompt: According to {}, solve the following mathematical problem.
-Language: french, acc: 14.37%, prompt: Perform mathematical calculations with {}.
-Language: french, acc: 14.37%, prompt: Solve the mathematical problem involving {}.
-Language: french, acc: 12.50%, prompt: Please calculate the answer related to {}.
-Language: french, acc: 10.00%, prompt: According to {}, set the following value.
-Language: french, acc: 11.88%, prompt: Perform the following mathematical task using {}.
-Language: french, acc: 13.75%, prompt: Calculate the answer to the questions related to {}.
-Language: arabic, acc: 14.37%, prompt: Compute the answer to the next mathematical question about {}.
-Language: arabic, acc: 13.75%, prompt: Calculate {}.
-Language: arabic, acc: 13.75%, prompt: Please calculate {}.
-Language: arabic, acc: 14.37%, prompt: According to {}, solve the following mathematical problem.
-Language: arabic, acc: 11.88%, prompt: Do mathematical calculations using {}.
-Language: arabic, acc: 14.37%, prompt: A solution to the mathematical problem involving {}.
-Language: arabic, acc: 11.88%, prompt: Please calculate the answer regarding {}.
-Language: arabic, acc: 13.12%, prompt: According to {}, determine the next value.
-Language: arabic, acc: 13.12%, prompt: DO THE NEXT MATHEMATICAL JOB USING {}.
-Language: arabic, acc: 13.75%, prompt: Calculate the answer to questions related to {}.
-Language: spanish, acc: 12.50%, prompt: Compute the answer to the following mathematical question on {}.
-Language: spanish, acc: 12.50%, prompt: Compute the result of {}.
-Language: spanish, acc: 10.62%, prompt: Please calculate the value of {}.
-Language: spanish, acc: 13.12%, prompt: As {}, it solves the following mathematical problem.
-Language: spanish, acc: 14.37%, prompt: Performs mathematical calculations using {}.
-Language: spanish, acc: 14.37%, prompt: Solve the mathematical problem involving {}.
-Language: spanish, acc: 12.50%, prompt: Please calculate the answer related to {}.
-Language: spanish, acc: 12.50%, prompt: As {}, determine the next value.
-Language: spanish, acc: 11.88%, prompt: Perform the following mathematical task using {}.
-Language: spanish, acc: 14.37%, prompt: Compute the answer to questions related to {}.
-Language: japanese, acc: 14.37%, prompt: Calculate the answers to the math questions about {}.
-Language: japanese, acc: 10.62%, prompt: Calculate the value of {}.
-Language: japanese, acc: 12.50%, prompt: Please find the answer to {}.
-Language: japanese, acc: 15.62%, prompt: Based on {}, please solve the following mathematical problems.
-Language: japanese, acc: 15.00%, prompt: Use {} to perform mathematical calculations.
-Language: japanese, acc: 13.12%, prompt: Please solve the math problem that contains {}.
-Language: japanese, acc: 13.12%, prompt: Please calculate the answers related to {}.
-Language: japanese, acc: 15.62%, prompt: Based on {}, find the following values:
-Language: japanese, acc: 13.75%, prompt: Use {} to solve the following mathematical problem.
-Language: japanese, acc: 13.75%, prompt: Please calculate the answers to the questions related to {}.
-Language: korean, acc: 13.75%, prompt: Calculate the answer of the following math problem to {}.
-Language: korean, acc: 12.50%, prompt: Calculate the result of {}.
-Language: korean, acc: 10.62%, prompt: Please calculate the value of {}.
-Language: korean, acc: 13.12%, prompt: Work out the following math problems according to {}.
-Language: korean, acc: 14.37%, prompt: Use {} to proceed with mathematical calculations.
-Language: korean, acc: 13.75%, prompt: Work out a math problem involving {}.
-Language: korean, acc: 11.88%, prompt: Please calculate the answer to {}.
-Language: korean, acc: 11.25%, prompt: Try to get the following values according to {}.
-Language: korean, acc: 11.25%, prompt: Work out the next math task using {}.
-Language: korean, acc: 13.12%, prompt: Calculate the answer of the problem involving {}.
\ No newline at end of file
diff --git a/spaces/Marshalls/testmtd/feature_extraction/madmom/ml/nn/__init__.py b/spaces/Marshalls/testmtd/feature_extraction/madmom/ml/nn/__init__.py
deleted file mode 100644
index 3ea59ce898c8425482ad16027be8cd01a6c7b993..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/feature_extraction/madmom/ml/nn/__init__.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# encoding: utf-8
-# pylint: disable=no-member
-# pylint: disable=invalid-name
-# pylint: disable=too-many-arguments
-# pylint: disable=too-few-public-methods
-"""
-Neural Network package.
-
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import numpy as np
-
-from . import layers, activations
-from ...processors import Processor, ParallelProcessor, SequentialProcessor
-
-
-def average_predictions(predictions):
- """
- Returns the average of all predictions.
-
- Parameters
- ----------
- predictions : list
- Predictions (i.e. NN activation functions).
-
- Returns
- -------
- numpy array
- Averaged prediction.
-
- """
- # average predictions if needed
- if len(predictions) > 1:
- # average the predictions
- predictions = sum(predictions) / len(predictions)
- else:
- # nothing to average since we have only one prediction
- predictions = predictions[0]
- # return the (averaged) predictions
- return predictions
-
-
-class NeuralNetwork(Processor):
- """
- Neural Network class.
-
- Parameters
- ----------
- layers : list
- Layers of the Neural Network.
-
- Examples
- --------
- Create a NeuralNetwork from the given layers.
-
- >>> from madmom.ml.nn.layers import FeedForwardLayer
- >>> from madmom.ml.nn.activations import tanh, sigmoid
- >>> l1_weights = np.array([[0.5, -1., -0.3 , -0.2]])
- >>> l1_bias = np.array([0.05, 0., 0.8, -0.5])
- >>> l1 = FeedForwardLayer(l1_weights, l1_bias, activation_fn=tanh)
- >>> l2_weights = np.array([-1, 0.9, -0.2 , 0.4])
- >>> l2_bias = np.array([0.5])
- >>> l2 = FeedForwardLayer(l2_weights, l2_bias, activation_fn=sigmoid)
- >>> nn = NeuralNetwork([l1, l2])
- >>> nn # doctest: +ELLIPSIS
-
- >>> nn(np.array([[0], [0.5], [1], [0], [1], [2], [0]]))
- ... # doctest: +NORMALIZE_WHITESPACE
- array([0.53305, 0.36903, 0.265 , 0.53305, 0.265 , 0.18612, 0.53305])
-
- """
-
- def __init__(self, layers):
- self.layers = layers
-
- def process(self, data, reset=True, **kwargs):
- """
- Process the given data with the neural network.
-
- Parameters
- ----------
- data : numpy array, shape (num_frames, num_inputs)
- Activate the network with this data.
- reset : bool, optional
- Reset the network to its initial state before activating it.
-
- Returns
- -------
- numpy array, shape (num_frames, num_outputs)
- Network predictions for this data.
-
- """
- # make data at least 2d (required by NN-layers)
- if data.ndim < 2:
- data = np.array(data, subok=True, copy=False, ndmin=2)
- # loop over all layers
- for layer in self.layers:
- # activate the layer and feed the output into the next one
- data = layer.activate(data, reset=reset)
- # ravel the predictions if needed
- if data.ndim == 2 and data.shape[1] == 1:
- data = data.ravel()
- return data
-
- def reset(self):
- """
- Reset the neural network to its initial state.
-
- """
- for layer in self.layers:
- layer.reset()
-
-
-class NeuralNetworkEnsemble(SequentialProcessor):
- """
- Neural Network ensemble class.
-
- Parameters
- ----------
- networks : list
- List of the Neural Networks.
- ensemble_fn : function or callable, optional
- Ensemble function to be applied to the predictions of the neural
- network ensemble (default: average predictions).
- num_threads : int, optional
- Number of parallel working threads.
-
- Notes
- -----
- If `ensemble_fn` is set to 'None', the predictions are returned as a list
- with the same length as the number of networks given.
-
- Examples
- --------
- Create a NeuralNetworkEnsemble from the networks. Instead of supplying
-    the neural networks as a parameter, they can also be loaded from file:
-
- >>> from madmom.models import ONSETS_BRNN_PP
- >>> nn = NeuralNetworkEnsemble.load(ONSETS_BRNN_PP)
- >>> nn # doctest: +ELLIPSIS
-
- >>> nn(np.array([[0], [0.5], [1], [0], [1], [2], [0]]))
- ... # doctest: +NORMALIZE_WHITESPACE
- array([0.00116, 0.00213, 0.01428, 0.00729, 0.0088 , 0.21965, 0.00532])
-
- """
-
- def __init__(self, networks, ensemble_fn=average_predictions,
- num_threads=None, **kwargs):
- networks_processor = ParallelProcessor(networks,
- num_threads=num_threads)
- super(NeuralNetworkEnsemble, self).__init__((networks_processor,
- ensemble_fn))
-
- @classmethod
- def load(cls, nn_files, **kwargs):
- """
- Instantiate a new Neural Network ensemble from a list of files.
-
- Parameters
- ----------
- nn_files : list
- List of neural network model file names.
- kwargs : dict, optional
- Keyword arguments passed to NeuralNetworkEnsemble.
-
- Returns
- -------
- NeuralNetworkEnsemble
- NeuralNetworkEnsemble instance.
-
- """
- networks = [NeuralNetwork.load(f) for f in nn_files]
- return cls(networks, **kwargs)
-
- @staticmethod
- def add_arguments(parser, nn_files):
- """
- Add neural network options to an existing parser.
-
- Parameters
- ----------
- parser : argparse parser instance
- Existing argparse parser object.
- nn_files : list
- Neural network model files.
-
- Returns
- -------
- argparse argument group
- Neural network argument parser group.
-
- """
- # pylint: disable=signature-differs
- from madmom.utils import OverrideDefaultListAction
- # add neural network options
- g = parser.add_argument_group('neural network arguments')
- g.add_argument('--nn_files', action=OverrideDefaultListAction,
- type=str, default=nn_files,
- help='average the predictions of these pre-trained '
- 'neural networks (multiple files can be given, '
- 'one file per argument)')
- # return the argument group so it can be modified if needed
- return g
diff --git a/spaces/MathysL/AutoGPT4/autogpt/commands/image_gen.py b/spaces/MathysL/AutoGPT4/autogpt/commands/image_gen.py
deleted file mode 100644
index 0809fcdd3e38b52a2ce09ca1444f2574813d40f9..0000000000000000000000000000000000000000
--- a/spaces/MathysL/AutoGPT4/autogpt/commands/image_gen.py
+++ /dev/null
@@ -1,163 +0,0 @@
-""" Image Generation Module for AutoGPT."""
-import io
-import os.path
-import uuid
-from base64 import b64decode
-
-import openai
-import requests
-from PIL import Image
-
-from autogpt.config import Config
-from autogpt.workspace import path_in_workspace
-
-CFG = Config()
-
-
-def generate_image(prompt: str, size: int = 256) -> str:
- """Generate an image from a prompt.
-
- Args:
- prompt (str): The prompt to use
- size (int, optional): The size of the image. Defaults to 256. (Not supported by HuggingFace)
-
- Returns:
- str: The filename of the image
- """
- filename = f"{str(uuid.uuid4())}.jpg"
-
- # DALL-E
- if CFG.image_provider == "dalle":
- return generate_image_with_dalle(prompt, filename, size)
- # HuggingFace
- elif CFG.image_provider == "huggingface":
- return generate_image_with_hf(prompt, filename)
- # SD WebUI
- elif CFG.image_provider == "sdwebui":
- return generate_image_with_sd_webui(prompt, filename, size)
- return "No Image Provider Set"
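-
-
-# A hedged usage sketch for generate_image(); the provider value and the prompt below are
-# illustrative assumptions, not defaults of this module:
-#   CFG.image_provider = "dalle"
-#   generate_image("a watercolor fox", size=512)  # -> "Saved to disk:<uuid>.jpg"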
-
-
-def generate_image_with_hf(prompt: str, filename: str) -> str:
- """Generate an image with HuggingFace's API.
-
- Args:
- prompt (str): The prompt to use
- filename (str): The filename to save the image to
-
- Returns:
- str: The filename of the image
- """
- API_URL = (
- f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
- )
- if CFG.huggingface_api_token is None:
- raise ValueError(
- "You need to set your Hugging Face API token in the config file."
- )
- headers = {
- "Authorization": f"Bearer {CFG.huggingface_api_token}",
- "X-Use-Cache": "false",
- }
-
- response = requests.post(
- API_URL,
- headers=headers,
- json={
- "inputs": prompt,
- },
- )
-
- image = Image.open(io.BytesIO(response.content))
- print(f"Image Generated for prompt:{prompt}")
-
- image.save(path_in_workspace(filename))
-
- return f"Saved to disk:{filename}"
-
-
-def generate_image_with_dalle(prompt: str, filename: str, size: int = 256) -> str:
-    """Generate an image with DALL-E.
-
-    Args:
-        prompt (str): The prompt to use
-        filename (str): The filename to save the image to
-        size (int, optional): The size of the image. Defaults to 256.
-
- Returns:
- str: The filename of the image
- """
- openai.api_key = CFG.openai_api_key
-
- # Check for supported image sizes
- if size not in [256, 512, 1024]:
- closest = min([256, 512, 1024], key=lambda x: abs(x - size))
- print(
- f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
- )
- size = closest
-
- response = openai.Image.create(
- prompt=prompt,
- n=1,
- size=f"{size}x{size}",
- response_format="b64_json",
- )
-
- print(f"Image Generated for prompt:{prompt}")
-
- image_data = b64decode(response["data"][0]["b64_json"])
-
- with open(path_in_workspace(filename), mode="wb") as png:
- png.write(image_data)
-
- return f"Saved to disk:{filename}"
-
-
-def generate_image_with_sd_webui(
- prompt: str,
- filename: str,
- size: int = 512,
- negative_prompt: str = "",
- extra: dict = {},
-) -> str:
- """Generate an image with Stable Diffusion webui.
- Args:
- prompt (str): The prompt to use
- filename (str): The filename to save the image to
- size (int, optional): The size of the image. Defaults to 256.
- negative_prompt (str, optional): The negative prompt to use. Defaults to "".
- extra (dict, optional): Extra parameters to pass to the API. Defaults to {}.
- Returns:
- str: The filename of the image
- """
- # Create a session and set the basic auth if needed
- s = requests.Session()
- if CFG.sd_webui_auth:
- username, password = CFG.sd_webui_auth.split(":")
- s.auth = (username, password or "")
-
- # Generate the images
- response = requests.post(
- f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
- json={
- "prompt": prompt,
- "negative_prompt": negative_prompt,
- "sampler_index": "DDIM",
- "steps": 20,
- "cfg_scale": 7.0,
- "width": size,
- "height": size,
- "n_iter": 1,
- **extra,
- },
- )
-
- print(f"Image Generated for prompt:{prompt}")
-
- # Save the image to disk
- response = response.json()
- b64 = b64decode(response["images"][0].split(",", 1)[0])
- image = Image.open(io.BytesIO(b64))
- image.save(path_in_workspace(filename))
-
- return f"Saved to disk:{filename}"
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/cnn/bricks/context_block.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/cnn/bricks/context_block.py
deleted file mode 100644
index d60fdb904c749ce3b251510dff3cc63cea70d42e..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/cnn/bricks/context_block.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-from torch import nn
-
-from ..utils import constant_init, kaiming_init
-from .registry import PLUGIN_LAYERS
-
-
-def last_zero_init(m):
- if isinstance(m, nn.Sequential):
- constant_init(m[-1], val=0)
- else:
- constant_init(m, val=0)
-
-
-@PLUGIN_LAYERS.register_module()
-class ContextBlock(nn.Module):
- """ContextBlock module in GCNet.
-
- See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond'
- (https://arxiv.org/abs/1904.11492) for details.
-
- Args:
- in_channels (int): Channels of the input feature map.
- ratio (float): Ratio of channels of transform bottleneck
- pooling_type (str): Pooling method for context modeling.
- Options are 'att' and 'avg', stand for attention pooling and
- average pooling respectively. Default: 'att'.
-        fusion_types (Sequence[str]): Fusion methods for feature fusion.
-            Options are 'channel_add' and 'channel_mul', which stand for
-            channel-wise addition and multiplication respectively.
-            Default: ('channel_add',).
- """
-
- _abbr_ = 'context_block'
-
- def __init__(self,
- in_channels,
- ratio,
- pooling_type='att',
- fusion_types=('channel_add', )):
- super(ContextBlock, self).__init__()
- assert pooling_type in ['avg', 'att']
- assert isinstance(fusion_types, (list, tuple))
- valid_fusion_types = ['channel_add', 'channel_mul']
- assert all([f in valid_fusion_types for f in fusion_types])
- assert len(fusion_types) > 0, 'at least one fusion should be used'
- self.in_channels = in_channels
- self.ratio = ratio
- self.planes = int(in_channels * ratio)
- self.pooling_type = pooling_type
- self.fusion_types = fusion_types
- if pooling_type == 'att':
- self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1)
- self.softmax = nn.Softmax(dim=2)
- else:
- self.avg_pool = nn.AdaptiveAvgPool2d(1)
- if 'channel_add' in fusion_types:
- self.channel_add_conv = nn.Sequential(
- nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
- nn.LayerNorm([self.planes, 1, 1]),
- nn.ReLU(inplace=True), # yapf: disable
- nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
- else:
- self.channel_add_conv = None
- if 'channel_mul' in fusion_types:
- self.channel_mul_conv = nn.Sequential(
- nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
- nn.LayerNorm([self.planes, 1, 1]),
- nn.ReLU(inplace=True), # yapf: disable
- nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
- else:
- self.channel_mul_conv = None
- self.reset_parameters()
-
- def reset_parameters(self):
- if self.pooling_type == 'att':
- kaiming_init(self.conv_mask, mode='fan_in')
- self.conv_mask.inited = True
-
- if self.channel_add_conv is not None:
- last_zero_init(self.channel_add_conv)
- if self.channel_mul_conv is not None:
- last_zero_init(self.channel_mul_conv)
-
- def spatial_pool(self, x):
- batch, channel, height, width = x.size()
- if self.pooling_type == 'att':
- input_x = x
- # [N, C, H * W]
- input_x = input_x.view(batch, channel, height * width)
- # [N, 1, C, H * W]
- input_x = input_x.unsqueeze(1)
- # [N, 1, H, W]
- context_mask = self.conv_mask(x)
- # [N, 1, H * W]
- context_mask = context_mask.view(batch, 1, height * width)
- # [N, 1, H * W]
- context_mask = self.softmax(context_mask)
- # [N, 1, H * W, 1]
- context_mask = context_mask.unsqueeze(-1)
- # [N, 1, C, 1]
- context = torch.matmul(input_x, context_mask)
- # [N, C, 1, 1]
- context = context.view(batch, channel, 1, 1)
- else:
- # [N, C, 1, 1]
- context = self.avg_pool(x)
-
- return context
-
- def forward(self, x):
- # [N, C, 1, 1]
- context = self.spatial_pool(x)
-
- out = x
- if self.channel_mul_conv is not None:
- # [N, C, 1, 1]
- channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
- out = out * channel_mul_term
- if self.channel_add_conv is not None:
- # [N, C, 1, 1]
- channel_add_term = self.channel_add_conv(context)
- out = out + channel_add_term
-
- return out
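-
-
-# Shape sketch (the channel count is an illustrative assumption): ContextBlock(in_channels=64,
-# ratio=1. / 4) maps an input of shape (N, 64, H, W) to an output of the same shape; the global
-# context computed in spatial_pool has shape (N, 64, 1, 1).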
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/cldm/model.py b/spaces/Mellow-ai/PhotoAI_Mellow/cldm/model.py
deleted file mode 100644
index fed3c31ac145b78907c7f771d1d8db6fb32d92ed..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/cldm/model.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import os
-import torch
-
-from omegaconf import OmegaConf
-from ldm.util import instantiate_from_config
-
-
-def get_state_dict(d):
- return d.get('state_dict', d)
-
-
-def load_state_dict(ckpt_path, location='cpu'):
- _, extension = os.path.splitext(ckpt_path)
- if extension.lower() == ".safetensors":
- import safetensors.torch
- state_dict = safetensors.torch.load_file(ckpt_path, device=location)
- else:
- state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))
- state_dict = get_state_dict(state_dict)
- print(f'Loaded state_dict from [{ckpt_path}]')
- return state_dict
-
-
-def create_model(config_path):
- config = OmegaConf.load(config_path)
- model = instantiate_from_config(config.model).cpu()
- print(f'Loaded model config from [{config_path}]')
- return model
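-
-
-# Hedged usage sketch (both file paths are illustrative assumptions):
-#   model = create_model('./models/cldm_v15.yaml')
-#   model.load_state_dict(load_state_dict('./models/control_sd15.ckpt', location='cpu'))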
diff --git a/spaces/MirageML/sjc/sd1/ldm/modules/encoders/modules_bak.py b/spaces/MirageML/sjc/sd1/ldm/modules/encoders/modules_bak.py
deleted file mode 100644
index 418fc52d6012a9e4acf6f2ba19ce4d038eb45be2..0000000000000000000000000000000000000000
--- a/spaces/MirageML/sjc/sd1/ldm/modules/encoders/modules_bak.py
+++ /dev/null
@@ -1,510 +0,0 @@
-import torch
-import torch.nn as nn
-from functools import partial
-import clip
-from einops import rearrange, repeat
-from transformers import CLIPTokenizer, CLIPTextModel
-import kornia
-
-from ldm.modules.x_transformer import Encoder, TransformerWrapper  # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
-
-def _expand_mask(mask, dtype, tgt_len = None):
- """
- Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
- """
- bsz, src_len = mask.size()
- tgt_len = tgt_len if tgt_len is not None else src_len
-
- expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
-
- inverted_mask = 1.0 - expanded_mask
-
- return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
-
-def _build_causal_attention_mask(bsz, seq_len, dtype):
- # lazily create causal attention mask, with full attention between the vision tokens
- # pytorch uses additive attention mask; fill with -inf
- mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype)
- mask.fill_(torch.tensor(torch.finfo(dtype).min))
-    mask.triu_(1)  # zero out the diagonal and the lower triangle
- mask = mask.unsqueeze(1) # expand mask
- return mask
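-
-
-# For example, with seq_len = 3 the additive mask built above is (min = torch.finfo(dtype).min):
-#   [[0, min, min],
-#    [0,   0, min],
-#    [0,   0,   0]]
-# so each position can attend only to itself and to earlier positions.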
-
-class AbstractEncoder(nn.Module):
- def __init__(self):
- super().__init__()
-
- def encode(self, *args, **kwargs):
- raise NotImplementedError
-
-
-
-class ClassEmbedder(nn.Module):
- def __init__(self, embed_dim, n_classes=1000, key='class'):
- super().__init__()
- self.key = key
- self.embedding = nn.Embedding(n_classes, embed_dim)
-
- def forward(self, batch, key=None):
- if key is None:
- key = self.key
- # this is for use in crossattn
- c = batch[key][:, None]
- c = self.embedding(c)
- return c
-
-
-class TransformerEmbedder(AbstractEncoder):
- """Some transformer encoder layers"""
- def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
- super().__init__()
- self.device = device
- self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
- attn_layers=Encoder(dim=n_embed, depth=n_layer))
-
- def forward(self, tokens):
- tokens = tokens.to(self.device) # meh
- z = self.transformer(tokens, return_embeddings=True)
- return z
-
- def encode(self, x):
- return self(x)
-
-
-class BERTTokenizer(AbstractEncoder):
-    """Uses a pretrained BERT tokenizer from Hugging Face. Vocab size: 30522."""
- def __init__(self, device="cuda", vq_interface=True, max_length=77):
- super().__init__()
-        from transformers import BertTokenizerFast  # TODO: add to requirements
- self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
- self.device = device
- self.vq_interface = vq_interface
- self.max_length = max_length
-
- def forward(self, text):
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
- tokens = batch_encoding["input_ids"].to(self.device)
- return tokens
-
- @torch.no_grad()
- def encode(self, text):
- tokens = self(text)
- if not self.vq_interface:
- return tokens
- return None, None, [None, None, tokens]
-
- def decode(self, text):
- return text
-
-
-class BERTEmbedder(AbstractEncoder):
-    """Uses the BERT tokenizer and adds some transformer encoder layers."""
- def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
- device="cuda",use_tokenizer=True, embedding_dropout=0.0):
- super().__init__()
- self.use_tknz_fn = use_tokenizer
- if self.use_tknz_fn:
- self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
- self.device = device
- self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
- attn_layers=Encoder(dim=n_embed, depth=n_layer),
- emb_dropout=embedding_dropout)
-
- def forward(self, text, embedding_manager=None):
- if self.use_tknz_fn:
- tokens = self.tknz_fn(text)#.to(self.device)
- else:
- tokens = text
- z = self.transformer(tokens, return_embeddings=True, embedding_manager=embedding_manager)
- return z
-
- def encode(self, text, **kwargs):
- # output of length 77
- return self(text, **kwargs)
-
-class SpatialRescaler(nn.Module):
- def __init__(self,
- n_stages=1,
- method='bilinear',
- multiplier=0.5,
- in_channels=3,
- out_channels=None,
- bias=False):
- super().__init__()
- self.n_stages = n_stages
- assert self.n_stages >= 0
- assert method in ['nearest','linear','bilinear','trilinear','bicubic','area']
- self.multiplier = multiplier
- self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
- self.remap_output = out_channels is not None
- if self.remap_output:
- print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
- self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias)
-
- def forward(self,x):
- for stage in range(self.n_stages):
- x = self.interpolator(x, scale_factor=self.multiplier)
-
-
- if self.remap_output:
- x = self.channel_mapper(x)
- return x
-
- def encode(self, x):
- return self(x)
-
-class FrozenCLIPEmbedder(AbstractEncoder):
- """Uses the CLIP transformer encoder for text (from Hugging Face)"""
- def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
- super().__init__()
- self.tokenizer = CLIPTokenizer.from_pretrained(version)
- self.transformer = CLIPTextModel.from_pretrained(version)
- self.device = device
- self.max_length = max_length
- self.freeze()
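-
-        # Note: the nested functions below are bound onto the Hugging Face CLIP text model so
-        # that an optional embedding_manager can rewrite token embeddings before the encoder runs.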
-
- def embedding_forward(
- self,
- input_ids = None,
- position_ids = None,
- inputs_embeds = None,
- embedding_manager = None,
- ) -> torch.Tensor:
-
- seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
-
- if position_ids is None:
- position_ids = self.position_ids[:, :seq_length]
-
- if inputs_embeds is None:
- inputs_embeds = self.token_embedding(input_ids)
-
- if embedding_manager is not None:
- inputs_embeds = embedding_manager(input_ids, inputs_embeds)
-
-
- position_embeddings = self.position_embedding(position_ids)
- embeddings = inputs_embeds + position_embeddings
-
- return embeddings
-
- self.transformer.text_model.embeddings.forward = embedding_forward.__get__(self.transformer.text_model.embeddings)
-
- def encoder_forward(
- self,
- inputs_embeds,
- attention_mask = None,
- causal_attention_mask = None,
- output_attentions = None,
- output_hidden_states = None,
- return_dict = None,
- ):
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- encoder_states = () if output_hidden_states else None
- all_attentions = () if output_attentions else None
-
- hidden_states = inputs_embeds
- for idx, encoder_layer in enumerate(self.layers):
- if output_hidden_states:
- encoder_states = encoder_states + (hidden_states,)
-
- layer_outputs = encoder_layer(
- hidden_states,
- attention_mask,
- causal_attention_mask,
- output_attentions=output_attentions,
- )
-
- hidden_states = layer_outputs[0]
-
- if output_attentions:
- all_attentions = all_attentions + (layer_outputs[1],)
-
- if output_hidden_states:
- encoder_states = encoder_states + (hidden_states,)
-
- return hidden_states
-
- # if not return_dict:
- # return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
- # return BaseModelOutput(
- # last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
- # )
-
- self.transformer.text_model.encoder.forward = encoder_forward.__get__(self.transformer.text_model.encoder)
-
-
- def text_encoder_forward(
- self,
- input_ids = None,
- attention_mask = None,
- position_ids = None,
- output_attentions = None,
- output_hidden_states = None,
- return_dict = None,
- embedding_manager = None,
- ):
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if input_ids is None:
- raise ValueError("You have to specify either input_ids")
-
- input_shape = input_ids.size()
- input_ids = input_ids.view(-1, input_shape[-1])
-
- hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, embedding_manager=embedding_manager)
-
- bsz, seq_len = input_shape
- # CLIP's text model uses causal mask, prepare it here.
- # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
- causal_attention_mask = _build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to(
- hidden_states.device
- )
-
- # expand attention_mask
- if attention_mask is not None:
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- attention_mask = _expand_mask(attention_mask, hidden_states.dtype)
-
- last_hidden_state = self.encoder(
- inputs_embeds=hidden_states,
- attention_mask=attention_mask,
- causal_attention_mask=causal_attention_mask,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- # last_hidden_state = encoder_outputs[0]
- last_hidden_state = self.final_layer_norm(last_hidden_state)
-
- # text_embeds.shape = [batch_size, sequence_length, transformer.width]
- # take features from the eot embedding (eot_token is the highest number in each sequence)
- # pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), input_ids.argmax(dim=-1)]
-
- # if not return_dict:
- # return (last_hidden_state, pooled_output) + encoder_outputs[1:]
-
- return last_hidden_state
-
- self.transformer.text_model.forward = text_encoder_forward.__get__(self.transformer.text_model)
-
- def transformer_forward(
- self,
- input_ids = None,
- attention_mask = None,
- position_ids = None,
- output_attentions = None,
- output_hidden_states = None,
- return_dict = None,
- embedding_manager = None,
- ):
- return self.text_model(
- input_ids=input_ids,
- attention_mask=attention_mask,
- position_ids=position_ids,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- embedding_manager = embedding_manager
- )
-
- self.transformer.forward = transformer_forward.__get__(self.transformer)
-
-
- # def update_embedding_func(self, embedding_manager):
- # text_model = self.transformer.text_model
- # # text_model.old_embeddings = text_model.embeddings
-
- # # def new_embeddings(
- # # input_ids = None,
- # # position_ids = None,
- # # inputs_embeds = None,
- # # ) -> torch.Tensor:
-
- # # seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
-
- # # if position_ids is None:
- # # position_ids = text_model.old_embeddings.position_ids[:, :seq_length]
-
- # # if inputs_embeds is None:
- # # inputs_embeds = text_model.old_embeddings.token_embedding(input_ids)
-
-
- # # inputs_embeds = embedding_manager(input_ids, inputs_embeds)
-
- # # position_embeddings = text_model.old_embeddings.position_embedding(position_ids)
- # # embeddings = inputs_embeds + position_embeddings
-
- # # return embeddings
-
- # # del text_model.embeddings
- # # text_model.embeddings = new_embeddings
-
- # # class NewEmbeddings(torch.nn.Module):
-
- # # def __init__(self, orig_embedder):
- # # super().__init__()
- # # self.orig_embedder = orig_embedder
-
- # # def forward(
- # # self,
- # # input_ids = None,
- # # position_ids = None,
- # # inputs_embeds = None,
- # # ) -> torch.Tensor:
-
- # # seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
-
- # # if position_ids is None:
- # # position_ids = self.orig_embedder.position_ids[:, :seq_length]
-
- # # if inputs_embeds is None:
- # # inputs_embeds = self.orig_embedder.token_embedding(input_ids)
-
- # # inputs_embeds = embedding_manager(input_ids, inputs_embeds)
-
- # # position_embeddings = self.orig_embedder.position_embedding(position_ids)
- # # embeddings = inputs_embeds + position_embeddings
-
- # # return embeddings
-
- # # # self.new_embeddings =
- # # # text_model.embeddings = new_embeddings.__call__.__get__(text_model)
- # # text_model.embeddings = NewEmbeddings(text_model.embeddings)
-
- # class NewEmbeddings(torch.nn.Module):
-
- # def __init__(self, orig_embedder, embedding_manager):
- # super().__init__()
- # self.embedding_manager = embedding_manager
- # self.orig_embedder = orig_embedder
-
- # def forward(
- # self,
- # input_ids = None,
- # position_ids = None,
- # inputs_embeds = None,
- # ) -> torch.Tensor:
-
- # seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
-
- # if position_ids is None:
- # position_ids = self.orig_embedder.position_ids[:, :seq_length]
-
- # if inputs_embeds is None:
- # inputs_embeds = self.orig_embedder.token_embedding(input_ids)
-
- # # init_embeds = inputs_embeds.clone()
- # inputs_embeds = self.embedding_manager(input_ids, inputs_embeds)
-
- # # print(inputs_embeds - init_embeds)
- # # print((inputs_embeds - init_embeds).max())
- # # exit(0)
-
- # position_embeddings = self.orig_embedder.position_embedding(position_ids)
- # embeddings = inputs_embeds + position_embeddings
-
- # return embeddings
-
- # # self.new_embeddings =
- # # text_model.embeddings = new_embeddings.__call__.__get__(text_model)
- # text_model.embeddings = NewEmbeddings(text_model.embeddings, embedding_manager)
-
- def freeze(self):
- self.transformer = self.transformer.eval()
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, text, **kwargs):
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
- tokens = batch_encoding["input_ids"].to(self.device)
- z = self.transformer(input_ids=tokens, **kwargs)
-
- return z
-
- def encode(self, text, **kwargs):
- return self(text, **kwargs)
-
-
-class FrozenCLIPTextEmbedder(nn.Module):
- """
- Uses the CLIP transformer encoder for text.
- """
- def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True):
- super().__init__()
- self.model, _ = clip.load(version, jit=False, device="cpu")
- self.device = device
- self.max_length = max_length
- self.n_repeat = n_repeat
- self.normalize = normalize
-
- def freeze(self):
- self.model = self.model.eval()
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, text):
- tokens = clip.tokenize(text).to(self.device)
- z = self.model.encode_text(tokens)
- if self.normalize:
- z = z / torch.linalg.norm(z, dim=1, keepdim=True)
- return z
-
- def encode(self, text):
- z = self(text)
- if z.ndim==2:
- z = z[:, None, :]
- z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat)
- return z
-
-
-class FrozenClipImageEmbedder(nn.Module):
- """
- Uses the CLIP image encoder.
- """
- def __init__(
- self,
- model,
- jit=False,
- device='cuda' if torch.cuda.is_available() else 'cpu',
- antialias=False,
- ):
- super().__init__()
- self.model, _ = clip.load(name=model, device=device, jit=jit)
-
- self.antialias = antialias
-
- self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
- self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)
-
- def preprocess(self, x):
- # normalize to [0,1]
- x = kornia.geometry.resize(x, (224, 224),
- interpolation='bicubic',align_corners=True,
- antialias=self.antialias)
- x = (x + 1.) / 2.
- # renormalize according to clip
- x = kornia.enhance.normalize(x, self.mean, self.std)
- return x
-
- def forward(self, x):
- # x is assumed to be in range [-1,1]
- return self.model.encode_image(self.preprocess(x))
-
-
-if __name__ == "__main__":
- from ldm.util import count_params
- model = FrozenCLIPEmbedder()
- count_params(model, verbose=True)
\ No newline at end of file
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textdet/heads/pse_head.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textdet/heads/pse_head.py
deleted file mode 100644
index 0aee6a07b4d6325d22a14dc76c2796391ce62eab..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textdet/heads/pse_head.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import Dict, List, Optional, Union
-
-from mmocr.registry import MODELS
-from . import PANHead
-
-
-@MODELS.register_module()
-class PSEHead(PANHead):
- """The class for PSENet head.
-
- Args:
- in_channels (list[int]): A list of numbers of input channels.
- hidden_dim (int): The hidden dimension of the first convolutional
- layer.
- out_channel (int): Number of output channels.
- module_loss (dict): Configuration dictionary for loss type. Supported
- loss types are "PANModuleLoss" and "PSEModuleLoss". Defaults to
- PSEModuleLoss.
- postprocessor (dict): Config of postprocessor for PSENet.
- init_cfg (dict or list[dict], optional): Initialization configs.
- """
-
- def __init__(self,
- in_channels: List[int],
- hidden_dim: int,
- out_channel: int,
- module_loss: Dict = dict(type='PSEModuleLoss'),
- postprocessor: Dict = dict(
- type='PSEPostprocessor', text_repr_type='poly'),
- init_cfg: Optional[Union[Dict, List[Dict]]] = None) -> None:
-
- super().__init__(
- in_channels=in_channels,
- hidden_dim=hidden_dim,
- out_channel=out_channel,
- module_loss=module_loss,
- postprocessor=postprocessor,
- init_cfg=init_cfg)
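-
-
-# Hedged config sketch (the channel sizes are illustrative assumptions):
-#   head = PSEHead(in_channels=[256], hidden_dim=256, out_channel=7)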
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/layers/robust_scanner_fusion_layer.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/layers/robust_scanner_fusion_layer.py
deleted file mode 100644
index 126d119f3e3853c53d1a0a584c6cfbc0197ca90c..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/layers/robust_scanner_fusion_layer.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-from mmengine.model import BaseModule
-
-
-class RobustScannerFusionLayer(BaseModule):
-
- def __init__(self, dim_model, dim=-1, init_cfg=None):
- super().__init__(init_cfg=init_cfg)
-
- self.dim_model = dim_model
- self.dim = dim
-
- self.linear_layer = nn.Linear(dim_model * 2, dim_model * 2)
- self.glu_layer = nn.GLU(dim=dim)
-
- def forward(self, x0, x1):
- assert x0.size() == x1.size()
- fusion_input = torch.cat([x0, x1], self.dim)
- output = self.linear_layer(fusion_input)
- output = self.glu_layer(output)
-
- return output
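-
-
-# Shape sketch: for x0 and x1 of shape (N, T, dim_model), the concatenation along the last
-# dimension has shape (N, T, 2 * dim_model), the linear layer keeps that width, and the GLU
-# halves it back to (N, T, dim_model).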
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/module_losses/ctc_module_loss.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/module_losses/ctc_module_loss.py
deleted file mode 100644
index e98d7b4c905487d1158402dd00d82570207513b5..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/module_losses/ctc_module_loss.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import math
-from typing import Dict, Sequence, Union
-
-import torch
-import torch.nn as nn
-
-from mmocr.models.common.dictionary import Dictionary
-from mmocr.registry import MODELS
-from mmocr.structures import TextRecogDataSample
-from .base import BaseTextRecogModuleLoss
-
-
-@MODELS.register_module()
-class CTCModuleLoss(BaseTextRecogModuleLoss):
- """Implementation of loss module for CTC-loss based text recognition.
-
- Args:
- dictionary (dict or :obj:`Dictionary`): The config for `Dictionary` or
- the instance of `Dictionary`.
- letter_case (str): There are three options to alter the letter cases
- of gt texts:
- - unchanged: Do not change gt texts.
- - upper: Convert gt texts into uppercase characters.
- - lower: Convert gt texts into lowercase characters.
- Usually, it only works for English characters. Defaults to
- 'unchanged'.
- flatten (bool): If True, use flattened targets, else padded targets.
- reduction (str): Specifies the reduction to apply to the output,
- should be one of the following: ('none', 'mean', 'sum').
- zero_infinity (bool): Whether to zero infinite losses and
- the associated gradients. Default: False.
- Infinite losses mainly occur when the inputs
- are too short to be aligned to the targets.
- """
-
- def __init__(self,
- dictionary: Union[Dict, Dictionary],
- letter_case: str = 'unchanged',
- flatten: bool = True,
- reduction: str = 'mean',
- zero_infinity: bool = False,
- **kwargs) -> None:
- super().__init__(dictionary=dictionary, letter_case=letter_case)
- assert isinstance(flatten, bool)
- assert isinstance(reduction, str)
- assert isinstance(zero_infinity, bool)
-
- self.flatten = flatten
- self.ctc_loss = nn.CTCLoss(
- blank=self.dictionary.padding_idx,
- reduction=reduction,
- zero_infinity=zero_infinity)
-
- def forward(self, outputs: torch.Tensor,
- data_samples: Sequence[TextRecogDataSample]) -> Dict:
- """
- Args:
- outputs (Tensor): A raw logit tensor of shape :math:`(N, T, C)`.
- data_samples (list[TextRecogDataSample]): List of
-                ``TextRecogDataSample`` which are processed by ``get_targets``.
-
- Returns:
- dict: The loss dict with key ``loss_ctc``.
- """
- valid_ratios = None
- if data_samples is not None:
- valid_ratios = [
- img_meta.get('valid_ratio', 1.0) for img_meta in data_samples
- ]
-
- outputs = torch.log_softmax(outputs, dim=2)
- bsz, seq_len = outputs.size(0), outputs.size(1)
- outputs_for_loss = outputs.permute(1, 0, 2).contiguous() # T * N * C
- targets = [
- data_sample.gt_text.indexes[:seq_len]
- for data_sample in data_samples
- ]
- target_lengths = torch.IntTensor([len(t) for t in targets])
- target_lengths = torch.clamp(target_lengths, max=seq_len).long()
- input_lengths = torch.full(
- size=(bsz, ), fill_value=seq_len, dtype=torch.long)
- if self.flatten:
- targets = torch.cat(targets)
- else:
- padded_targets = torch.full(
- size=(bsz, seq_len),
- fill_value=self.dictionary.padding_idx,
- dtype=torch.long)
- for idx, valid_len in enumerate(target_lengths):
- padded_targets[idx, :valid_len] = targets[idx][:valid_len]
- targets = padded_targets
-
- if valid_ratios is not None:
- input_lengths = [
- math.ceil(valid_ratio * seq_len)
- for valid_ratio in valid_ratios
- ]
- input_lengths = torch.Tensor(input_lengths).long()
- loss_ctc = self.ctc_loss(outputs_for_loss, targets, input_lengths,
- target_lengths)
- losses = dict(loss_ctc=loss_ctc)
-
- return losses
-
- def get_targets(
- self, data_samples: Sequence[TextRecogDataSample]
- ) -> Sequence[TextRecogDataSample]:
- """Target generator.
-
- Args:
- data_samples (list[TextRecogDataSample]): It usually includes
- ``gt_text`` information.
-
- Returns:
-
- list[TextRecogDataSample]: updated data_samples. It will add two
-            list[TextRecogDataSample]: Updated data_samples. The following key
-            is added to each data_sample:
- - indexes (torch.LongTensor): The index corresponding to the item.
- """
-
- for data_sample in data_samples:
- text = data_sample.gt_text.item
- if self.letter_case in ['upper', 'lower']:
- text = getattr(text, self.letter_case)()
- indexes = self.dictionary.str2idx(text)
- indexes = torch.IntTensor(indexes)
- data_sample.gt_text.indexes = indexes
- return data_samples
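-
-
-# Hedged usage sketch (the dictionary config is an illustrative assumption; with_padding=True
-# matters because the padding index doubles as the CTC blank above):
-#   loss_fn = CTCModuleLoss(dictionary=dict(type='Dictionary', dict_file='dict.txt', with_padding=True))
-#   losses = loss_fn(outputs, loss_fn.get_targets(data_samples))  # {'loss_ctc': tensor(...)}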
diff --git a/spaces/NCTCMumbai/NCTC/models/research/adversarial_text/train_utils.py b/spaces/NCTCMumbai/NCTC/models/research/adversarial_text/train_utils.py
deleted file mode 100644
index 577237967d0bb26b073f7146eb42106fc630da5e..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/adversarial_text/train_utils.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright 2017 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Utilities for training adversarial text models."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import time
-
-# Dependency imports
-
-import numpy as np
-import tensorflow as tf
-
-flags = tf.app.flags
-FLAGS = flags.FLAGS
-
-flags.DEFINE_string('master', '', 'Master address.')
-flags.DEFINE_integer('task', 0, 'Task id of the replica running the training.')
-flags.DEFINE_integer('ps_tasks', 0, 'Number of parameter servers.')
-flags.DEFINE_string('train_dir', '/tmp/text_train',
- 'Directory for logs and checkpoints.')
-flags.DEFINE_integer('max_steps', 1000000, 'Number of batches to run.')
-flags.DEFINE_boolean('log_device_placement', False,
- 'Whether to log device placement.')
-
-
-def run_training(train_op,
- loss,
- global_step,
- variables_to_restore=None,
- pretrained_model_dir=None):
- """Sets up and runs training loop."""
- tf.gfile.MakeDirs(FLAGS.train_dir)
-
- # Create pretrain Saver
- if pretrained_model_dir:
- assert variables_to_restore
- tf.logging.info('Will attempt restore from %s: %s', pretrained_model_dir,
- variables_to_restore)
- saver_for_restore = tf.train.Saver(variables_to_restore)
-
- # Init ops
- if FLAGS.sync_replicas:
- local_init_op = tf.get_collection('local_init_op')[0]
- ready_for_local_init_op = tf.get_collection('ready_for_local_init_op')[0]
- else:
- local_init_op = tf.train.Supervisor.USE_DEFAULT
- ready_for_local_init_op = tf.train.Supervisor.USE_DEFAULT
-
- is_chief = FLAGS.task == 0
- sv = tf.train.Supervisor(
- logdir=FLAGS.train_dir,
- is_chief=is_chief,
- save_summaries_secs=30,
- save_model_secs=30,
- local_init_op=local_init_op,
- ready_for_local_init_op=ready_for_local_init_op,
- global_step=global_step)
-
- # Delay starting standard services to allow possible pretrained model restore.
- with sv.managed_session(
- master=FLAGS.master,
- config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement),
- start_standard_services=False) as sess:
- # Initialization
- if is_chief:
- if pretrained_model_dir:
- maybe_restore_pretrained_model(sess, saver_for_restore,
- pretrained_model_dir)
- if FLAGS.sync_replicas:
- sess.run(tf.get_collection('chief_init_op')[0])
- sv.start_standard_services(sess)
-
- sv.start_queue_runners(sess)
-
- # Training loop
- global_step_val = 0
- while not sv.should_stop() and global_step_val < FLAGS.max_steps:
- global_step_val = train_step(sess, train_op, loss, global_step)
-
- # Final checkpoint
- if is_chief and global_step_val >= FLAGS.max_steps:
- sv.saver.save(sess, sv.save_path, global_step=global_step)
-
-
-def maybe_restore_pretrained_model(sess, saver_for_restore, model_dir):
- """Restores pretrained model if there is no ckpt model."""
- ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
- checkpoint_exists = ckpt and ckpt.model_checkpoint_path
- if checkpoint_exists:
- tf.logging.info('Checkpoint exists in FLAGS.train_dir; skipping '
- 'pretraining restore')
- return
-
- pretrain_ckpt = tf.train.get_checkpoint_state(model_dir)
- if not (pretrain_ckpt and pretrain_ckpt.model_checkpoint_path):
- raise ValueError(
- 'Asked to restore model from %s but no checkpoint found.' % model_dir)
- saver_for_restore.restore(sess, pretrain_ckpt.model_checkpoint_path)
-
-
-def train_step(sess, train_op, loss, global_step):
- """Runs a single training step."""
- start_time = time.time()
- _, loss_val, global_step_val = sess.run([train_op, loss, global_step])
- duration = time.time() - start_time
-
- # Logging
- if global_step_val % 10 == 0:
- examples_per_sec = FLAGS.batch_size / duration
- sec_per_batch = float(duration)
-
- format_str = ('step %d, loss = %.2f (%.1f examples/sec; %.3f ' 'sec/batch)')
- tf.logging.info(format_str % (global_step_val, loss_val, examples_per_sec,
- sec_per_batch))
-
- if np.isnan(loss_val):
- raise OverflowError('Loss is nan')
-
- return global_step_val
diff --git a/spaces/NCTCMumbai/NCTC/models/research/autoencoder/autoencoder_models/Autoencoder.py b/spaces/NCTCMumbai/NCTC/models/research/autoencoder/autoencoder_models/Autoencoder.py
deleted file mode 100644
index 788a14642306ece056fc53a85ba8c60d87d31826..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/autoencoder/autoencoder_models/Autoencoder.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import numpy as np
-import tensorflow as tf
-
-
-class Autoencoder(object):
-
- def __init__(self, n_layers, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer()):
- self.n_layers = n_layers
- self.transfer = transfer_function
-
- network_weights = self._initialize_weights()
- self.weights = network_weights
-
- # model
- self.x = tf.placeholder(tf.float32, [None, self.n_layers[0]])
- self.hidden_encode = []
- h = self.x
- for layer in range(len(self.n_layers)-1):
- h = self.transfer(
- tf.add(tf.matmul(h, self.weights['encode'][layer]['w']),
- self.weights['encode'][layer]['b']))
- self.hidden_encode.append(h)
-
- self.hidden_recon = []
- for layer in range(len(self.n_layers)-1):
- h = self.transfer(
- tf.add(tf.matmul(h, self.weights['recon'][layer]['w']),
- self.weights['recon'][layer]['b']))
- self.hidden_recon.append(h)
- self.reconstruction = self.hidden_recon[-1]
-
- # cost
- self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
- self.optimizer = optimizer.minimize(self.cost)
-
- init = tf.global_variables_initializer()
- self.sess = tf.Session()
- self.sess.run(init)
-
-
- def _initialize_weights(self):
- all_weights = dict()
- initializer = tf.contrib.layers.xavier_initializer()
- # Encoding network weights
- encoder_weights = []
- for layer in range(len(self.n_layers)-1):
- w = tf.Variable(
- initializer((self.n_layers[layer], self.n_layers[layer + 1]),
- dtype=tf.float32))
- b = tf.Variable(
- tf.zeros([self.n_layers[layer + 1]], dtype=tf.float32))
- encoder_weights.append({'w': w, 'b': b})
- # Recon network weights
- recon_weights = []
- for layer in range(len(self.n_layers)-1, 0, -1):
- w = tf.Variable(
- initializer((self.n_layers[layer], self.n_layers[layer - 1]),
- dtype=tf.float32))
- b = tf.Variable(
- tf.zeros([self.n_layers[layer - 1]], dtype=tf.float32))
- recon_weights.append({'w': w, 'b': b})
- all_weights['encode'] = encoder_weights
- all_weights['recon'] = recon_weights
- return all_weights
-
- def partial_fit(self, X):
- cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
- return cost
-
- def calc_total_cost(self, X):
- return self.sess.run(self.cost, feed_dict={self.x: X})
-
- def transform(self, X):
- return self.sess.run(self.hidden_encode[-1], feed_dict={self.x: X})
-
- def generate(self, hidden=None):
-        if hidden is None:
-            # Sample a random code matching the innermost hidden layer size;
-            # passing the bias Variable itself as `size` would raise an error.
-            hidden = np.random.normal(size=(1, self.n_layers[-1]))
- return self.sess.run(self.reconstruction, feed_dict={self.hidden_encode[-1]: hidden})
-
- def reconstruct(self, X):
- return self.sess.run(self.reconstruction, feed_dict={self.x: X})
-
-    def getWeights(self):
-        # Not implemented for this multi-layer autoencoder; weights are nested
-        # per layer under self.weights['encode'] / self.weights['recon'].
-        raise NotImplementedError
-
-    def getBiases(self):
-        # Not implemented; see getWeights.
-        raise NotImplementedError
-
diff --git a/spaces/NN520/AI/src/components/user-menu.tsx b/spaces/NN520/AI/src/components/user-menu.tsx
deleted file mode 100644
index 9bd1edc9cf9f39b63629b021f0c1186b1a7c1341..0000000000000000000000000000000000000000
--- a/spaces/NN520/AI/src/components/user-menu.tsx
+++ /dev/null
@@ -1,113 +0,0 @@
-'use client'
-
-import { useEffect, useState } from 'react'
-import Image from 'next/image'
-import { toast } from 'react-hot-toast'
-import { Button } from '@/components/ui/button'
-import pkg from '../../package.json'
-import {
- DropdownMenu,
- DropdownMenuContent,
- DropdownMenuItem,
- DropdownMenuSeparator,
- DropdownMenuTrigger
-} from '@/components/ui/dropdown-menu'
-import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons'
-import SettingIcon from '@/assets/images/settings.svg'
-import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard'
-
-export function UserMenu() {
- const [host, setHost] = useState('')
- const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 })
- useEffect(() => {
- setHost(location.host)
- }, [])
-
- useEffect(() => {
- if (isCopied) {
- toast.success('复制成功')
- }
- }, [isCopied])
- return (
-
- )
-}
diff --git a/spaces/NeuralInternet/Alpaca-LoRA-Serve/README.md b/spaces/NeuralInternet/Alpaca-LoRA-Serve/README.md
deleted file mode 100644
index e00128a610ec3ac542d4b6dd5205f58b7437f547..0000000000000000000000000000000000000000
--- a/spaces/NeuralInternet/Alpaca-LoRA-Serve/README.md
+++ /dev/null
@@ -1,120 +0,0 @@
----
-title: Alpaca-LoRA-Serve
-emoji: 🦙🚀
-sdk: gradio
-sdk_version: 3.22.0
-app_file: app.py
-pinned: true
-license: gpl-3.0
-colorFrom: yellow
-colorTo: green
-duplicated_from: chansung/Alpaca-LoRA-Serve
----
-
-# 🦙 🚀 Alpaca-LoRA as a Chatbot Service
-
-🚧 This project is still under development. While serving it, I noticed some issues caused by the model itself, such as too many line breaks, which eventually leads to OOM. You are welcome to propose a PR, and I will merge improvements as soon as I spot any problems.
-
-🔗 **Demo link**: [Batch Mode](https://notebooksf.jarvislabs.ai/43j3x9FSS8Tg0sqvMlDgKPo9vsoSTTKRsX4RIdC3tNd6qeQ6ktlA0tyWRAR3fe_l) and [Streaming Mode](https://notebookse.jarvislabs.ai/BuOu_VbEuUHb09VEVHhfnFq4-PMhBRVCcfHBRCOrq7c4O9GI4dIGoidvNf76UsRL/) (both are running on a single A6000 instance)
-
-The **easiest way** to run this project is to use Colab. Just open up the [alpaca_lora_in_colab](https://github.com/deep-diver/Alpaca-LoRA-Serve/blob/main/notebooks/alpaca_lora_in_colab.ipynb) notebook in Colab (there is an `open in colab` button), and run every cell sequentially. With the standard GPU instance (___T4___), you can run the 7B and 13B models. With the premium GPU instance (___A100 40GB___), you can even run the 30B model! Screenshot👇🏼 Just note that the connection can be somewhat unstable, so I recommend using Colab for development purposes.
-
-
-
-This repository demonstrates Alpaca-LoRA as a Chatbot service with [Alpaca-LoRA](https://github.com/tloen/alpaca-lora) and [Gradio](https://gradio.app/). It comes with the following features:
-
-### Mode
-
-**1. Batch Generation Mode**: batch generation mode aggregates requests up to `batch_size` and passes the prompts from those requests to the model together. It waits until the current batch of requests is fully handled. For instance, with `batch_size=4`, while one user's request is being processed, up to 4 requests from other connected users are aggregated and processed as soon as the current one is done.
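-
-Below is a minimal, self-contained sketch of how such aggregation could look. It is only an illustration under assumed names (`request_queue` and `collect_batch` are hypothetical, not the app's actual API):
-
-```python
-import queue
-
-BATCH_SIZE = 4  # corresponds to the `batch_size` option
-request_queue = queue.Queue()  # hypothetical queue of incoming prompts
-
-def collect_batch():
-    """Block for one prompt, then grab up to BATCH_SIZE - 1 more that are already waiting."""
-    prompts = [request_queue.get()]
-    while len(prompts) < BATCH_SIZE:
-        try:
-            prompts.append(request_queue.get_nowait())
-        except queue.Empty:
-            break
-    return prompts  # the whole batch is passed to the model in one call
-```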
-
-**2. Streaming Mode**: streaming mode handles multiple requests in an interleaving way with threads. For instance, if two users (A and B) are connected, A's request is handled, then B's request, then A's again, and so on. This happens because streaming mode generates and `yield`s tokens one by one.
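-
-A rough, self-contained illustration of the interleaving idea (hypothetical code, not the app's implementation): each request runs in its own thread and tokens are emitted one by one, so output from different users alternates.
-
-```python
-import threading
-
-def stream_response(generate_tokens, prompt, emit):
-    # `generate_tokens` yields tokens one by one; emit each as soon as it is ready.
-    for token in generate_tokens(prompt):
-        emit(token)
-
-fake_model = lambda p: iter(p.split())  # toy stand-in that "yields" words as tokens
-
-# One thread per connected user; per-token yields let A's and B's outputs interleave.
-for prompt in ["hello from user A", "hello from user B"]:
-    threading.Thread(target=stream_response, args=(fake_model, prompt, print)).start()
-```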
-
-### Context management
-
-- Alpaca-LoRA as a Chatbot Service manages context in two ways. First, it remembers (stores) the entire conversation history by default, as in the following code snippet. `context_string` is set to ___"Below is a history of instructions that describe tasks, paired with an input that provides further context. Write a response that appropriately completes the request by remembering the conversation history."___ by default, but it can be set manually via the `Context` field at the top of the screen.
-  - additionally, there is a `Summarize` button in the middle (you need to expand the component labeled ___"Helper Buttons"___). If you click this button, it automatically inputs ___"summarize our conversations so far in three sentences."___ as a prompt, and the resulting generated text is inserted into the `Context` field. Then all the conversation history up to that point is ignored. That means the conversation restarts fresh with the code snippet below, except that `context_string` is filled with the model-generated text.
-  - _NOTE: only the last 2,000 characters of context are kept, and this number can be configured in `constants.py` (see the sketch after the template below)._
-
-```python
-f"""{context_string}
-
-### Input: {input} # Surrounding information to AI
-
-### Instruction: {prompt1} # First instruction/prompt given by user
-
-### Response: {response1} # First response to the first prompt by AI
-
-### Instruction: {prompt2} # Second instruction/prompt given by user
-
-### Response: {response2} # Second response to the second prompt by AI
-....
-"""
-```
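-
-A minimal sketch of the 2,000-character window mentioned in the note above (the constant and function names are assumptions for illustration; the real value lives in `constants.py`):
-
-```python
-HISTORY_CHAR_LIMIT = 2000  # assumed name; configured in constants.py in the actual app
-
-def build_prompt(context_string, turns):
-    """Assemble the prompt, keeping only the most recent characters of history."""
-    history = ""
-    for instruction, response in turns:
-        history += f"### Instruction: {instruction}\n\n### Response: {response}\n\n"
-    history = history[-HISTORY_CHAR_LIMIT:]  # oldest characters are dropped first
-    return f"{context_string}\n\n{history}### Instruction:"
-```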
-
-### misc.
-
-- There is a `continue` button in the middle of the screen. It simply sends the ___"continue."___ prompt to the model. This is useful if the previous response from the model is incomplete; with ___"continue."___, the model tries to complete it. Also, since this is a continuation of the response, the ___"continue."___ prompt is hidden to keep the chat history natural.
-
-### Currently supported LoRA checkpoints
- - [tloen/alpaca-lora-7b](https://huggingface.co/tloen/alpaca-lora-7b): the original 7B Alpaca-LoRA checkpoint by tloen
- - [chansung/alpaca-lora-13b](https://huggingface.co/chansung/alpaca-lora-13b): the 13B Alpaca-LoRA checkpoint by myself (chansung), trained with the same script used to tune the original 7B model
- - [chansung/koalpaca-lora-13b](https://huggingface.co/chansung/koalpaca-lora-13b): the 13B Alpaca-LoRA checkpoint by myself (chansung), trained on the Korean dataset created by the [KoAlpaca project](https://github.com/Beomi/KoAlpaca) by Beomi. It works for English (user) to Korean (AI) conversations.
- - [chansung/alpaca-lora-30b](https://huggingface.co/chansung/alpaca-lora-30b): the 30B Alpaca-LoRA checkpoint by myself (chansung), trained with the same script used to tune the original 7B model
-
-## Instructions
-
-0. Prerequisites
-
-Note that the code only works with `Python >= 3.9`.
-
-```console
-$ conda create -n alpaca-serve python=3.9
-$ conda activate alpaca-serve
-```
-
-1. Install dependencies
-```console
-$ cd Alpaca-LoRA-Serve
-$ pip install -r requirements.txt
-```
-
-2. Run Gradio application
-```console
-$ BASE_URL=decapoda-research/llama-7b-hf
-$ FINETUNED_CKPT_URL=tloen/alpaca-lora-7b
-
-$ python app.py --base_url $BASE_URL --ft_ckpt_url $FINETUNED_CKPT_URL --port 6006
-```
-
-The following flags are supported:
-
-```console
-usage: app.py [-h] [--base_url BASE_URL] [--ft_ckpt_url FT_CKPT_URL] [--port PORT] [--batch_size BATCH_SIZE]
- [--api_open API_OPEN] [--share SHARE] [--gen_config_path GEN_CONFIG_PATH]
-
-Gradio Application for Alpaca-LoRA as a chatbot service
-
-optional arguments:
- -h, --help show this help message and exit
- --base_url BASE_URL Hugging Face Hub URL
- --ft_ckpt_url FT_CKPT_URL
- Hugging Face Hub URL
- --port PORT port number where the app is served
- --batch_size BATCH_SIZE
- how many requests to handle at the same time
- default is set to 1 which enables streaming mode
- --api_open API_OPEN do you want to open as API
- --share SHARE do you want to share temporarily (useful in Colab env)
- --gen_config_path GEN_CONFIG_PATH
- which config to use for GenerationConfig
-```
-
-## Design figure
-
-
-
-
-
-## Acknowledgements
-
-I am thankful to [Jarvislabs.ai](https://jarvislabs.ai/), who generously provided free GPU resources to experiment with Alpaca-LoRA deployment and share it with the community to try out.
\ No newline at end of file
diff --git a/spaces/Not-Grim-Refer/Detailed-English-Description-to-Code/readme.md b/spaces/Not-Grim-Refer/Detailed-English-Description-to-Code/readme.md
deleted file mode 100644
index 1f39d266ecf9f01607a0ec1aa757a86f7dac0e90..0000000000000000000000000000000000000000
--- a/spaces/Not-Grim-Refer/Detailed-English-Description-to-Code/readme.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Code-to-Detailed-English-Description
-emoji: 🌍
-colorFrom: red
-colorTo: red
-sdk: gradio
-sdk_version: 3.30.3
-app_file: app.py
-pinned: true
-license: mit
-
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-refer
\ No newline at end of file
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/m2m_100/tokenizers/tokenize_thai.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/m2m_100/tokenizers/tokenize_thai.py
deleted file mode 100644
index 9c72cb89056f6fc92a8963415e5f3a1e61b33a5b..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/m2m_100/tokenizers/tokenize_thai.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import sys
-
-from pythainlp import word_tokenize
-
-
-for line in sys.stdin:
- print(" ".join(word_tokenize(line.strip())))
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_file_io.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_file_io.py
deleted file mode 100644
index 425812bf1672489093941e5fa09f9da3171559ee..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_file_io.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import os
-import shutil
-import sys
-import tempfile
-import unittest
-from typing import Optional
-from unittest.mock import MagicMock
-
-
-class TestFileIO(unittest.TestCase):
-
- _tmpdir: Optional[str] = None
- _tmpfile: Optional[str] = None
- _tmpfile_contents = "Hello, World"
-
- @classmethod
- def setUpClass(cls) -> None:
- cls._tmpdir = tempfile.mkdtemp()
- with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f:
- cls._tmpfile = f.name
- f.write(cls._tmpfile_contents)
- f.flush()
-
- @classmethod
- def tearDownClass(cls) -> None:
- # Cleanup temp working dir.
- if cls._tmpdir is not None:
- shutil.rmtree(cls._tmpdir) # type: ignore
-
- def test_file_io(self):
- from fairseq.file_io import PathManager
-
- with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
- s = f.read()
- self.assertEqual(s, self._tmpfile_contents)
-
- def test_file_io_oss(self):
- # Mock iopath to simulate oss environment.
- sys.modules["iopath"] = MagicMock()
- from fairseq.file_io import PathManager
-
- with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
- s = f.read()
- self.assertEqual(s, self._tmpfile_contents)
-
- def test_file_io_async(self):
- # ioPath `PathManager` is initialized after the first `opena` call.
- try:
- from fairseq.file_io import IOPathManager, PathManager
- _asyncfile = os.path.join(self._tmpdir, "async.txt")
- f = PathManager.opena(_asyncfile, "wb")
- f.close()
-
- finally:
- self.assertTrue(PathManager.async_close())
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py
deleted file mode 100644
index b41bfbe38789ba14e6a5ea938c75d761424c00ab..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env python3 -u
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import argparse
-import glob
-
-import numpy as np
-
-
-DIM = 1024
-
-
-def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False):
- target_ids = [tid for tid in target_embs]
- source_mat = np.stack(source_embs.values(), axis=0)
- normalized_source_mat = source_mat / np.linalg.norm(
- source_mat, axis=1, keepdims=True
- )
- target_mat = np.stack(target_embs.values(), axis=0)
- normalized_target_mat = target_mat / np.linalg.norm(
- target_mat, axis=1, keepdims=True
- )
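-    # Cosine similarity between every source/target pair: dot product of
-    # L2-normalized embedding matrices.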
- sim_mat = normalized_source_mat.dot(normalized_target_mat.T)
- if return_sim_mat:
- return sim_mat
- neighbors_map = {}
- for i, sentence_id in enumerate(source_embs):
- idx = np.argsort(sim_mat[i, :])[::-1][:k]
- neighbors_map[sentence_id] = [target_ids[tid] for tid in idx]
- return neighbors_map
-
-
-def load_embeddings(directory, LANGS):
- sentence_embeddings = {}
- sentence_texts = {}
- for lang in LANGS:
- sentence_embeddings[lang] = {}
- sentence_texts[lang] = {}
- lang_dir = f"{directory}/{lang}"
- embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*")
- for embed_file in embedding_files:
- shard_id = embed_file.split(".")[-1]
- embeddings = np.fromfile(embed_file, dtype=np.float32)
- num_rows = embeddings.shape[0] // DIM
- embeddings = embeddings.reshape((num_rows, DIM))
-
- with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file:
- for idx, line in enumerate(sentence_file):
- sentence_id, sentence = line.strip().split("\t")
- sentence_texts[lang][sentence_id] = sentence
- sentence_embeddings[lang][sentence_id] = embeddings[idx, :]
-
- return sentence_embeddings, sentence_texts
-
-
-def compute_accuracy(directory, LANGS):
- sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS)
-
- top_1_accuracy = {}
-
- top1_str = " ".join(LANGS) + "\n"
- for source_lang in LANGS:
- top_1_accuracy[source_lang] = {}
- top1_str += f"{source_lang} "
- for target_lang in LANGS:
- top1 = 0
- top5 = 0
- neighbors_map = compute_dist(
- sentence_embeddings[source_lang], sentence_embeddings[target_lang]
- )
- for sentence_id, neighbors in neighbors_map.items():
- if sentence_id == neighbors[0]:
- top1 += 1
- if sentence_id in neighbors[:5]:
- top5 += 1
- n = len(sentence_embeddings[target_lang])
- top1_str += f"{top1/n} "
- top1_str += "\n"
-
- print(top1_str)
- print(top1_str, file=open(f"{directory}/accuracy", "w"))
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="Analyze encoder outputs")
- parser.add_argument("directory", help="Source language corpus")
- parser.add_argument("--langs", help="List of langs")
- args = parser.parse_args()
- langs = args.langs.split(",")
- compute_accuracy(args.directory, langs)
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/sgd.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/sgd.py
deleted file mode 100644
index 8e34fb99a18fff12ab76be5894a84cbbb2f48176..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/sgd.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch.optim
-
-from . import LegacyFairseqOptimizer, register_optimizer
-
-
-@register_optimizer("sgd")
-class SGD(LegacyFairseqOptimizer):
- def __init__(self, args, params):
- super().__init__(args)
- self._optimizer = torch.optim.SGD(params, **self.optimizer_config)
-
- @staticmethod
- def add_args(parser):
- """Add optimizer-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--momentum', default=0.0, type=float, metavar='M',
- help='momentum factor')
- parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
- help='weight decay')
- # fmt: on
-
- @property
- def optimizer_config(self):
- """
- Return a kwarg dictionary that will be used to override optimizer
- args stored in checkpoints. This allows us to load a checkpoint and
- resume training using a different set of optimizer args, e.g., with a
- different learning rate.
- """
- return {
- "lr": self.args.lr[0],
- "momentum": self.args.momentum,
- "weight_decay": self.args.weight_decay,
- }
-
- @property
- def supports_flat_params(self):
- return True
diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/flores101/README.md b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/flores101/README.md
deleted file mode 100644
index 635c13f40bd0ccab704735bc5c26ea0192ea98cd..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/flores101/README.md
+++ /dev/null
@@ -1,223 +0,0 @@
-
- The model is licensed with a CreativeML Open RAIL-M license. The authors claim no rights to the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information intended for harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions please read the license.
-
- Biases and content acknowledgment of Stable Diffusion
- Despite how impressive it is to turn text into images, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the LAION-5B dataset, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the model card.
-
- Limitations of PSLD
- Our evaluation is based on Stable Diffusion v-1.5, which was trained on the LAION-5B dataset.
- Biases in this dataset and in the generative foundation model will implicitly affect our algorithm. Our method
- can work with any latent diffusion model, and we expect new foundation models trained on better datasets like DataComp
- to mitigate these issues.
-
-
- """
- )
-
-image_blocks.queue(max_size=100, api_open=False)
-image_blocks.launch()
\ No newline at end of file
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/bin/lilymidi.py b/spaces/Pattr/DrumClassification/lilypond-2.24.2/bin/lilymidi.py
deleted file mode 100644
index e7f5b27d78df00116483dcf9e02b70edb6732fde..0000000000000000000000000000000000000000
--- a/spaces/Pattr/DrumClassification/lilypond-2.24.2/bin/lilymidi.py
+++ /dev/null
@@ -1,304 +0,0 @@
-#!/home/lily/lilypond-2.24.2/release/binaries/dependencies/install/Python-3.10.8/bin/python3.10
-
-# Copyright (C) 2006--2022 Brailcom, o.p.s.
-#
-# Author: Milan Zamazal
-#
-# This file is part of LilyPond, the GNU music typesetter.
-#
-# LilyPond is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# LilyPond is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with LilyPond. If not, see .
-
-import optparse
-import os
-import sys
-
-"""
-
-# relocate-preamble.py.in
-#
-# This file is part of LilyPond, the GNU music typesetter.
-#
-# Copyright (C) 2007--2022 Han-Wen Nienhuys
-#
-# LilyPond is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# LilyPond is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with LilyPond. If not, see .
-#
-
-This is generic code, used for all python scripts.
-
-The quotes are to ensure that the source .py file can still be
-run as a python script, but does not include any sys.path handling.
-Otherwise, the lilypond-book calls inside the build
-might modify installed .pyc files.
-
-"""
-
-# This is needed for installations with a non-default layout, ie where share/
-# is not next to bin/.
-sys.path.insert (0, os.path.join ('/home/lily/lilypond-2.24.2/release/binaries/mingw/lilypond/install/share/lilypond/2.24.2', 'python'))
-
-# Dynamic relocation, for installations with a default layout including GUB,
-# but also for execution from the build directory.
-bindir = os.path.abspath (os.path.dirname (sys.argv[0]))
-topdir = os.path.dirname (bindir)
-if bindir.endswith (r'/scripts/out'):
- topdir = os.path.join (os.path.dirname (topdir), 'out')
-datadir = os.path.abspath (os.path.join (topdir, 'share', 'lilypond'))
-for v in [ 'current', '2.24.2' ]:
- sys.path.insert (0, os.path.join (datadir, v, 'python'))
-
-"""
-"""
-
-
-def process_options(args):
- parser = optparse.OptionParser(version="2.24.2")
- parser.add_option('', '--filter-tracks', metavar='REGEXP', action='store', type='string', dest='regexp',
- help="display only tracks numbers, of those track names matching REGEXP")
- parser.add_option('', '--prefix-tracks', metavar='PREFIX', action='store', type='string', dest='prefix',
- help="prefix filtered track numbers with PREFIX")
- parser.add_option('', '--dump', action='store_true', dest='dump',
- help="just dump parsed contents of the MIDI file")
- parser.add_option('', '--pretty', action='store_true', dest='pretty',
- help="dump parsed contents of the MIDI file in human-readable form (implies --dump)")
- parser.usage = parser.usage + " FILE"
- options, args = parser.parse_args(args)
- if len(args) != 1:
- parser.print_help()
- sys.exit(2)
- return options, args
-
-
-def read_midi(file):
- import midi
- return midi.parse(open(file, 'rb').read())
-
-
-def track_info(data):
- tracks = data[1]
-
- def track_name(track):
- name = ''
- for time, event in track:
- if time > 0:
- break
- if event[0] == 255 and event[1] == 3:
- name = event[2]
- break
- return name
- track_info = []
- for i in range(len(tracks)):
- track_info.append((i, track_name(tracks[i])))
- return track_info
-
-
-class formatter:
- def __init__(self, txt=""):
- self.text = txt
-
- def format_vals(self, val1, val2=""):
- return str(val1) + str(val2)
-
- def format(self, val1, val2=""):
- return self.text + self.format_vals(val1, val2)
-
-
-class none_formatter (formatter):
- def format_vals(self, val1, val2=""):
- return ''
-
-
-class meta_formatter (formatter):
- def format_vals(self, val1, val2):
- return str(val2)
-
-
-class tempo_formatter (formatter):
- def format_vals(self, val1, val2):
- return str(ord(val2[0])*65536 + ord(val2[1])*256 + ord(val2[2])) \
- + " msec/quarter"
-
-
-class time_signature_formatter (formatter):
- def format_vals(self, val1, val2=""):
- from fractions import Fraction
- # if there are more notated 32nd notes per midi quarter than 8,
- # we display a fraction smaller than 1 as scale factor.
- r = Fraction(8, ord(val2[3]))
- if r == 1:
- ratio = ""
- else:
- ratio = " *" + str(r)
- return str(ord(val2[0])) + "/" + str(1 << ord(val2[1])) + ratio \
- + ", metronome " + str(Fraction(ord(val2[2]), 96))
-
-
-class key_signature_formatter (formatter):
- def format_vals(self, val1, val2):
- key_names = ['F', 'C', 'G', 'D', 'A', 'E', 'B']
- key = (((ord(val2[0])+128) % 256)-128) + ord(val2[1])*3 + 1
- return (key_names[key % 7] + (key//7) * "is" + (-(key//7)) * "es"
- + " " + ['major', 'minor'][ord(val2[1])])
-
-
-class channel_formatter (formatter):
- def __init__(self, txt, ch):
- formatter.__init__(self, txt)
- self.channel = ch
-
- def format(self, val1, val2=""):
- return self.text + "Channel " + str(self.channel) + ", " + \
- self.format_vals(val1, val2)
-
-
-class control_mode_formatter (formatter):
- def __init__(self, txt, ch):
- formatter.__init__(self, txt)
- self.mode = ch
-
- def format(self, val1, val2=""):
- return self.text + str(self.mode) + ", " + \
- self.format_vals(val1, val2)
-
-
-class note_formatter (channel_formatter):
- def pitch(self, val):
- pitch_names = ['C', 'Cis', 'D', 'Dis', 'E',
- 'F', 'Fis', 'G', 'Gis', 'A', 'Ais', 'B']
- p = val % 12
- oct = val // 12 - 1
- return pitch_names[p] + str(oct) + "(" + str(val) + ")"
-
- def velocity(self, val):
- return str(val)
-
- def format_vals(self, val1, val2):
- if val2 > 0:
- return self.pitch(val1) + '@' + self.velocity(val2)
- return self.pitch(val1)
-
-
-meta_dict = {0x00: meta_formatter("Seq.Nr.: "),
- 0x01: meta_formatter("Text: "),
- 0x02: meta_formatter("Copyright: "),
- 0x03: meta_formatter("Track name: "),
- 0x04: meta_formatter("Instrument: "),
- 0x05: meta_formatter("Lyric: "),
- 0x06: meta_formatter("Marker: "),
- 0x07: meta_formatter("Cue point: "),
- 0x2F: none_formatter("End of Track"),
- 0x51: tempo_formatter("Tempo: "),
- 0x54: meta_formatter("SMPTE Offs.:"),
- 0x58: time_signature_formatter("Time signature: "),
- 0x59: key_signature_formatter("Key signature: ")
- }
-
-
-def dump_event(ev, time, padding):
- ch = ev[0] & 0x0F
- func = ev[0] & 0xF0
- f = None
- if ev[0] == 0xFF:
- f = meta_dict.get(ev[1], formatter())
- if func == 0x80:
- f = note_formatter("Note off: ", ch)
- elif func == 0x90:
- if ev[2] == 0:
- desc = "Note off: "
- else:
- desc = "Note on: "
- f = note_formatter(desc, ch)
- elif func == 0xA0:
-        # channel_formatter only accepts a label and a channel, so the extra
-        # "Aftertouch pressure" argument is dropped here to avoid a TypeError.
-        f = note_formatter("Polyphonic aftertouch: ", ch)
- elif func == 0xB0:
- f = control_mode_formatter("Control mode change: ", ch)
- elif func == 0xC0:
- f = channel_formatter("Program Change: ", ch)
- elif func == 0xD0:
- f = channel_formatter("Channel aftertouch: ", ch)
- elif ev[0] in [0xF0, 0xF7]:
- f = meta_formatter("System-exclusive event: ")
-
- if f:
- if len(ev) > 2:
- print(padding + f.format(ev[1], ev[2]))
- elif len(ev) > 1:
- print(padding + f.format(ev[1]))
- else:
- print(padding + f.format())
- else:
- print(padding + "Unrecognized MIDI event: " + str(ev))
-
-
-def dump_midi(data, midi_file, options):
- if not options.pretty:
- print(data)
- return
- # First, dump general info, #tracks, etc.
- print("Filename: " + midi_file)
- i = data[0]
- m_formats = {0: 'single multi-channel track',
- 1: "one or more simultaneous tracks",
- 2: "one or more sequentially independent single-track patterns"}
- print("MIDI format: " + str(i[0]) + " (" + m_formats.get(i[0], "") + ")")
- print("Divisions: " + str(i[1]) + " per whole note")
- print("#Tracks: " + str(len(data[1])))
- n = 0
- for tr in data[1]:
- time = 0
- n += 1
- print()
- print("Track " + str(n) + ":")
- print(" Time 0:")
- for ev in tr:
- if ev[0] > time:
- time = ev[0]
- print(" Time " + str(time) + ": ")
- dump_event(ev[1], time, " ")
-
-
-def go():
- options, args = process_options(sys.argv[1:])
- midi_file = args[0]
- midi_data = read_midi(midi_file)
- info = track_info(midi_data)
- if (options.dump or options.pretty):
- dump_midi(midi_data, midi_file, options)
- elif options.regexp:
- import re
- regexp = re.compile(options.regexp)
- numbers = [str(n+1) for n, name in info if regexp.search(name)]
- if numbers:
- if options.prefix:
- sys.stdout.write('%s ' % (options.prefix,))
- sys.stdout.write(','.join(numbers))
- sys.stdout.write('\n')
- else:
- for n, name in info:
- sys.stdout.write('%d %s\n' % (n+1, name,))
-
-
-if __name__ == '__main__':
- go()
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/bin/lilypond-book.py b/spaces/Pattr/DrumClassification/lilypond-2.24.2/bin/lilypond-book.py
deleted file mode 100644
index 6418b0b6c5b8242839c0904ffa3de8191a3ab2c9..0000000000000000000000000000000000000000
--- a/spaces/Pattr/DrumClassification/lilypond-2.24.2/bin/lilypond-book.py
+++ /dev/null
@@ -1,790 +0,0 @@
-#!/home/lily/lilypond-2.24.2/release/binaries/dependencies/install/Python-3.10.8/bin/python3.10
-# -*- coding: utf-8 -*-
-
-# This file is part of LilyPond, the GNU music typesetter.
-#
-# Copyright (C) 1998--2022 Han-Wen Nienhuys
-# Jan Nieuwenhuizen
-#
-# LilyPond is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# LilyPond is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with LilyPond. If not, see .
-
-r'''
-Example usage:
-
-test:
- lilypond-book --filter="tr '[a-z]' '[A-Z]'" BOOK
-
-convert-ly on book:
- lilypond-book --filter="convert-ly --no-version --from=1.6.11 -" BOOK
-
-classic lilypond-book:
- lilypond-book --process="lilypond" BOOK.tely
-
-TODO:
-
- * ly-options: intertext?
- * --line-width?
- * eps in latex / eps by lilypond -b ps?
- * check latex parameters, twocolumn, multicolumn?
- * use --png --ps --pdf for making images?
-
- * Converting from lilypond-book source, substitute:
- @mbinclude foo.itely -> @include foo.itely
- \mbinput -> \input
-
-'''
-
-
-# TODO: Better solve the global_options copying to the snippets...
-
-import gettext
-import glob
-import hashlib
-from optparse import OptionGroup, SUPPRESS_HELP
-import os
-import re
-import shlex
-import stat
-import subprocess
-import sys
-import tempfile
-import typing
-
-# See lock_path and unlock_path; this module is not available at all on Windows.
-if os.name == 'posix':
- import fcntl
-
-"""
-
-# relocate-preamble.py.in
-#
-# This file is part of LilyPond, the GNU music typesetter.
-#
-# Copyright (C) 2007--2022 Han-Wen Nienhuys
-#
-# LilyPond is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# LilyPond is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with LilyPond. If not, see .
-#
-
-This is generic code, used for all python scripts.
-
-The quotes are to ensure that the source .py file can still be
-run as a python script, but does not include any sys.path handling.
-Otherwise, the lilypond-book calls inside the build
-might modify installed .pyc files.
-
-"""
-
-# This is needed for installations with a non-default layout, ie where share/
-# is not next to bin/.
-sys.path.insert (0, os.path.join ('/home/lily/lilypond-2.24.2/release/binaries/mingw/lilypond/install/share/lilypond/2.24.2', 'python'))
-
-# Dynamic relocation, for installations with a default layout including GUB,
-# but also for execution from the build directory.
-bindir = os.path.abspath (os.path.dirname (sys.argv[0]))
-topdir = os.path.dirname (bindir)
-if bindir.endswith (r'/scripts/out'):
- topdir = os.path.join (os.path.dirname (topdir), 'out')
-datadir = os.path.abspath (os.path.join (topdir, 'share', 'lilypond'))
-for v in [ 'current', '2.24.2' ]:
- sys.path.insert (0, os.path.join (datadir, v, 'python'))
-
-"""
-"""
-
-import book_base
-import book_docbook
-import book_html
-import book_latex
-import book_texinfo
-import book_snippets
-
-# Load translation and install _() into Python's builtins namespace.
-gettext.install('lilypond', '/home/lily/lilypond-2.24.2/release/binaries/mingw/lilypond/install/share/locale')
-
-import lilylib as ly
-
-backend = 'ps'
-
-help_summary = (
- _("Process LilyPond snippets in hybrid HTML, LaTeX, texinfo or DocBook document.")
- + '\n\n'
- + _("Examples:")
- + '''
- $ lilypond-book --filter="tr '[a-z]' '[A-Z]'" %(BOOK)s
- $ lilypond-book -F "convert-ly --no-version --from=2.0.0 -" %(BOOK)s
- $ lilypond-book --process='lilypond -I include' %(BOOK)s
-''' % {'BOOK': _("BOOK")})
-
-authors = ('Jan Nieuwenhuizen ',
- 'Han-Wen Nienhuys ')
-
-################################################################
-
-
-def exit(i):
- if ly.is_verbose():
- raise Exception(_('Exiting (%d)...') % i)
- else:
- sys.exit(i)
-
-
-progress = ly.progress
-warning = ly.warning
-error = ly.error
-
-program_version = '2.24.2'
-if program_version.startswith("@"):
- # '@' in lilypond-book output confuses texinfo
- program_version = "dev"
-
-
-def identify():
- progress('%s (GNU LilyPond) %s' % (ly.program_name, program_version))
-
-
-def warranty():
- identify()
- sys.stdout.write('''
-%s
-
- %s
-
-%s
-%s
-''' % (_('Copyright (c) %s by') % '2001--2023',
- '\n '.join(authors),
- _("Distributed under terms of the GNU General Public License."),
- _("It comes with NO WARRANTY.")))
-
-
-def get_option_parser():
- p = ly.get_option_parser(usage=_("%s [OPTION]... FILE") % 'lilypond-book',
- description=help_summary,
- conflict_handler="resolve",
- add_help_option=False)
-
- p.add_option('-F', '--filter',
- help=_("pipe snippets through FILTER "
- "[default: `convert-ly -n -']"),
- metavar=_("FILTER"),
- action="store",
- dest="filter_cmd",
- default=None)
-
- p.add_option('-f', '--format',
- help=_("use output format FORMAT (texi [default], "
- "texi-html, latex, html, docbook)"),
- metavar=_("FORMAT"),
- action='store')
-
- p.add_option("-h", "--help",
- action="help",
- help=_("show this help and exit"))
-
- # Turn on syntax highlighting using vendored Pygments
- # when building the main documentation. Purposefully
- # undocumented, it is not for end users.
- p.add_option("--highlight",
- action="store_true",
- help=SUPPRESS_HELP)
-
- p.add_option("-I", '--include',
- help=_("add DIR to include path"),
- metavar=_("DIR"),
- action='append',
- dest='include_path',
- default=[])
-
- p.add_option('--info-images-dir',
- help=_("format Texinfo output so that Info will "
- "look for images of music in DIR"),
- metavar=_("DIR"),
- action='store',
- dest='info_images_dir',
- default='')
-
- p.add_option('--left-padding',
- help=_("pad left side of music to align music in spite "
- "of uneven bar numbers (in mm) [default: %default]"),
- metavar=_("PAD"),
- dest="padding_mm",
- type="float",
- default=3.0)
-
- p.add_option('--lily-loglevel',
- help=_("print lilypond log messages according to LOGLEVEL "
- "[default: %default]"),
- metavar=_("LOGLEVEL"),
- action='store',
- dest='lily_loglevel',
- default=os.environ.get("LILYPOND_LOGLEVEL", None))
-
- p.add_option('--lily-output-dir',
- help=_("write lily-XXX files to DIR, "
- "link into --output dir"),
- metavar=_("DIR"),
- action='store',
- dest='lily_output_dir',
- default=None)
-
- p.add_option("-l", "--loglevel",
- help=_("print log messages according to LOGLEVEL "
- "(NONE, ERROR, WARNING, PROGRESS [default], DEBUG)"),
- metavar=_("LOGLEVEL"),
- action='callback',
- callback=ly.handle_loglevel_option,
- type='string')
-
- p.add_option("-o", '--output',
- help=_("write output to DIR"),
- metavar=_("DIR"),
- action='store',
- dest='output_dir',
- default='')
-
- p.add_option('-P', '--process',
- help=_("process ly_files using COMMAND FILE..."),
- metavar=_("COMMAND"),
- action='store',
- dest='process_cmd',
- default='')
-
- p.add_option('--redirect-lilypond-output',
- help=_("redirect the lilypond output"),
- action='store_true',
- dest='redirect_output',
- default=False)
-
- p.add_option('-s', '--safe',
- help=_("removed; using this option results in an error"),
- action="store_true",
- dest="safe_mode",
- default=False)
-
- p.add_option('--skip-lily-check',
- help=_("do not fail if no lilypond output is found"),
- metavar=_("DIR"),
- action='store_true',
- dest='skip_lilypond_run',
- default=False)
-
- p.add_option('--skip-png-check',
- help=_("do not fail if no PNG images "
- "are found for EPS files"),
- metavar=_("DIR"),
- action='store_true',
- dest='skip_png_check',
- default=False)
-
- p.add_option('--use-source-file-names',
- help=_("write snippet output files with the same "
- "base name as their source file"),
- action='store_true',
- dest='use_source_file_names',
- default=False)
-
- p.add_option('-V', '--verbose',
- help=_("be verbose"),
- action="callback",
- callback=ly.handle_loglevel_option,
- callback_args=("DEBUG",))
-
- p.version = "2.24.2"
- p.add_option("--version",
- help=_("show version number and exit"),
- action="version")
-
- p.add_option('-w', '--warranty',
- help=_("show warranty and copyright"),
- action='store_true')
-
- group = OptionGroup(p, "Options only for the latex and texinfo backends")
- group.add_option('--latex-program',
- help=_("run executable PROG instead of latex or, "
- "in case --pdf option is set, "
- "instead of pdflatex"),
- metavar=_("PROG"),
- action='store',
- dest='latex_program',
- default='latex')
- group.add_option('--texinfo-program',
- help=_("run executable PROG instead of texi2pdf"),
- metavar=_("PROG"),
- action='store',
- dest='texinfo_program',
- default='texi2pdf')
- group.add_option('--pdf',
- help=_("create PDF files for use with pdftex"),
- action="store_true",
- dest="create_pdf",
- default=False)
- p.add_option_group(group)
-
- p.add_option_group('',
- description=(
- _("Report bugs via %s")
- % 'bug-lilypond@gnu.org') + '\n')
-
- for formatter in book_base.all_formats:
- formatter.add_options(p)
-
- return p
-
-
-lilypond_binary = os.path.join('/home/lily/lilypond-2.24.2/release/binaries/mingw/lilypond/install/bin', 'lilypond')
-
-# If we are called with full path, try to use lilypond binary
-# installed in the same path; this is needed in GUB binaries, where
-# @bindir is always different from the installed binary path.
-if 'bindir' in globals() and bindir:
- lilypond_binary = os.path.join(bindir, 'lilypond')
-
-# Only use installed binary when we are installed too.
-if '/home/lily/lilypond-2.24.2/release/binaries/mingw/lilypond/install/bin' == ('@' + 'bindir@') or not os.path.exists(lilypond_binary):
- lilypond_binary = 'lilypond'
-
-# Need to shell-quote, issue 3468
-# FIXME: we should really pass argument lists
-# everywhere instead of playing with shell syntax.
-lilypond_binary = shlex.quote(lilypond_binary)
-
-global_options = None
-
-
-def command_name(cmd):
-    # Strip all stuff after the command,
- # deal with "((latex ) >& 1 ) .." too
- cmd = re.match(r'([\(\)]*)([^\\ ]*)', cmd).group(2)
- return os.path.basename(cmd)
-
-
-def system_in_directory(cmd_str, directory, log_file):
- """Execute a command in a different directory."""
-
- if ly.is_verbose():
- ly.progress(_("Invoking `%s\'") % cmd_str)
- elif global_options.redirect_output:
- ly.progress(_("Processing %s.ly") % log_file)
- else:
- name = command_name(cmd_str)
- ly.progress(_("Running %s...") % name)
-
- output_location = None
- if global_options.redirect_output:
- output_location = open(log_file + '.log', 'w', encoding='utf-8')
-
- try:
- subprocess.run(cmd_str, stdout=output_location,
- stderr=output_location, cwd=directory,
- shell=True, check=True)
- except subprocess.CalledProcessError as e:
- sys.stderr.write("%s\n" % e)
- sys.exit(1)
-
-
-def process_snippets(cmd, outdated_dict,
- formatter, lily_output_dir):
- """Run cmd on all of the .ly files from snippets."""
- basenames = sorted(outdated_dict.keys())
-
- # No need for a secure hash function, just need a digest.
- checksum = hashlib.md5()
- for name in basenames:
- checksum.update(name.encode('ascii'))
- checksum = checksum.hexdigest()
-
- lily_output_dir = global_options.lily_output_dir
-
- # Write list of snippet names.
- snippet_names_file = 'snippet-names-%s.ly' % checksum
- snippet_names_path = os.path.join(lily_output_dir, snippet_names_file)
- with open(snippet_names_path, 'w', encoding='utf-8') as snippet_names:
- snippet_names.write('\n'.join([name + '.ly' for name in basenames]))
-
- # Run command.
- cmd = formatter.adjust_snippet_command(cmd)
- # Remove .ly ending.
- logfile = os.path.splitext(snippet_names_path)[0]
- snippet_names_arg = mkarg(snippet_names_path.replace(os.path.sep, '/'))
- system_in_directory(' '.join([cmd, snippet_names_arg]),
- lily_output_dir,
- logfile)
- os.unlink(snippet_names_path)
-
-
-def lock_path(name):
- if os.name != 'posix':
- return None
-
- fp = open(name, 'w', encoding='utf-8')
- fcntl.lockf(fp, fcntl.LOCK_EX)
- return fp
-
-
-def unlock_path(lock):
- if os.name != 'posix':
- return None
- fcntl.lockf(lock, fcntl.LOCK_UN)
- lock.close()
-
-
-def do_process_cmd(chunks, options):
- """Wrap do_process_cmd_locked in a filesystem lock"""
- snippets = [c for c in chunks if isinstance(
- c, book_snippets.LilypondSnippet)]
-
- # calculate checksums eagerly
- for s in snippets:
- s.get_checksum()
-
- os.makedirs(options.lily_output_dir, exist_ok=True)
- lock_file = os.path.join(options.lily_output_dir, "lock")
- lock = None
- try:
- lock = lock_path(lock_file)
- do_process_cmd_locked(snippets, options)
- finally:
- if lock:
- unlock_path(lock)
-
-
-def do_process_cmd_locked(snippets, options):
- """Look at all snippets, write the outdated ones, and compile them."""
- outdated = [c for c in snippets if c.is_outdated(options.lily_output_dir)]
-
- if outdated:
- # First unique the list based on the basename, by using them as keys
- # in a dict.
- outdated_dict = dict()
- for snippet in outdated:
- outdated_dict[snippet.basename()] = snippet
-
- # Next call write_ly() for each snippet once.
- progress(_("Writing snippets..."))
- for snippet in outdated_dict.values():
- snippet.write_ly()
-
- progress(_("Processing..."))
- process_snippets(options.process_cmd, outdated_dict,
- options.formatter, options.lily_output_dir)
-
- else:
- progress(_("All snippets are up to date..."))
-
- progress(_("Linking files..."))
- if options.lily_output_dir != options.output_dir:
- for snippet in snippets:
- snippet.link_all_output_files(options.lily_output_dir,
- options.output_dir)
-
-
-###
-# Format guessing data
-
-def guess_format(input_filename):
- format = None
- e = os.path.splitext(input_filename)[1]
- for formatter in book_base.all_formats:
- if formatter.can_handle_extension(e):
- return formatter
- error(_("cannot determine format for: %s" % input_filename))
- exit(1)
-
-def write_if_updated(file_name, lines):
- try:
- with open(file_name, encoding='utf-8') as file:
- old_str = file.read()
- except FileNotFoundError:
- pass
- else:
- new_str = ''.join(lines)
- if old_str == new_str:
- progress(_("%s is up to date.") % file_name)
-
- # this prevents make from always rerunning lilypond-book:
- # output file must be touched in order to be up to date
- os.utime(file_name, None)
- return
-
- output_dir = os.path.dirname(file_name)
- os.makedirs(output_dir, exist_ok=True)
-
- progress(_("Writing `%s'...") % file_name)
- open(file_name, 'w', encoding='utf-8').writelines(lines)
-
-
-def note_input_file(name, inputs=[]):
- # hack: inputs is mutable!
- inputs.append(name)
- return inputs
-
-
-def samefile(f1, f2):
- try:
- return os.path.samefile(f1, f2)
- except AttributeError: # Windoze
- f1 = re.sub("//*", "/", f1)
- f2 = re.sub("//*", "/", f2)
- return f1 == f2
-
-
-def do_file(input_filename, included=False):
- # Ugh.
- input_absname = input_filename
- if not input_filename or input_filename == '-':
- in_handle = sys.stdin
- else:
- if os.path.exists(input_filename):
- input_fullname = input_filename
- else:
- input_fullname = global_options.formatter.input_fullname(
- input_filename)
- # Normalize path to absolute path, since we will change cwd to the output dir!
- # Otherwise, "lilypond-book -o out test.tex" will complain that it is
- # overwriting the input file (which it is actually not), since the
- # input filename is relative to the CWD...
- input_absname = os.path.abspath(input_fullname)
-
- note_input_file(input_fullname)
- in_handle = open(input_fullname, 'r', encoding='utf-8')
-
- if input_filename == '-':
- global_options.input_dir = os.getcwd()
- input_base = 'stdin'
- elif included:
- input_base = os.path.splitext(input_filename)[0]
- else:
- global_options.input_dir = os.path.split(input_absname)[0]
- input_base = os.path.basename(
- os.path.splitext(input_filename)[0])
-
- output_filename = os.path.join(global_options.output_dir,
- input_base + global_options.formatter.default_extension)
- if (os.path.exists(input_filename)
- and os.path.exists(output_filename)
- and samefile(output_filename, input_absname)):
- error(
- _("Output would overwrite input file; use --output."))
- exit(2)
-
- try:
- progress(_("Reading `%s'") % input_absname)
- source = in_handle.read()
-
- if not included:
- global_options.formatter.init_default_snippet_options(source)
-
- progress(_("Dissecting..."))
- chunks = book_base.find_toplevel_snippets(
- source, global_options.formatter, global_options)
- for c in chunks:
- c.set_output_fullpath(output_filename)
-
- # Let the formatter modify the chunks before further processing
- chunks = global_options.formatter.process_chunks(chunks)
-
- def process_include(snippet):
- name = snippet.substring('filename')
- progress(_("Processing include `%s'") % name)
- return do_file(name, included=True)
-
- include_chunks = []
- for x in chunks:
- if isinstance(x, book_snippets.IncludeSnippet):
- include_chunks += process_include(x)
-
- return chunks + include_chunks
-
- except book_snippets.CompileError:
- progress(_("Removing `%s'") % output_filename)
- raise book_snippets.CompileError
-
-
-def do_options():
- global global_options
-
- opt_parser = get_option_parser()
- (global_options, args) = opt_parser.parse_args()
-
- if global_options.safe_mode:
- error("""Due to security vulnerabilities deemed unfixable
-by the developers, LilyPond's safe mode was removed in
-version 2.23.12 in order not to provide a false sense of
-security. If you need to compile an untrusted .ly file, please
-use an external tool to run LilyPond in a sandbox.""")
- raise SystemExit
-
- global_options.information = {
- 'program_version': program_version, 'program_name': ly.program_name}
-
- if global_options.lily_output_dir:
- global_options.lily_output_dir = os.path.expanduser(
- global_options.lily_output_dir)
- if global_options.output_dir:
- global_options.output_dir = os.path.expanduser(
- global_options.output_dir)
-
- # Compute absolute paths of include directories.
- for i, path in enumerate(global_options.include_path):
- global_options.include_path[i] = os.path.abspath(path)
-
- # Append the current directory.
- global_options.include_path.append(os.getcwd())
-
- if global_options.warranty:
- warranty()
- exit(0)
- if not args or len(args) > 1:
- opt_parser.print_help()
- exit(2)
-
- return args
-
-
-def mkarg(x):
- r"""
- A modified version of the commands.mkarg(x)
-
- Uses double quotes (since Windows can't handle the single quotes)
- and escapes the characters \, $, ", and ` for unix shells.
- """
- if os.name == 'nt':
- return ' "%s"' % x
- s = ' "'
- for c in x:
- if c in '\\$"`':
- s = s + '\\'
- s = s + c
- s = s + '"'
- return s
-
-
-def write_output_documents(chunks: typing.List[book_snippets.Chunk], is_filter: bool):
- text_by_path = {}
- for ch in chunks:
- path = ch.output_fullpath()
- if path not in text_by_path:
- text_by_path[path] = []
-
- if is_filter:
- s = ch.filter_text()
- else:
- s = ch.replacement_text()
-
- text_by_path[path].append(s)
-
- for path in text_by_path:
- write_if_updated(path, text_by_path[path])
-
-
-def main():
- if "LILYPOND_BOOK_LOGLEVEL" in os.environ:
- ly.set_loglevel(os.environ["LILYPOND_BOOK_LOGLEVEL"])
- files = do_options()
-
- basename = os.path.splitext(files[0])[0]
- basename = os.path.split(basename)[1]
-
- if global_options.format:
- # Retrieve the formatter for the given format
- for formatter in book_base.all_formats:
- if formatter.can_handle_format(global_options.format):
- global_options.formatter = formatter
- else:
- global_options.formatter = guess_format(files[0])
- global_options.format = global_options.formatter.format
-
- # make the global options available to the formatters:
- global_options.formatter.global_options = global_options
- formats = global_options.formatter.image_formats
-
- if global_options.process_cmd == '':
- global_options.process_cmd = (
- lilypond_binary + ' --formats=%s ' % formats)
-
- global_options.process_cmd += (
- ' '.join([' -I %s' % mkarg(p) for p in global_options.include_path])
- + ' -daux-files ')
-
- global_options.formatter.process_options(global_options)
-
- if global_options.lily_loglevel:
- ly.debug_output(_("Setting LilyPond's loglevel to %s") %
- global_options.lily_loglevel, True)
- global_options.process_cmd += " --loglevel=%s" % global_options.lily_loglevel
- elif ly.is_verbose():
- if os.environ.get("LILYPOND_LOGLEVEL", None):
- ly.debug_output(_("Setting LilyPond's loglevel to %s (from environment variable LILYPOND_LOGLEVEL)") %
- os.environ.get("LILYPOND_LOGLEVEL", None), True)
- global_options.process_cmd += " --loglevel=%s" % os.environ.get(
- "LILYPOND_LOGLEVEL", None)
- else:
- ly.debug_output(
- _("Setting LilyPond's output to --verbose, implied by lilypond-book's setting"), True)
- global_options.process_cmd += " --verbose"
-
- global_options.process_cmd += " -dread-file-list -dno-strip-output-dir"
-
- # Store the original argument to construct the dependency file below.
- relative_output_dir = global_options.output_dir
-
- if global_options.output_dir:
- global_options.output_dir = os.path.abspath(global_options.output_dir)
- # Create the directory, but do not complain if it already exists.
- os.makedirs(global_options.output_dir, exist_ok=True)
- else:
- global_options.output_dir = os.getcwd()
-
- if global_options.lily_output_dir:
- global_options.lily_output_dir = os.path.abspath(
- global_options.lily_output_dir)
- else:
- global_options.lily_output_dir = global_options.output_dir
-
- identify()
- try:
- chunks = do_file(files[0])
- if global_options.filter_cmd:
- write_output_documents(chunks, is_filter=True)
- elif global_options.process_cmd:
- do_process_cmd(chunks, global_options)
- progress(_("Compiling `%s'...") % files[0])
- write_output_documents(chunks, is_filter=False)
- except book_snippets.CompileError:
- exit(1)
-
- inputs = note_input_file('')
- inputs.pop()
-
- base_file_name = os.path.splitext(os.path.basename(files[0]))[0]
- dep_file = os.path.join(global_options.output_dir, base_file_name + '.dep')
- final_output_file = os.path.join(relative_output_dir,
- base_file_name + global_options.formatter.default_extension)
- open(dep_file, 'w', encoding='utf-8').write('%s: %s\n'
- % (final_output_file, ' '.join(inputs)))
-
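For reference, the `.dep` file written at the end of main() holds a single Make-style rule mapping the final document to everything read while processing it; for a hypothetical input `song.lytex` converted to HTML with output directory `out`, the line would look roughly like `out/song.html: song.lytex included-snippet.ly` (file names here are illustrative, not taken from the repository).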
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/tree-il/spec.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/tree-il/spec.go
deleted file mode 100644
index 4758312c5ccade8823204915ca7d715b2e820fd9..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/tree-il/spec.go and /dev/null differ
diff --git a/spaces/PeepDaSlan9/whisper-web/assets/index-2d33b655.css b/spaces/PeepDaSlan9/whisper-web/assets/index-2d33b655.css
deleted file mode 100644
index 5ce6e33a6204138cb0a397864ddfa1bfcdb6e591..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/whisper-web/assets/index-2d33b655.css
+++ /dev/null
@@ -1 +0,0 @@
-*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";font-feature-settings:normal;font-variation-settings:normal}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;font-weight:inherit;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}*,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: 
;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.container{width:100%}@media (min-width: 640px){.container{max-width:640px}}@media (min-width: 768px){.container{max-width:768px}}@media (min-width: 1024px){.container{max-width:1024px}}@media (min-width: 1280px){.container{max-width:1280px}}@media (min-width: 1536px){.container{max-width:1536px}}.static{position:static}.fixed{position:fixed}.absolute{position:absolute}.relative{position:relative}.inset-0{inset:0px}.right-4{right:1rem}.top-0{top:0px}.z-10{z-index:10}.my-2{margin-top:.5rem;margin-bottom:.5rem}.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-3{margin-bottom:.75rem}.mb-5{margin-bottom:1.25rem}.ml-2{margin-left:.5rem}.ml-4{margin-left:1rem}.mr-2{margin-right:.5rem}.mr-3{margin-right:.75rem}.mr-5{margin-right:1.25rem}.ms-1{-webkit-margin-start:.25rem;margin-inline-start:.25rem}.mt-0{margin-top:0}.mt-0\.5{margin-top:.125rem}.mt-1{margin-top:.25rem}.mt-3{margin-top:.75rem}.mt-4{margin-top:1rem}.block{display:block}.inline{display:inline}.flex{display:flex}.inline-flex{display:inline-flex}.hidden{display:none}.h-1{height:.25rem}.h-14{height:3.5rem}.h-4{height:1rem}.h-7{height:1.75rem}.h-full{height:100%}.max-h-\[20rem\]{max-height:20rem}.min-h-full{min-height:100%}.min-h-screen{min-height:100vh}.w-4{width:1rem}.w-7{width:1.75rem}.w-\[1px\]{width:1px}.w-full{width:100%}.max-w-md{max-width:28rem}.scale-100{--tw-scale-x: 1;--tw-scale-y: 1;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.scale-95{--tw-scale-x: .95;--tw-scale-y: .95;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.transform{transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}@keyframes spin{to{transform:rotate(360deg)}}.animate-spin{animation:spin 1s linear infinite}.flex-row{flex-direction:row}.flex-row-reverse{flex-direction:row-reverse}.flex-col{flex-direction:column}.items-center{align-items:center}.justify-center{justify-content:center}.justify-between{justify-content:space-between}.space-x-2>:not([hidden])~:not([hidden]){--tw-space-x-reverse: 0;margin-right:calc(.5rem * var(--tw-space-x-reverse));margin-left:calc(.5rem * calc(1 - 
var(--tw-space-x-reverse)))}.overflow-hidden{overflow:hidden}.overflow-y-auto{overflow-y:auto}.whitespace-nowrap{white-space:nowrap}.rounded-2xl{border-radius:1rem}.rounded-full{border-radius:9999px}.rounded-lg{border-radius:.5rem}.rounded-md{border-radius:.375rem}.border{border-width:1px}.border-gray-300{--tw-border-opacity: 1;border-color:rgb(209 213 219 / var(--tw-border-opacity))}.border-gray-400{--tw-border-opacity: 1;border-color:rgb(156 163 175 / var(--tw-border-opacity))}.border-transparent{border-color:transparent}.bg-black{--tw-bg-opacity: 1;background-color:rgb(0 0 0 / var(--tw-bg-opacity))}.bg-blue-500{--tw-bg-opacity: 1;background-color:rgb(59 130 246 / var(--tw-bg-opacity))}.bg-blue-600{--tw-bg-opacity: 1;background-color:rgb(37 99 235 / var(--tw-bg-opacity))}.bg-blue-700{--tw-bg-opacity: 1;background-color:rgb(29 78 216 / var(--tw-bg-opacity))}.bg-gray-200{--tw-bg-opacity: 1;background-color:rgb(229 231 235 / var(--tw-bg-opacity))}.bg-gray-50{--tw-bg-opacity: 1;background-color:rgb(249 250 251 / var(--tw-bg-opacity))}.bg-green-500{--tw-bg-opacity: 1;background-color:rgb(34 197 94 / var(--tw-bg-opacity))}.bg-indigo-100{--tw-bg-opacity: 1;background-color:rgb(224 231 255 / var(--tw-bg-opacity))}.bg-indigo-600{--tw-bg-opacity: 1;background-color:rgb(79 70 229 / var(--tw-bg-opacity))}.bg-slate-200{--tw-bg-opacity: 1;background-color:rgb(226 232 240 / var(--tw-bg-opacity))}.bg-white{--tw-bg-opacity: 1;background-color:rgb(255 255 255 / var(--tw-bg-opacity))}.bg-opacity-25{--tw-bg-opacity: .25}.p-2{padding:.5rem}.p-2\.5{padding:.625rem}.p-4{padding:1rem}.p-6{padding:1.5rem}.px-1{padding-left:.25rem;padding-right:.25rem}.px-2{padding-left:.5rem;padding-right:.5rem}.px-4{padding-left:1rem;padding-right:1rem}.px-5{padding-left:1.25rem;padding-right:1.25rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.py-2\.5{padding-top:.625rem;padding-bottom:.625rem}.text-left{text-align:left}.text-center{text-align:center}.text-right{text-align:right}.align-middle{vertical-align:middle}.text-5xl{font-size:3rem;line-height:1}.text-lg{font-size:1.125rem;line-height:1.75rem}.text-sm{font-size:.875rem;line-height:1.25rem}.font-extrabold{font-weight:800}.font-medium{font-weight:500}.font-semibold{font-weight:600}.leading-6{line-height:1.5rem}.tracking-tight{letter-spacing:-.025em}.text-gray-500{--tw-text-opacity: 1;color:rgb(107 114 128 / var(--tw-text-opacity))}.text-gray-900{--tw-text-opacity: 1;color:rgb(17 24 39 / var(--tw-text-opacity))}.text-indigo-100{--tw-text-opacity: 1;color:rgb(224 231 255 / var(--tw-text-opacity))}.text-indigo-900{--tw-text-opacity: 1;color:rgb(49 46 129 / var(--tw-text-opacity))}.text-slate-500{--tw-text-opacity: 1;color:rgb(100 116 139 / var(--tw-text-opacity))}.text-slate-900{--tw-text-opacity: 1;color:rgb(15 23 42 / var(--tw-text-opacity))}.text-white{--tw-text-opacity: 1;color:rgb(255 255 255 / var(--tw-text-opacity))}.opacity-0{opacity:0}.opacity-100{opacity:1}.shadow-xl{--tw-shadow: 0 20px 25px -5px rgb(0 0 0 / .1), 0 8px 10px -6px rgb(0 0 0 / .1);--tw-shadow-colored: 0 20px 25px -5px var(--tw-shadow-color), 0 8px 10px -6px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.shadow-black\/5{--tw-shadow-color: rgb(0 0 0 / .05);--tw-shadow: var(--tw-shadow-colored)}.ring-1{--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) 
var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}.ring-slate-700\/10{--tw-ring-color: rgb(51 65 85 / .1)}.filter{filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.transition-all{transition-property:all;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}.duration-100{transition-duration:.1s}.duration-200{transition-duration:.2s}.duration-300{transition-duration:.3s}.ease-in{transition-timing-function:cubic-bezier(.4,0,1,1)}.ease-out{transition-timing-function:cubic-bezier(0,0,.2,1)}html,body,#root{height:100%}audio::-webkit-media-controls-panel{background-color:#fff}.container{width:41rem;max-width:95vw}.hover\:bg-blue-800:hover{--tw-bg-opacity: 1;background-color:rgb(30 64 175 / var(--tw-bg-opacity))}.hover\:bg-green-600:hover{--tw-bg-opacity: 1;background-color:rgb(22 163 74 / var(--tw-bg-opacity))}.hover\:bg-indigo-200:hover{--tw-bg-opacity: 1;background-color:rgb(199 210 254 / var(--tw-bg-opacity))}.hover\:bg-indigo-50:hover{--tw-bg-opacity: 1;background-color:rgb(238 242 255 / var(--tw-bg-opacity))}.hover\:bg-indigo-500:hover{--tw-bg-opacity: 1;background-color:rgb(99 102 241 / var(--tw-bg-opacity))}.hover\:text-indigo-600:hover{--tw-text-opacity: 1;color:rgb(79 70 229 / var(--tw-text-opacity))}.focus\:border-blue-500:focus{--tw-border-opacity: 1;border-color:rgb(59 130 246 / var(--tw-border-opacity))}.focus\:outline-none:focus{outline:2px solid transparent;outline-offset:2px}.focus\:ring-4:focus{--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(4px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}.focus\:ring-blue-300:focus{--tw-ring-opacity: 1;--tw-ring-color: rgb(147 197 253 / var(--tw-ring-opacity))}.focus\:ring-blue-500:focus{--tw-ring-opacity: 1;--tw-ring-color: rgb(59 130 246 / var(--tw-ring-opacity))}.focus\:ring-green-300:focus{--tw-ring-opacity: 1;--tw-ring-color: rgb(134 239 172 / var(--tw-ring-opacity))}.focus-visible\:ring-2:focus-visible{--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}.focus-visible\:ring-indigo-500:focus-visible{--tw-ring-opacity: 1;--tw-ring-color: rgb(99 102 241 / var(--tw-ring-opacity))}.focus-visible\:ring-offset-2:focus-visible{--tw-ring-offset-width: 2px}@media (prefers-color-scheme: dark){.dark\:border-gray-600{--tw-border-opacity: 1;border-color:rgb(75 85 99 / var(--tw-border-opacity))}.dark\:bg-blue-600{--tw-bg-opacity: 1;background-color:rgb(37 99 235 / var(--tw-bg-opacity))}.dark\:bg-gray-700{--tw-bg-opacity: 1;background-color:rgb(55 65 81 / var(--tw-bg-opacity))}.dark\:bg-green-600{--tw-bg-opacity: 1;background-color:rgb(22 163 74 / var(--tw-bg-opacity))}.dark\:text-white{--tw-text-opacity: 1;color:rgb(255 255 255 / var(--tw-text-opacity))}.dark\:placeholder-gray-400::-moz-placeholder{--tw-placeholder-opacity: 1;color:rgb(156 163 175 / var(--tw-placeholder-opacity))}.dark\:placeholder-gray-400::placeholder{--tw-placeholder-opacity: 1;color:rgb(156 163 175 / 
var(--tw-placeholder-opacity))}.dark\:hover\:bg-blue-700:hover{--tw-bg-opacity: 1;background-color:rgb(29 78 216 / var(--tw-bg-opacity))}.dark\:hover\:bg-green-700:hover{--tw-bg-opacity: 1;background-color:rgb(21 128 61 / var(--tw-bg-opacity))}.dark\:focus\:border-blue-500:focus{--tw-border-opacity: 1;border-color:rgb(59 130 246 / var(--tw-border-opacity))}.dark\:focus\:ring-blue-500:focus{--tw-ring-opacity: 1;--tw-ring-color: rgb(59 130 246 / var(--tw-ring-opacity))}.dark\:focus\:ring-blue-800:focus{--tw-ring-opacity: 1;--tw-ring-color: rgb(30 64 175 / var(--tw-ring-opacity))}.dark\:focus\:ring-green-800:focus{--tw-ring-opacity: 1;--tw-ring-color: rgb(22 101 52 / var(--tw-ring-opacity))}}@media (min-width: 640px){.sm\:text-2xl{font-size:1.5rem;line-height:2rem}.sm\:text-7xl{font-size:4.5rem;line-height:1}}
diff --git a/spaces/Pie31415/control-animation/annotator/openpose/util.py b/spaces/Pie31415/control-animation/annotator/openpose/util.py
deleted file mode 100644
index 6f91ae0e65abaf0cbd62d803f56498991141e61b..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/openpose/util.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import math
-import numpy as np
-import matplotlib
-import cv2
-
-
-def padRightDownCorner(img, stride, padValue):
- h = img.shape[0]
- w = img.shape[1]
-
- pad = 4 * [None]
- pad[0] = 0 # up
- pad[1] = 0 # left
- pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
- pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
-
- img_padded = img
- pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
- img_padded = np.concatenate((pad_up, img_padded), axis=0)
- pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
- img_padded = np.concatenate((pad_left, img_padded), axis=1)
- pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
- img_padded = np.concatenate((img_padded, pad_down), axis=0)
- pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
- img_padded = np.concatenate((img_padded, pad_right), axis=1)
-
- return img_padded, pad
-
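The only subtle part of `padRightDownCorner` is the pad arithmetic; a minimal sketch of the same computation for an illustrative 100x150 input and stride 8:

```python
# Reproduces the pad[2]/pad[3] computation above for a 100x150 image, stride 8.
h, w, stride = 100, 150, 8
pad_down = 0 if h % stride == 0 else stride - (h % stride)   # 4
pad_right = 0 if w % stride == 0 else stride - (w % stride)  # 2
assert (h + pad_down) % stride == 0 and (w + pad_right) % stride == 0
print(pad_down, pad_right)  # -> 4 2
```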
-# Transfer the Caffe model weights to PyTorch, matching entries by layer name.
-def transfer(model, model_weights):
- transfered_model_weights = {}
- for weights_name in model.state_dict().keys():
- transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
- return transfered_model_weights
-
-# Draw the body keypoints and limbs.
-def draw_bodypose(canvas, candidate, subset):
- stickwidth = 4
- limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
- [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
- [1, 16], [16, 18], [3, 17], [6, 18]]
-
- colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
- [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
- [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
- for i in range(18):
- for n in range(len(subset)):
- index = int(subset[n][i])
- if index == -1:
- continue
- x, y = candidate[index][0:2]
- cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
- for i in range(17):
- for n in range(len(subset)):
- index = subset[n][np.array(limbSeq[i]) - 1]
- if -1 in index:
- continue
- cur_canvas = canvas.copy()
- Y = candidate[index.astype(int), 0]
- X = candidate[index.astype(int), 1]
- mX = np.mean(X)
- mY = np.mean(Y)
- length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
- angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
- polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
- cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
- canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
- # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
- # plt.imshow(canvas[:, :, [2, 1, 0]])
- return canvas
-
-
-# Images drawn directly by OpenCV do not look good.
-def draw_handpose(canvas, all_hand_peaks, show_number=False):
- edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
- [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
-
- for peaks in all_hand_peaks:
- for ie, e in enumerate(edges):
- if np.sum(np.all(peaks[e], axis=1)==0)==0:
- x1, y1 = peaks[e[0]]
- x2, y2 = peaks[e[1]]
- cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2)
-
- for i, keypoint in enumerate(peaks):
- x, y = keypoint
- cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
- if show_number:
- cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
- return canvas
-
-# detect hand according to body pose keypoints
-# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
-def handDetect(candidate, subset, oriImg):
- # right hand: wrist 4, elbow 3, shoulder 2
- # left hand: wrist 7, elbow 6, shoulder 5
- ratioWristElbow = 0.33
- detect_result = []
- image_height, image_width = oriImg.shape[0:2]
- for person in subset.astype(int):
- # if any of three not detected
- has_left = np.sum(person[[5, 6, 7]] == -1) == 0
- has_right = np.sum(person[[2, 3, 4]] == -1) == 0
- if not (has_left or has_right):
- continue
- hands = []
- #left hand
- if has_left:
- left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
- x1, y1 = candidate[left_shoulder_index][:2]
- x2, y2 = candidate[left_elbow_index][:2]
- x3, y3 = candidate[left_wrist_index][:2]
- hands.append([x1, y1, x2, y2, x3, y3, True])
- # right hand
- if has_right:
- right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
- x1, y1 = candidate[right_shoulder_index][:2]
- x2, y2 = candidate[right_elbow_index][:2]
- x3, y3 = candidate[right_wrist_index][:2]
- hands.append([x1, y1, x2, y2, x3, y3, False])
-
- for x1, y1, x2, y2, x3, y3, is_left in hands:
- # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbow) = (1 + ratio) * pos_wrist - ratio * pos_elbow
- # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
- # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
- # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
- # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
- # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
- x = x3 + ratioWristElbow * (x3 - x2)
- y = y3 + ratioWristElbow * (y3 - y2)
- distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
- distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
- width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
- # x-y refers to the center --> offset to topLeft point
- # handRectangle.x -= handRectangle.width / 2.f;
- # handRectangle.y -= handRectangle.height / 2.f;
- x -= width / 2
- y -= width / 2 # width = height
- # clip the box so it does not overflow the image
- if x < 0: x = 0
- if y < 0: y = 0
- width1 = width
- width2 = width
- if x + width > image_width: width1 = image_width - x
- if y + width > image_height: width2 = image_height - y
- width = min(width1, width2)
- # discard hand boxes smaller than 20 pixels
- if width >= 20:
- detect_result.append([int(x), int(y), int(width), is_left])
-
- '''
- return value: [[x, y, w, True if left hand else False]].
- width == height, since the network requires a square input.
- x, y are the coordinates of the top-left corner.
- '''
- return detect_result
-
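The box construction in `handDetect` follows the OpenPose C++ formulas quoted in the comments above; a worked example on made-up shoulder/elbow/wrist coordinates (purely illustrative) shows the numbers it produces:

```python
import math

# Illustrative keypoints, not real detections.
ratioWristElbow = 0.33
x1, y1 = 80.0, 100.0    # shoulder
x2, y2 = 100.0, 160.0   # elbow
x3, y3 = 120.0, 200.0   # wrist

# Box centre extrapolated past the wrist along the elbow -> wrist direction.
cx = x3 + ratioWristElbow * (x3 - x2)
cy = y3 + ratioWristElbow * (y3 - y2)
d_we = math.hypot(x3 - x2, y3 - y2)   # wrist-elbow distance
d_es = math.hypot(x2 - x1, y2 - y1)   # elbow-shoulder distance
width = 1.5 * max(d_we, 0.9 * d_es)   # side of the square box

top_left = (cx - width / 2, cy - width / 2)
print(round(width, 1), [round(v, 1) for v in top_left])  # -> 85.4 [83.9, 170.5]
```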
-# Get the (row, column) index of the maximum entry of a 2D array.
-def npmax(array):
- arrayindex = array.argmax(1)
- arrayvalue = array.max(1)
- i = arrayvalue.argmax()
- j = arrayindex[i]
- return i, j
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/image/__init__.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/image/__init__.py
deleted file mode 100644
index d0051d609d3de4e7562e3fe638335c66617c4d91..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/image/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .colorspace import (bgr2gray, bgr2hls, bgr2hsv, bgr2rgb, bgr2ycbcr,
- gray2bgr, gray2rgb, hls2bgr, hsv2bgr, imconvert,
- rgb2bgr, rgb2gray, rgb2ycbcr, ycbcr2bgr, ycbcr2rgb)
-from .geometric import (cutout, imcrop, imflip, imflip_, impad,
- impad_to_multiple, imrescale, imresize, imresize_like,
- imresize_to_multiple, imrotate, imshear, imtranslate,
- rescale_size)
-from .io import imfrombytes, imread, imwrite, supported_backends, use_backend
-from .misc import tensor2imgs
-from .photometric import (adjust_brightness, adjust_color, adjust_contrast,
- adjust_lighting, adjust_sharpness, auto_contrast,
- clahe, imdenormalize, imequalize, iminvert,
- imnormalize, imnormalize_, lut_transform, posterize,
- solarize)
-
-__all__ = [
- 'bgr2gray', 'bgr2hls', 'bgr2hsv', 'bgr2rgb', 'gray2bgr', 'gray2rgb',
- 'hls2bgr', 'hsv2bgr', 'imconvert', 'rgb2bgr', 'rgb2gray', 'imrescale',
- 'imresize', 'imresize_like', 'imresize_to_multiple', 'rescale_size',
- 'imcrop', 'imflip', 'imflip_', 'impad', 'impad_to_multiple', 'imrotate',
- 'imfrombytes', 'imread', 'imwrite', 'supported_backends', 'use_backend',
- 'imdenormalize', 'imnormalize', 'imnormalize_', 'iminvert', 'posterize',
- 'solarize', 'rgb2ycbcr', 'bgr2ycbcr', 'ycbcr2rgb', 'ycbcr2bgr',
- 'tensor2imgs', 'imshear', 'imtranslate', 'adjust_color', 'imequalize',
- 'adjust_brightness', 'adjust_contrast', 'lut_transform', 'clahe',
- 'adjust_sharpness', 'auto_contrast', 'cutout', 'adjust_lighting'
-]
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/__init__.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/__init__.py
deleted file mode 100644
index b0804ff9446160fdad093af0b0fcff2e45fddb76..0000000000000000000000000000000000000000
--- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-from .coco import COCODataset
-from .voc import PascalVOCDataset
-from .concat_dataset import ConcatDataset
-from .background import Background
-from .tsv import TSVDataset, ODTSVDataset
-
-from .modulated_coco import ModulatedDataset, CocoDetection, CocoGrounding
-from .flickr import FlickrDataset
-from .refexp import RefExpDataset
-from .mixed import MixedDataset
-from .gqa import GQADataset
-
-from .coco_dt import CocoDetectionTSV
-from .caption import CaptionTSV
-from .lvis import LvisDetection
-from .pseudo_data import PseudoData
-from .phrasecut import PhrasecutDetection
-
-__all__ = ["COCODataset", "TSVDataset", "ODTSVDataset", "ConcatDataset", "PascalVOCDataset", "Background",
- "ModulatedDataset", "MixedDataset", "CocoDetection", "FlickrDataset", "RefExpDataset", "GQADataset",
- "CocoDetectionTSV", "CocoGrounding", "CaptionTSV", "LvisDetection", "PseudoData", "PhrasecutDetection"
- ]
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/box_coder.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/box_coder.py
deleted file mode 100644
index 46a4acb3247003da2e6e24a4d28deb86de7d7aae..0000000000000000000000000000000000000000
--- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/box_coder.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-import math
-
-import torch
-
-
-class BoxCoder(object):
- """
- This class encodes and decodes a set of bounding boxes into
- the representation used for training the regressors.
- """
-
- def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
- """
- Arguments:
- weights (4-element tuple)
- bbox_xform_clip (float)
- """
- self.weights = weights
- self.bbox_xform_clip = bbox_xform_clip
-
- def encode(self, reference_boxes, proposals):
- """
- Encode a set of proposals with respect to some
- reference boxes
-
- Arguments:
- reference_boxes (Tensor): reference boxes
- proposals (Tensor): boxes to be encoded
- """
-
- TO_REMOVE = 1 # TODO remove
- ex_widths = proposals[:, 2] - proposals[:, 0] + TO_REMOVE
- ex_heights = proposals[:, 3] - proposals[:, 1] + TO_REMOVE
- ex_ctr_x = proposals[:, 0] + 0.5 * ex_widths
- ex_ctr_y = proposals[:, 1] + 0.5 * ex_heights
-
- gt_widths = reference_boxes[:, 2] - reference_boxes[:, 0] + TO_REMOVE
- gt_heights = reference_boxes[:, 3] - reference_boxes[:, 1] + TO_REMOVE
- gt_ctr_x = reference_boxes[:, 0] + 0.5 * gt_widths
- gt_ctr_y = reference_boxes[:, 1] + 0.5 * gt_heights
-
- wx, wy, ww, wh = self.weights
- targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
- targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
- targets_dw = ww * torch.log(gt_widths / ex_widths)
- targets_dh = wh * torch.log(gt_heights / ex_heights)
-
- targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
- return targets
-
- def decode(self, rel_codes, boxes):
- """
- From a set of original boxes and encoded relative box offsets,
- get the decoded boxes.
-
- Arguments:
- rel_codes (Tensor): encoded boxes
- boxes (Tensor): reference boxes.
- """
-
- boxes = boxes.to(rel_codes.dtype)
-
- TO_REMOVE = 1 # TODO remove
- widths = boxes[:, 2] - boxes[:, 0] + TO_REMOVE
- heights = boxes[:, 3] - boxes[:, 1] + TO_REMOVE
- ctr_x = boxes[:, 0] + 0.5 * widths
- ctr_y = boxes[:, 1] + 0.5 * heights
-
- wx, wy, ww, wh = self.weights
- dx = rel_codes[:, 0::4] / wx
- dy = rel_codes[:, 1::4] / wy
- dw = rel_codes[:, 2::4] / ww
- dh = rel_codes[:, 3::4] / wh
-
- # Prevent sending too large values into torch.exp()
- dw = torch.clamp(dw, max=self.bbox_xform_clip)
- dh = torch.clamp(dh, max=self.bbox_xform_clip)
-
- pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
- pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
- pred_w = torch.exp(dw) * widths[:, None]
- pred_h = torch.exp(dh) * heights[:, None]
-
- pred_boxes = torch.zeros_like(rel_codes)
- # x1
- pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
- # y1
- pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
- # x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
- pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1
- # y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
- pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1
-
- return pred_boxes
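As a sanity check of the parameterization above, the following plain-Python sketch encodes one proposal/ground-truth pair and decodes it back (the weights and coordinates are illustrative; note how the `+ 1`/`- 1` from the `TO_REMOVE` convention cancel out):

```python
import math

wx, wy, ww, wh = 10.0, 10.0, 5.0, 5.0            # illustrative weights
px0, py0, px1, py1 = 10.0, 20.0, 50.0, 80.0      # proposal box
gx0, gy0, gx1, gy1 = 12.0, 18.0, 60.0, 90.0      # ground-truth box

pw, ph = px1 - px0 + 1, py1 - py0 + 1
pcx, pcy = px0 + 0.5 * pw, py0 + 0.5 * ph
gw, gh = gx1 - gx0 + 1, gy1 - gy0 + 1
gcx, gcy = gx0 + 0.5 * gw, gy0 + 0.5 * gh

# encode (same formulas as BoxCoder.encode)
dx, dy = wx * (gcx - pcx) / pw, wy * (gcy - pcy) / ph
dw, dh = ww * math.log(gw / pw), wh * math.log(gh / ph)

# decode (same formulas as BoxCoder.decode)
rcx, rcy = (dx / wx) * pw + pcx, (dy / wy) * ph + pcy
rw, rh = math.exp(dw / ww) * pw, math.exp(dh / wh) * ph
decoded = (rcx - 0.5 * rw, rcy - 0.5 * rh, rcx + 0.5 * rw - 1, rcy + 0.5 * rh - 1)
print(decoded)  # -> approximately (12.0, 18.0, 60.0, 90.0), the ground-truth box
```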
diff --git a/spaces/Pritish100/AA0_LeLO_v_2.0/README.md b/spaces/Pritish100/AA0_LeLO_v_2.0/README.md
deleted file mode 100644
index 03c8c20b6529452b12ddfdf8638bed17a527b2ae..0000000000000000000000000000000000000000
--- a/spaces/Pritish100/AA0_LeLO_v_2.0/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: AAO_LeLO_V2.0
-emoji: 🏃
-colorFrom: red
-colorTo: blue
-sdk: gradio
-sdk_version: 3.1.4b5
-app_file: app.py
-pinned: false
-license: mit
-python_version: 3.8.5
-duplicated_from: Pritish100/LaTeX-OCR-demo
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/examples/submit_example_4.sh b/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/examples/submit_example_4.sh
deleted file mode 100644
index d7f19345304df1e27094e781ee6de47764ba11ea..0000000000000000000000000000000000000000
--- a/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/examples/submit_example_4.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-#SBATCH -p gpu
-#SBATCH --mem=32g
-#SBATCH --gres=gpu:rtx2080:1
-#SBATCH -c 3
-#SBATCH --output=example_4.out
-
-source activate mlfold
-
-folder_with_pdbs="../inputs/PDB_complexes/pdbs/"
-
-output_dir="../outputs/example_4_outputs"
-if [ ! -d $output_dir ]
-then
- mkdir -p $output_dir
-fi
-
-
-path_for_parsed_chains=$output_dir"/parsed_pdbs.jsonl"
-path_for_assigned_chains=$output_dir"/assigned_pdbs.jsonl"
-path_for_fixed_positions=$output_dir"/fixed_pdbs.jsonl"
-chains_to_design="A C"
-# For now, position 1 corresponds to the first amino acid in the chain, not to the PDB residue index.
-fixed_positions="1 2 3 4 5 6 7 8 23 25, 10 11 12 13 14 15 16 17 18 19 20 40" #fixing/not designing residues 1 2 3...25 in chain A and residues 10 11 12...40 in chain C
-
-python ../helper_scripts/parse_multiple_chains.py --input_path=$folder_with_pdbs --output_path=$path_for_parsed_chains
-
-python ../helper_scripts/assign_fixed_chains.py --input_path=$path_for_parsed_chains --output_path=$path_for_assigned_chains --chain_list "$chains_to_design"
-
-python ../helper_scripts/make_fixed_positions_dict.py --input_path=$path_for_parsed_chains --output_path=$path_for_fixed_positions --chain_list "$chains_to_design" --position_list "$fixed_positions"
-
-python ../protein_mpnn_run.py \
- --jsonl_path $path_for_parsed_chains \
- --chain_id_jsonl $path_for_assigned_chains \
- --fixed_positions_jsonl $path_for_fixed_positions \
- --out_folder $output_dir \
- --num_seq_per_target 2 \
- --sampling_temp "0.1" \
- --seed 37 \
- --batch_size 1
diff --git a/spaces/PureNaCl/Toxic-Tweets-MS2/app.py b/spaces/PureNaCl/Toxic-Tweets-MS2/app.py
deleted file mode 100644
index 4c48e893a32d25c2fb5fcacbcbe07985919c148a..0000000000000000000000000000000000000000
--- a/spaces/PureNaCl/Toxic-Tweets-MS2/app.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import easyocr as ocr #OCR
-import streamlit as st #Web App
-from PIL import Image #Image Processing
-import numpy as np #Image Processing
-from transformers import pipeline, DistilBertTokenizer, DistilBertModel
-import torch
-import pandas as pd
-import math
-from transformers.modeling_utils import PreTrainedModel
-
-# Adapted from an online notebook on training multi-label and multi-class models:
-#https://github.com/DhavalTaunk08/NLP_scripts/blob/master/Transformers_multilabel_distilbert.ipynb
-
-class NNDistilBertClass(torch.nn.Module):
- def forward(self, ids, am):
- hs = self.l1(input_ids=ids, attention_mask=am)[0]
- result = self.classifier(self.dropout(torch.nn.Tanh()(self.pre_classifier(hs[:, 0]))))
- return result
-
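Note that the class above only defines `forward`, yet it references `self.l1`, `self.pre_classifier`, `self.dropout` and `self.classifier`; its constructor must live elsewhere (likely the training notebook linked above). A plausible sketch of that constructor follows; the base checkpoint, hidden size, dropout rate and the six output labels are assumptions, not taken from this repository:

```python
import torch
from transformers import DistilBertModel

class NNDistilBertClass(torch.nn.Module):
    # Hypothetical constructor: attribute names are inferred from forward() above,
    # but the checkpoint, layer sizes and dropout rate are assumed values.
    def __init__(self, num_labels: int = 6):
        super().__init__()
        self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased")
        self.pre_classifier = torch.nn.Linear(768, 768)  # DistilBERT hidden size
        self.dropout = torch.nn.Dropout(0.3)
        self.classifier = torch.nn.Linear(768, num_labels)

    def forward(self, ids, am):
        hs = self.l1(input_ids=ids, attention_mask=am)[0]
        return self.classifier(self.dropout(torch.nn.Tanh()(self.pre_classifier(hs[:, 0]))))
```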
-def logits_to_probability(output):
- return [logit_to_probability(i) for i in output[0]]
-
-def logit_to_probability(logit):
- e = np.exp(logit)
- return (e)/(1+e)
-
-
-
-def detect_toxic_tweet(text, model, tokenizer):
- input_format = tokenizer.encode_plus(
- text,
- None,
- pad_to_max_length=True,
- max_length=512,
- return_token_type_ids=True,
- add_special_tokens=True,
- )
- output = model(torch.tensor(input_format['input_ids'], dtype=torch.long).to(device, dtype = torch.long), torch.tensor(input_format['attention_mask'], dtype=torch.long).to(device, dtype = torch.long))
- return output
-device = 'cpu'
-toxic_tweet_model = torch.load("models/distilbert_final.bin")
-toxic_tweet_tokenizer = DistilBertTokenizer.from_pretrained("models/distilbert_token_final.bin")
-#https://huggingface.co/transformers/v3.3.1/pretrained_models.html
-#//*[@id="pretrained-models"]/div/table/tbody/tr/td/p/code/span
-id2label = {0: "NEGATIVE", 1: "POSITIVE"}
-DEFAULT_TEXT = "I have a dream that one day this nation will rise up and live out the true meaning of its creed: We hold these truths to be self-evident, that all men are created equal. \
- I have a dream that one day on the red hills of Georgia, the sons of former slaves and the sons of former slave owners will be able to sit down together at the table of brotherhood. \
- I have a dream that one day even the state of Mississippi, a state sweltering with the heat of injustice, sweltering with the heat of oppression, will be transformed into an oasis of freedom and justice. \
- I have a dream that my four little children will one day live in a nation where they will not be judged by the color of their skin but by the content of their character. \
- "
-RESULT_FORMAT = "Result: {} Confidence: {}"
-
-TWEET_RESULT_FORMAT = "This Text is: {}"
-
-DEFAULT_TWEET = "Yo bitch Ja Rule is more succesful then you'll ever be whats up with you and hating you sad mofuckas...i should bitch slap ur pethedic white faces and get you to kiss my ass you guys sicken me. Ja rule is about pride in da music man. dont diss that shit on him. and nothin is wrong bein like tupac he was a brother too...fuckin white boys get things right next time.,"
-
-TOXIC_TWEET_CATEGORIES = ["Toxic", "Severe Toxic", "Obscene", "Threat", "Insult", "Identity Hate", "Non Toxic"]
-
-def main():
- model_names = None
- with open('model-names.txt', 'r') as f:
- model_names = f.readlines()
- model_names = [m.strip() for m in model_names]
-
-
-
- st.title("Toxic Tweets")
- with st.form(key= "form1"):
- user_text = st.text_area("Text Input", DEFAULT_TWEET)
- option = st.selectbox('What model would you like to use', model_names)
- submit_btn = st.form_submit_button("Analyze Text")
-
-
- if(submit_btn):
- if(option != "Milestone-3-DistilBertModel"):
- with st.spinner('Loading Model This May Take A While...'):
- pipe = pipeline("text-classification",model=option)
- with st.spinner('Analyzing Text...'):
- result = pipe(user_text)
- if("0" in result[0]['label'] ):
- result[0]['label'] = id2label[0]
- if("1" in result[0]['label'] ):
- result[0]['label'] = id2label[1]
- st.success(RESULT_FORMAT.format(result[0]['label'], result[0]['score']))
- else:
- with st.spinner('Analyzing Text...'):
- output = detect_toxic_tweet(user_text, toxic_tweet_model, toxic_tweet_tokenizer)
- probs = logits_to_probability(output.cpu().detach().numpy())
- df = pd.DataFrame()
- max_prob = 0
- max_cat = 6
- res = []
- for i, val in enumerate(probs):
- if(val >= 0.5):
- res.append(TOXIC_TWEET_CATEGORIES[i])
- if(val >= max_prob):
- max_prob = val
- max_cat = i
-
-
- df["Text"] = [user_text[ : (len(user_text) if len(user_text) < 20 else 10) ]]
- df["Category"] = TOXIC_TWEET_CATEGORIES[max_cat]
- df["Probability"] = max_prob
- max_sub_cat = 6
- max_sub_prob = 0
- for i, val in enumerate(probs[2:]):
- if(val >= max_sub_prob):
- max_sub_prob = val
- max_sub_cat = i
- df['SubCategory'] = TOXIC_TWEET_CATEGORIES[max_sub_cat+2]
- df['SubCategory Probability'] = max_sub_prob
- if(len(res) == 0):
- st.success(TWEET_RESULT_FORMAT.format(TOXIC_TWEET_CATEGORIES[-1]))
- else:
- st.success(TWEET_RESULT_FORMAT.format(res))
- st.table(df)
-
-
-
-
-
-
-
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/sflckr.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/sflckr.py
deleted file mode 100644
index 91101be5953b113f1e58376af637e43f366b3dee..0000000000000000000000000000000000000000
--- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/sflckr.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import os
-import numpy as np
-import cv2
-import albumentations
-from PIL import Image
-from torch.utils.data import Dataset
-
-
-class SegmentationBase(Dataset):
- def __init__(self,
- data_csv, data_root, segmentation_root,
- size=None, random_crop=False, interpolation="bicubic",
- n_labels=182, shift_segmentation=False,
- ):
- self.n_labels = n_labels
- self.shift_segmentation = shift_segmentation
- self.data_csv = data_csv
- self.data_root = data_root
- self.segmentation_root = segmentation_root
- with open(self.data_csv, "r") as f:
- self.image_paths = f.read().splitlines()
- self._length = len(self.image_paths)
- self.labels = {
- "relative_file_path_": [l for l in self.image_paths],
- "file_path_": [os.path.join(self.data_root, l)
- for l in self.image_paths],
- "segmentation_path_": [os.path.join(self.segmentation_root, l.replace(".jpg", ".png"))
- for l in self.image_paths]
- }
-
- size = None if size is not None and size<=0 else size
- self.size = size
- if self.size is not None:
- self.interpolation = interpolation
- self.interpolation = {
- "nearest": cv2.INTER_NEAREST,
- "bilinear": cv2.INTER_LINEAR,
- "bicubic": cv2.INTER_CUBIC,
- "area": cv2.INTER_AREA,
- "lanczos": cv2.INTER_LANCZOS4}[self.interpolation]
- self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
- interpolation=self.interpolation)
- self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
- interpolation=cv2.INTER_NEAREST)
- self.center_crop = not random_crop
- if self.center_crop:
- self.cropper = albumentations.CenterCrop(height=self.size, width=self.size)
- else:
- self.cropper = albumentations.RandomCrop(height=self.size, width=self.size)
- self.preprocessor = self.cropper
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, i):
- example = dict((k, self.labels[k][i]) for k in self.labels)
- image = Image.open(example["file_path_"])
- if not image.mode == "RGB":
- image = image.convert("RGB")
- image = np.array(image).astype(np.uint8)
- if self.size is not None:
- image = self.image_rescaler(image=image)["image"]
- segmentation = Image.open(example["segmentation_path_"])
- assert segmentation.mode == "L", segmentation.mode
- segmentation = np.array(segmentation).astype(np.uint8)
- if self.shift_segmentation:
- # used to support segmentations containing unlabeled==255 label
- segmentation = segmentation+1
- if self.size is not None:
- segmentation = self.segmentation_rescaler(image=segmentation)["image"]
- if self.size is not None:
- processed = self.preprocessor(image=image,
- mask=segmentation
- )
- else:
- processed = {"image": image,
- "mask": segmentation
- }
- example["image"] = (processed["image"]/127.5 - 1.0).astype(np.float32)
- segmentation = processed["mask"]
- onehot = np.eye(self.n_labels)[segmentation]
- example["segmentation"] = onehot
- return example
-
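The one-hot conversion at the end of `__getitem__` relies on NumPy fancy indexing; a tiny standalone example with three labels and a 2x2 mask (illustrative values) shows what it produces:

```python
import numpy as np

seg = np.array([[0, 2],
                [1, 1]])          # per-pixel label ids
onehot = np.eye(3)[seg]           # one channel per label
print(onehot.shape)               # -> (2, 2, 3)
print(onehot[0, 1])               # pixel labelled 2 -> [0. 0. 1.]
```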
-
-class Examples(SegmentationBase):
- def __init__(self, size=None, random_crop=False, interpolation="bicubic"):
- super().__init__(data_csv="data/sflckr_examples.txt",
- data_root="data/sflckr_images",
- segmentation_root="data/sflckr_segmentations",
- size=size, random_crop=random_crop, interpolation=interpolation)
diff --git a/spaces/QINGCHE/TSA/BERT_inference.py b/spaces/QINGCHE/TSA/BERT_inference.py
deleted file mode 100644
index 2b57651aa5616a7e879a67dcd295bcb753631cd3..0000000000000000000000000000000000000000
--- a/spaces/QINGCHE/TSA/BERT_inference.py
+++ /dev/null
@@ -1,18 +0,0 @@
-
-import transformers
-import torch.nn as nn
-
-class BertClassificationModel(nn.Module):
- def __init__(self):
- super(BertClassificationModel, self).__init__()
- pretrained_weights="bert-base-chinese"
- self.bert = transformers.BertModel.from_pretrained(pretrained_weights)
- for param in self.bert.parameters():
- param.requires_grad = True
- self.dense = nn.Linear(768, 3)
-
- def forward(self, input_ids,token_type_ids,attention_mask):
- bert_output = self.bert(input_ids=input_ids,token_type_ids=token_type_ids, attention_mask=attention_mask)
- bert_cls_hidden_state = bert_output[1]
- linear_output = self.dense(bert_cls_hidden_state)
- return linear_output
\ No newline at end of file
diff --git a/spaces/RMXK/RVC_HFF/go-applio.bat b/spaces/RMXK/RVC_HFF/go-applio.bat
deleted file mode 100644
index 60c0c41d34a8aee5e14e744accb33d028d807245..0000000000000000000000000000000000000000
--- a/spaces/RMXK/RVC_HFF/go-applio.bat
+++ /dev/null
@@ -1,92 +0,0 @@
-@echo off
-setlocal
-title Start Applio
-
-:::
-::: _ _
-::: /\ | (_)
-::: / \ _ __ _ __ | |_ ___
-::: / /\ \ | '_ \| '_ \| | |/ _ \
-::: / ____ \| |_) | |_) | | | (_) |
-::: /_/ \_\ .__/| .__/|_|_|\___/
-::: | | | |
-::: |_| |_|
-:::
-:::
-
-:menu
-for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A
-
-echo [1] Start Applio
-echo [2] Start Applio (DML)
-echo [3] Start Realtime GUI (DML)
-echo [4] Start Realtime GUI (V0)
-echo [5] Start Realtime GUI (V1)
-echo.
-
-set /p choice=Select an option:
-set choice=%choice: =%
-
-cls
-echo WARNING: It's recommended to disable antivirus or firewall, as errors might occur when starting the ssl.
-pause
-
-if "%choice%"=="1" (
- cls
- echo WARNING: At this point, it's recommended to disable antivirus or firewall, as errors might occur when downloading pretrained models.
- pause>null
- echo Starting Applio...
- echo.
- runtime\python.exe infer-web.py --pycmd runtime\python.exe --port 7897
- pause
- cls
- goto menu
-)
-
-if "%choice%"=="2" (
- cls
- echo Starting Applio ^(DML^)...
- echo.
- runtime\python.exe infer-web.py --pycmd runtime\python.exe --port 7897 --dml
- pause
- cls
- goto menu
-)
-
-if "%choice%"=="3" (
- cls
- echo Starting Realtime GUI ^(DML^)...
- echo.
- runtime\python.exe gui_v1.py --pycmd runtime\python.exe --dml
- pause
- cls
- goto menu
-)
-
-if "%choice%"=="4" (
- cls
- echo Starting Realtime GUI ^(V0^)...
- echo.
- runtime\python.exe gui_v0.py
- pause
- cls
- goto menu
-)
-
-if "%choice%"=="5" (
- cls
- echo Starting Realtime GUI ^(V1^)...
- echo.
- runtime\python.exe gui_v1.py
- pause
- cls
- goto menu
-)
-
-cls
-echo Invalid option. Please enter a number from 1 to 5.
-echo.
-echo Press 'Enter' to access the main menu...
-pause>nul
-cls
-goto menu
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/scope.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/scope.py
deleted file mode 100644
index 6822b8ca5429db9785881dd30e3964a655a64a88..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/scope.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from collections.abc import Mapping
-from typing import TYPE_CHECKING, Any, Optional, Tuple
-
-from .highlighter import ReprHighlighter
-from .panel import Panel
-from .pretty import Pretty
-from .table import Table
-from .text import Text, TextType
-
-if TYPE_CHECKING:
- from .console import ConsoleRenderable
-
-
-def render_scope(
- scope: "Mapping[str, Any]",
- *,
- title: Optional[TextType] = None,
- sort_keys: bool = True,
- indent_guides: bool = False,
- max_length: Optional[int] = None,
- max_string: Optional[int] = None,
-) -> "ConsoleRenderable":
- """Render python variables in a given scope.
-
- Args:
- scope (Mapping): A mapping containing variable names and values.
- title (str, optional): Optional title. Defaults to None.
- sort_keys (bool, optional): Enable sorting of items. Defaults to True.
- indent_guides (bool, optional): Enable indentation guides. Defaults to False.
- max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
- Defaults to None.
- max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
-
- Returns:
- ConsoleRenderable: A renderable object.
- """
- highlighter = ReprHighlighter()
- items_table = Table.grid(padding=(0, 1), expand=False)
- items_table.add_column(justify="right")
-
- def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
- """Sort special variables first, then alphabetically."""
- key, _ = item
- return (not key.startswith("__"), key.lower())
-
- items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items()
- for key, value in items:
- key_text = Text.assemble(
- (key, "scope.key.special" if key.startswith("__") else "scope.key"),
- (" =", "scope.equals"),
- )
- items_table.add_row(
- key_text,
- Pretty(
- value,
- highlighter=highlighter,
- indent_guides=indent_guides,
- max_length=max_length,
- max_string=max_string,
- ),
- )
- return Panel.fit(
- items_table,
- title=title,
- border_style="scope.border",
- padding=(0, 1),
- )
-
-
-if __name__ == "__main__": # pragma: no cover
- from pip._vendor.rich import print
-
- print()
-
- def test(foo: float, bar: float) -> None:
- list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"]
- dict_of_things = {
- "version": "1.1",
- "method": "confirmFruitPurchase",
- "params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
- "id": "194521489",
- }
- print(render_scope(locals(), title="[i]locals", sort_keys=False))
-
- test(20.3423, 3.1427)
- print()
diff --git a/spaces/Realcat/image-matching-webui/hloc/extractors/r2d2.py b/spaces/Realcat/image-matching-webui/hloc/extractors/r2d2.py
deleted file mode 100644
index bd22a38ae358c60fce57d2832a183d794912e260..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/hloc/extractors/r2d2.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import sys
-from pathlib import Path
-import torchvision.transforms as tvf
-
-from ..utils.base_model import BaseModel
-
-base_path = Path(__file__).parent / "../../third_party"
-sys.path.append(str(base_path))
-r2d2_path = Path(__file__).parent / "../../third_party/r2d2"
-from r2d2.extract import load_network, NonMaxSuppression, extract_multiscale
-
-
-class R2D2(BaseModel):
- default_conf = {
- "model_name": "r2d2_WASF_N16.pt",
- "max_keypoints": 5000,
- "scale_factor": 2**0.25,
- "min_size": 256,
- "max_size": 1024,
- "min_scale": 0,
- "max_scale": 1,
- "reliability_threshold": 0.7,
- "repetability_threshold": 0.7,
- }
- required_inputs = ["image"]
-
- def _init(self, conf):
- model_fn = r2d2_path / "models" / conf["model_name"]
- self.norm_rgb = tvf.Normalize(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
- )
- self.net = load_network(model_fn)
- self.detector = NonMaxSuppression(
- rel_thr=conf["reliability_threshold"],
- rep_thr=conf["repetability_threshold"],
- )
-
- def _forward(self, data):
- img = data["image"]
- img = self.norm_rgb(img)
-
- xys, desc, scores = extract_multiscale(
- self.net,
- img,
- self.detector,
- scale_f=self.conf["scale_factor"],
- min_size=self.conf["min_size"],
- max_size=self.conf["max_size"],
- min_scale=self.conf["min_scale"],
- max_scale=self.conf["max_scale"],
- )
- idxs = scores.argsort()[-self.conf["max_keypoints"] or None :]
- xy = xys[idxs, :2]
- desc = desc[idxs].t()
- scores = scores[idxs]
-
- pred = {
- "keypoints": xy[None],
- "descriptors": desc[None],
- "scores": scores[None],
- }
- return pred
diff --git a/spaces/Realcat/image-matching-webui/third_party/Roma/roma/utils/utils.py b/spaces/Realcat/image-matching-webui/third_party/Roma/roma/utils/utils.py
deleted file mode 100644
index 969e1003419f3b7f05874830b79de73363017f01..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/Roma/roma/utils/utils.py
+++ /dev/null
@@ -1,703 +0,0 @@
-import warnings
-import numpy as np
-import cv2
-import math
-import torch
-from torchvision import transforms
-from torchvision.transforms.functional import InterpolationMode
-import torch.nn.functional as F
-from PIL import Image
-import kornia
-
-
-def recover_pose(E, kpts0, kpts1, K0, K1, mask):
- best_num_inliers = 0
- K0inv = np.linalg.inv(K0[:2, :2])
- K1inv = np.linalg.inv(K1[:2, :2])
-
- kpts0_n = (K0inv @ (kpts0 - K0[None, :2, 2]).T).T
- kpts1_n = (K1inv @ (kpts1 - K1[None, :2, 2]).T).T
-
- for _E in np.split(E, len(E) / 3):
- n, R, t, _ = cv2.recoverPose(_E, kpts0_n, kpts1_n, np.eye(3), 1e9, mask=mask)
- if n > best_num_inliers:
- best_num_inliers = n
- ret = (R, t, mask.ravel() > 0)
- return ret
-
-
-# Code taken from https://github.com/PruneTruong/DenseMatching/blob/40c29a6b5c35e86b9509e65ab0cd12553d998e5f/validation/utils_pose_estimation.py
-# --- GEOMETRY ---
-def estimate_pose(kpts0, kpts1, K0, K1, norm_thresh, conf=0.99999):
- if len(kpts0) < 5:
- return None
- K0inv = np.linalg.inv(K0[:2, :2])
- K1inv = np.linalg.inv(K1[:2, :2])
-
- kpts0 = (K0inv @ (kpts0 - K0[None, :2, 2]).T).T
- kpts1 = (K1inv @ (kpts1 - K1[None, :2, 2]).T).T
- E, mask = cv2.findEssentialMat(
- kpts0, kpts1, np.eye(3), threshold=norm_thresh, prob=conf
- )
-
- ret = None
- if E is not None:
- best_num_inliers = 0
-
- for _E in np.split(E, len(E) / 3):
- n, R, t, _ = cv2.recoverPose(_E, kpts0, kpts1, np.eye(3), 1e9, mask=mask)
- if n > best_num_inliers:
- best_num_inliers = n
- ret = (R, t, mask.ravel() > 0)
- return ret
-
-
-def estimate_pose_uncalibrated(kpts0, kpts1, K0, K1, norm_thresh, conf=0.99999):
- if len(kpts0) < 5:
- return None
- method = cv2.USAC_ACCURATE
- F, mask = cv2.findFundamentalMat(
- kpts0,
- kpts1,
- ransacReprojThreshold=norm_thresh,
- confidence=conf,
- method=method,
- maxIters=10000,
- )
- E = K1.T @ F @ K0
- ret = None
- if E is not None:
- best_num_inliers = 0
- K0inv = np.linalg.inv(K0[:2, :2])
- K1inv = np.linalg.inv(K1[:2, :2])
-
- kpts0_n = (K0inv @ (kpts0 - K0[None, :2, 2]).T).T
- kpts1_n = (K1inv @ (kpts1 - K1[None, :2, 2]).T).T
-
- for _E in np.split(E, len(E) / 3):
- n, R, t, _ = cv2.recoverPose(
- _E, kpts0_n, kpts1_n, np.eye(3), 1e9, mask=mask
- )
- if n > best_num_inliers:
- best_num_inliers = n
- ret = (R, t, mask.ravel() > 0)
- return ret
-
-
-def unnormalize_coords(x_n, h, w):
- x = torch.stack(
- (w * (x_n[..., 0] + 1) / 2, h * (x_n[..., 1] + 1) / 2), dim=-1
- ) # [-1+1/h, 1-1/h] -> [0.5, h-0.5]
- return x
-
-
-def rotate_intrinsic(K, n):
- base_rot = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
- rot = np.linalg.matrix_power(base_rot, n)
- return rot @ K
-
-
-def rotate_pose_inplane(i_T_w, rot):
- rotation_matrices = [
- np.array(
- [
- [np.cos(r), -np.sin(r), 0.0, 0.0],
- [np.sin(r), np.cos(r), 0.0, 0.0],
- [0.0, 0.0, 1.0, 0.0],
- [0.0, 0.0, 0.0, 1.0],
- ],
- dtype=np.float32,
- )
- for r in [np.deg2rad(d) for d in (0, 270, 180, 90)]
- ]
- return np.dot(rotation_matrices[rot], i_T_w)
-
-
-def scale_intrinsics(K, scales):
- scales = np.diag([1.0 / scales[0], 1.0 / scales[1], 1.0])
- return np.dot(scales, K)
-
-
-def to_homogeneous(points):
- return np.concatenate([points, np.ones_like(points[:, :1])], axis=-1)
-
-
-def angle_error_mat(R1, R2):
- cos = (np.trace(np.dot(R1.T, R2)) - 1) / 2
- cos = np.clip(cos, -1.0, 1.0) # numerical errors can make it out of bounds
- return np.rad2deg(np.abs(np.arccos(cos)))
-
-
-def angle_error_vec(v1, v2):
- n = np.linalg.norm(v1) * np.linalg.norm(v2)
- return np.rad2deg(np.arccos(np.clip(np.dot(v1, v2) / n, -1.0, 1.0)))
-
-
-def compute_pose_error(T_0to1, R, t):
- R_gt = T_0to1[:3, :3]
- t_gt = T_0to1[:3, 3]
- error_t = angle_error_vec(t.squeeze(), t_gt)
- error_t = np.minimum(error_t, 180 - error_t) # ambiguity of E estimation
- error_R = angle_error_mat(R, R_gt)
- return error_t, error_R
-
-
-def pose_auc(errors, thresholds):
- sort_idx = np.argsort(errors)
- errors = np.array(errors.copy())[sort_idx]
- recall = (np.arange(len(errors)) + 1) / len(errors)
- errors = np.r_[0.0, errors]
- recall = np.r_[0.0, recall]
- aucs = []
- for t in thresholds:
- last_index = np.searchsorted(errors, t)
- r = np.r_[recall[:last_index], recall[last_index - 1]]
- e = np.r_[errors[:last_index], t]
- aucs.append(np.trapz(r, x=e) / t)
- return aucs
-
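`pose_auc` integrates the recall-vs-error curve up to each threshold and normalizes by the threshold; the following sketch reproduces the same steps for four illustrative errors and a single 5-degree threshold:

```python
import numpy as np

errors = np.sort(np.array([0.5, 2.0, 4.0, 10.0]))   # illustrative pose errors (degrees)
t = 5.0                                              # single AUC threshold
recall = (np.arange(len(errors)) + 1) / len(errors)
errors = np.r_[0.0, errors]                          # [0, 0.5, 2, 4, 10]
recall = np.r_[0.0, recall]                          # [0, .25, .5, .75, 1]
last = np.searchsorted(errors, t)                    # 4
r = np.r_[recall[:last], recall[last - 1]]           # hold recall flat up to t
e = np.r_[errors[:last], t]
print(np.trapz(r, x=e) / t)                          # -> 0.525
```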
-
-# From Patch2Pix https://github.com/GrumpyZhou/patch2pix
-def get_depth_tuple_transform_ops_nearest_exact(resize=None):
- ops = []
- if resize:
- ops.append(TupleResizeNearestExact(resize))
- return TupleCompose(ops)
-
-
-def get_depth_tuple_transform_ops(resize=None, normalize=True, unscale=False):
- ops = []
- if resize:
- ops.append(TupleResize(resize, mode=InterpolationMode.BILINEAR))
- return TupleCompose(ops)
-
-
-def get_tuple_transform_ops(
- resize=None, normalize=True, unscale=False, clahe=False, colorjiggle_params=None
-):
- ops = []
- if resize:
- ops.append(TupleResize(resize))
- ops.append(TupleToTensorScaled())
- if normalize:
- ops.append(
- TupleNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
- ) # Imagenet mean/std
- return TupleCompose(ops)
-
-
-class ToTensorScaled(object):
- """Convert a RGB PIL Image to a CHW ordered Tensor, scale the range to [0, 1]"""
-
- def __call__(self, im):
- if not isinstance(im, torch.Tensor):
- im = np.array(im, dtype=np.float32).transpose((2, 0, 1))
- im /= 255.0
- return torch.from_numpy(im)
- else:
- return im
-
- def __repr__(self):
- return "ToTensorScaled(./255)"
-
-
-class TupleToTensorScaled(object):
- def __init__(self):
- self.to_tensor = ToTensorScaled()
-
- def __call__(self, im_tuple):
- return [self.to_tensor(im) for im in im_tuple]
-
- def __repr__(self):
- return "TupleToTensorScaled(./255)"
-
-
-class ToTensorUnscaled(object):
- """Convert a RGB PIL Image to a CHW ordered Tensor"""
-
- def __call__(self, im):
- return torch.from_numpy(np.array(im, dtype=np.float32).transpose((2, 0, 1)))
-
- def __repr__(self):
- return "ToTensorUnscaled()"
-
-
-class TupleToTensorUnscaled(object):
- """Convert a RGB PIL Image to a CHW ordered Tensor"""
-
- def __init__(self):
- self.to_tensor = ToTensorUnscaled()
-
- def __call__(self, im_tuple):
- return [self.to_tensor(im) for im in im_tuple]
-
- def __repr__(self):
- return "TupleToTensorUnscaled()"
-
-
-class TupleResizeNearestExact:
- def __init__(self, size):
- self.size = size
-
- def __call__(self, im_tuple):
- return [
- F.interpolate(im, size=self.size, mode="nearest-exact") for im in im_tuple
- ]
-
- def __repr__(self):
- return "TupleResizeNearestExact(size={})".format(self.size)
-
-
-class TupleResize(object):
- def __init__(self, size, mode=InterpolationMode.BICUBIC):
- self.size = size
- self.resize = transforms.Resize(size, mode)
-
- def __call__(self, im_tuple):
- return [self.resize(im) for im in im_tuple]
-
- def __repr__(self):
- return "TupleResize(size={})".format(self.size)
-
-
-class Normalize:
- def __call__(self, im):
- mean = im.mean(dim=(1, 2), keepdims=True)
- std = im.std(dim=(1, 2), keepdims=True)
- return (im - mean) / std
-
-
-class TupleNormalize(object):
- def __init__(self, mean, std):
- self.mean = mean
- self.std = std
- self.normalize = transforms.Normalize(mean=mean, std=std)
-
- def __call__(self, im_tuple):
- c, h, w = im_tuple[0].shape
- if c > 3:
- warnings.warn(f"Number of channels c={c} > 3, assuming first 3 are rgb")
- return [self.normalize(im[:3]) for im in im_tuple]
-
- def __repr__(self):
- return "TupleNormalize(mean={}, std={})".format(self.mean, self.std)
-
-
-class TupleCompose(object):
- def __init__(self, transforms):
- self.transforms = transforms
-
- def __call__(self, im_tuple):
- for t in self.transforms:
- im_tuple = t(im_tuple)
- return im_tuple
-
- def __repr__(self):
- format_string = self.__class__.__name__ + "("
- for t in self.transforms:
- format_string += "\n"
- format_string += " {0}".format(t)
- format_string += "\n)"
- return format_string
-
-
-@torch.no_grad()
-def cls_to_flow(cls, deterministic_sampling=True):
- B, C, H, W = cls.shape
- device = cls.device
- res = round(math.sqrt(C))
- G = torch.meshgrid(
- *[
- torch.linspace(-1 + 1 / res, 1 - 1 / res, steps=res, device=device)
- for _ in range(2)
- ]
- )
- G = torch.stack([G[1], G[0]], dim=-1).reshape(C, 2)
- if deterministic_sampling:
- sampled_cls = cls.max(dim=1).indices
- else:
- sampled_cls = torch.multinomial(
- cls.permute(0, 2, 3, 1).reshape(B * H * W, C).softmax(dim=-1), 1
- ).reshape(B, H, W)
- flow = G[sampled_cls]
- return flow
-
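-# Shape sketch for `cls_to_flow` (the sizes are made up): a (B, C, H, W) score map over
-# C = res * res grid cells becomes a (B, H, W, 2) flow field with coordinates roughly in
-# (-1, 1), using either the per-pixel argmax or a sample from the per-pixel softmax.
-def _cls_to_flow_example():
-    B, res, H, W = 2, 8, 16, 16
-    cls = torch.randn(B, res * res, H, W)    # raw (unnormalized) classification scores
-    flow = cls_to_flow(cls, deterministic_sampling=True)
-    assert flow.shape == (B, H, W, 2)
-    return flow
-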
-
-@torch.no_grad()
-def cls_to_flow_refine(cls):
- B, C, H, W = cls.shape
- device = cls.device
- res = round(math.sqrt(C))
- G = torch.meshgrid(
- *[
- torch.linspace(-1 + 1 / res, 1 - 1 / res, steps=res, device=device)
- for _ in range(2)
- ]
- )
- G = torch.stack([G[1], G[0]], dim=-1).reshape(C, 2)
- cls = cls.softmax(dim=1)
- mode = cls.max(dim=1).indices
-
- index = (
- torch.stack((mode - 1, mode, mode + 1, mode - res, mode + res), dim=1)
- .clamp(0, C - 1)
- .long()
- )
- neighbours = torch.gather(cls, dim=1, index=index)[..., None]
- flow = (
- neighbours[:, 0] * G[index[:, 0]]
- + neighbours[:, 1] * G[index[:, 1]]
- + neighbours[:, 2] * G[index[:, 2]]
- + neighbours[:, 3] * G[index[:, 3]]
- + neighbours[:, 4] * G[index[:, 4]]
- )
- tot_prob = neighbours.sum(dim=1)
- flow = flow / tot_prob
- return flow
-
-
-def get_gt_warp(
- depth1,
- depth2,
- T_1to2,
- K1,
- K2,
- depth_interpolation_mode="bilinear",
- relative_depth_error_threshold=0.05,
- H=None,
- W=None,
-):
-
- if H is None:
- B, H, W = depth1.shape
- else:
- B = depth1.shape[0]
- with torch.no_grad():
- x1_n = torch.meshgrid(
- *[
- torch.linspace(-1 + 1 / n, 1 - 1 / n, n, device=depth1.device)
- for n in (B, H, W)
- ]
- )
- x1_n = torch.stack((x1_n[2], x1_n[1]), dim=-1).reshape(B, H * W, 2)
- mask, x2 = warp_kpts(
- x1_n.double(),
- depth1.double(),
- depth2.double(),
- T_1to2.double(),
- K1.double(),
- K2.double(),
- depth_interpolation_mode=depth_interpolation_mode,
- relative_depth_error_threshold=relative_depth_error_threshold,
- )
- prob = mask.float().reshape(B, H, W)
- x2 = x2.reshape(B, H, W, 2)
- return x2, prob
-
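-# Shape sketch for `get_gt_warp` (all inputs below are synthetic placeholders): given two
-# depth maps, the relative pose T_1to2 and the intrinsics, it returns a dense warp of
-# shape (B, H, W, 2) in normalized coordinates together with a (B, H, W) validity map.
-def _get_gt_warp_example():
-    B, H, W = 1, 32, 32
-    depth1 = torch.ones(B, H, W)             # flat synthetic scene at depth 1
-    depth2 = torch.ones(B, H, W)
-    K = torch.tensor([[[32.0, 0.0, 16.0],
-                       [0.0, 32.0, 16.0],
-                       [0.0, 0.0, 1.0]]])    # made-up pinhole intrinsics
-    T_1to2 = torch.eye(4)[None, :3]          # identity pose, shape (1, 3, 4)
-    x2, prob = get_gt_warp(depth1, depth2, T_1to2, K, K)
-    return x2.shape, prob.shape              # ((1, 32, 32, 2), (1, 32, 32))
-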
-
-@torch.no_grad()
-def warp_kpts(
- kpts0,
- depth0,
- depth1,
- T_0to1,
- K0,
- K1,
- smooth_mask=False,
- return_relative_depth_error=False,
- depth_interpolation_mode="bilinear",
- relative_depth_error_threshold=0.05,
-):
- """Warp kpts0 from I0 to I1 with depth, K and Rt
- Also check covisibility and depth consistency.
- Depth is consistent if relative error < 0.2 (hard-coded).
- # https://github.com/zju3dv/LoFTR/blob/94e98b695be18acb43d5d3250f52226a8e36f839/src/loftr/utils/geometry.py adapted from here
- Args:
- kpts0 (torch.Tensor): [N, L, 2] - , should be normalized in (-1,1)
- depth0 (torch.Tensor): [N, H, W],
- depth1 (torch.Tensor): [N, H, W],
- T_0to1 (torch.Tensor): [N, 3, 4],
- K0 (torch.Tensor): [N, 3, 3],
- K1 (torch.Tensor): [N, 3, 3],
- Returns:
- calculable_mask (torch.Tensor): [N, L]
- warped_keypoints0 (torch.Tensor): [N, L, 2]
- """
- (
- n,
- h,
- w,
- ) = depth0.shape
- if depth_interpolation_mode == "combined":
-        # Inspired by the approach in InLoc: fill holes left by bilinear interpolation using nearest-neighbour interpolation
- if smooth_mask:
- raise NotImplementedError("Combined bilinear and NN warp not implemented")
- valid_bilinear, warp_bilinear = warp_kpts(
- kpts0,
- depth0,
- depth1,
- T_0to1,
- K0,
- K1,
- smooth_mask=smooth_mask,
- return_relative_depth_error=return_relative_depth_error,
- depth_interpolation_mode="bilinear",
- relative_depth_error_threshold=relative_depth_error_threshold,
- )
- valid_nearest, warp_nearest = warp_kpts(
- kpts0,
- depth0,
- depth1,
- T_0to1,
- K0,
- K1,
- smooth_mask=smooth_mask,
- return_relative_depth_error=return_relative_depth_error,
- depth_interpolation_mode="nearest-exact",
- relative_depth_error_threshold=relative_depth_error_threshold,
- )
- nearest_valid_bilinear_invalid = (~valid_bilinear).logical_and(valid_nearest)
- warp = warp_bilinear.clone()
- warp[nearest_valid_bilinear_invalid] = warp_nearest[
- nearest_valid_bilinear_invalid
- ]
- valid = valid_bilinear | valid_nearest
- return valid, warp
-
- kpts0_depth = F.grid_sample(
- depth0[:, None],
- kpts0[:, :, None],
- mode=depth_interpolation_mode,
- align_corners=False,
- )[:, 0, :, 0]
- kpts0 = torch.stack(
- (w * (kpts0[..., 0] + 1) / 2, h * (kpts0[..., 1] + 1) / 2), dim=-1
- ) # [-1+1/h, 1-1/h] -> [0.5, h-0.5]
- # Sample depth, get calculable_mask on depth != 0
- nonzero_mask = kpts0_depth != 0
-
- # Unproject
- kpts0_h = (
- torch.cat([kpts0, torch.ones_like(kpts0[:, :, [0]])], dim=-1)
- * kpts0_depth[..., None]
- ) # (N, L, 3)
- kpts0_n = K0.inverse() @ kpts0_h.transpose(2, 1) # (N, 3, L)
- kpts0_cam = kpts0_n
-
- # Rigid Transform
- w_kpts0_cam = T_0to1[:, :3, :3] @ kpts0_cam + T_0to1[:, :3, [3]] # (N, 3, L)
- w_kpts0_depth_computed = w_kpts0_cam[:, 2, :]
-
- # Project
- w_kpts0_h = (K1 @ w_kpts0_cam).transpose(2, 1) # (N, L, 3)
- w_kpts0 = w_kpts0_h[:, :, :2] / (
- w_kpts0_h[:, :, [2]] + 1e-4
- ) # (N, L, 2), +1e-4 to avoid zero depth
-
- # Covisible Check
- h, w = depth1.shape[1:3]
- covisible_mask = (
- (w_kpts0[:, :, 0] > 0)
- * (w_kpts0[:, :, 0] < w - 1)
- * (w_kpts0[:, :, 1] > 0)
- * (w_kpts0[:, :, 1] < h - 1)
- )
- w_kpts0 = torch.stack(
- (2 * w_kpts0[..., 0] / w - 1, 2 * w_kpts0[..., 1] / h - 1), dim=-1
- ) # from [0.5,h-0.5] -> [-1+1/h, 1-1/h]
- # w_kpts0[~covisible_mask, :] = -5 # xd
-
- w_kpts0_depth = F.grid_sample(
- depth1[:, None],
- w_kpts0[:, :, None],
- mode=depth_interpolation_mode,
- align_corners=False,
- )[:, 0, :, 0]
-
- relative_depth_error = (
- (w_kpts0_depth - w_kpts0_depth_computed) / w_kpts0_depth
- ).abs()
- if not smooth_mask:
- consistent_mask = relative_depth_error < relative_depth_error_threshold
- else:
- consistent_mask = (-relative_depth_error / smooth_mask).exp()
- valid_mask = nonzero_mask * covisible_mask * consistent_mask
- if return_relative_depth_error:
- return relative_depth_error, w_kpts0
- else:
- return valid_mask, w_kpts0
-
-
-imagenet_mean = torch.tensor([0.485, 0.456, 0.406])
-imagenet_std = torch.tensor([0.229, 0.224, 0.225])
-
-
-def numpy_to_pil(x: np.ndarray):
- """
- Args:
- x: Assumed to be of shape (h,w,c)
- """
- if isinstance(x, torch.Tensor):
- x = x.detach().cpu().numpy()
- if x.max() <= 1.01:
- x *= 255
- x = x.astype(np.uint8)
- return Image.fromarray(x)
-
-
-def tensor_to_pil(x, unnormalize=False):
- if unnormalize:
- x = x * (imagenet_std[:, None, None].to(x.device)) + (
- imagenet_mean[:, None, None].to(x.device)
- )
- x = x.detach().permute(1, 2, 0).cpu().numpy()
- x = np.clip(x, 0.0, 1.0)
- return numpy_to_pil(x)
-
-
-def to_cuda(batch):
- for key, value in batch.items():
- if isinstance(value, torch.Tensor):
- batch[key] = value.cuda()
- return batch
-
-
-def to_cpu(batch):
- for key, value in batch.items():
- if isinstance(value, torch.Tensor):
- batch[key] = value.cpu()
- return batch
-
-
-def get_pose(calib):
- w, h = np.array(calib["imsize"])[0]
- return np.array(calib["K"]), np.array(calib["R"]), np.array(calib["T"]).T, h, w
-
-
-def compute_relative_pose(R1, t1, R2, t2):
- rots = R2 @ (R1.T)
- trans = -rots @ t1 + t2
- return rots, trans
-
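-# Consistency sketch for `compute_relative_pose` (all values are random placeholders):
-# with x_i = R_i @ x + t_i, the returned pair satisfies x2 = R_rel @ x1 + t_rel, i.e.
-# R_rel = R2 @ R1.T and t_rel = t2 - R_rel @ t1.
-def _relative_pose_check():
-    rng = np.random.default_rng(0)
-    R1, _ = np.linalg.qr(rng.normal(size=(3, 3)))   # orthogonal (rotation-like) matrices
-    R2, _ = np.linalg.qr(rng.normal(size=(3, 3)))
-    t1, t2, x = rng.normal(size=3), rng.normal(size=3), rng.normal(size=3)
-    R_rel, t_rel = compute_relative_pose(R1, t1, R2, t2)
-    x1, x2 = R1 @ x + t1, R2 @ x + t2
-    assert np.allclose(R_rel @ x1 + t_rel, x2)
-    return R_rel, t_rel
-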
-
-@torch.no_grad()
-def reset_opt(opt):
- for group in opt.param_groups:
- for p in group["params"]:
- if p.requires_grad:
- state = opt.state[p]
- # State initialization
-
- # Exponential moving average of gradient values
- state["exp_avg"] = torch.zeros_like(p)
- # Exponential moving average of squared gradient values
- state["exp_avg_sq"] = torch.zeros_like(p)
- # Exponential moving average of gradient difference
- state["exp_avg_diff"] = torch.zeros_like(p)
-
-
-def flow_to_pixel_coords(flow, h1, w1):
- flow = torch.stack(
- (
- w1 * (flow[..., 0] + 1) / 2,
- h1 * (flow[..., 1] + 1) / 2,
- ),
- axis=-1,
- )
- return flow
-
-
-def flow_to_normalized_coords(flow, h1, w1):
- flow = torch.stack(
- (
- 2 * (flow[..., 0]) / w1 - 1,
- 2 * (flow[..., 1]) / h1 - 1,
- ),
- axis=-1,
- )
- return flow
-
-
-def warp_to_pixel_coords(warp, h1, w1, h2, w2):
- warp1 = warp[..., :2]
- warp1 = torch.stack(
- (
- w1 * (warp1[..., 0] + 1) / 2,
- h1 * (warp1[..., 1] + 1) / 2,
- ),
- axis=-1,
- )
- warp2 = warp[..., 2:]
- warp2 = torch.stack(
- (
- w2 * (warp2[..., 0] + 1) / 2,
- h2 * (warp2[..., 1] + 1) / 2,
- ),
- axis=-1,
- )
- return torch.cat((warp1, warp2), dim=-1)
-
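-# Round-trip sketch (sizes hypothetical): `flow_to_pixel_coords` maps normalized (-1, 1)
-# coordinates to pixel coordinates and `flow_to_normalized_coords` inverts it, so the two
-# helpers undo each other up to floating point error.
-def _flow_coord_roundtrip_example(h1=480, w1=640):
-    flow_n = torch.rand(2, 8, 8, 2) * 2 - 1          # random normalized coordinates
-    flow_px = flow_to_pixel_coords(flow_n, h1, w1)
-    flow_back = flow_to_normalized_coords(flow_px, h1, w1)
-    return torch.allclose(flow_n, flow_back, atol=1e-6)   # True
-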
-
-def signed_point_line_distance(point, line, eps: float = 1e-9):
- r"""Return the distance from points to lines.
-
- Args:
- point: (possibly homogeneous) points :math:`(*, N, 2 or 3)`.
- line: lines coefficients :math:`(a, b, c)` with shape :math:`(*, N, 3)`, where :math:`ax + by + c = 0`.
- eps: Small constant for safe sqrt.
-
- Returns:
- the computed distance with shape :math:`(*, N)`.
- """
-
-    if point.shape[-1] not in (2, 3):
-        raise ValueError(f"pts must be a (*, 2 or 3) tensor. Got {point.shape}")
-
-    if line.shape[-1] != 3:
-        raise ValueError(f"lines must be a (*, 3) tensor. Got {line.shape}")
-
- numerator = (
- line[..., 0] * point[..., 0] + line[..., 1] * point[..., 1] + line[..., 2]
- )
- denominator = line[..., :2].norm(dim=-1)
-
- return numerator / (denominator + eps)
-
-
-def signed_left_to_right_epipolar_distance(pts1, pts2, Fm):
- r"""Return one-sided epipolar distance for correspondences given the fundamental matrix.
-
-    This method measures the distance from points in the right images to the epipolar
-    lines that their corresponding left-image points induce in the right images.
-
- Args:
- pts1: correspondences from the left images with shape
- :math:`(*, N, 2 or 3)`. If they are not homogeneous, converted automatically.
- pts2: correspondences from the right images with shape
- :math:`(*, N, 2 or 3)`. If they are not homogeneous, converted automatically.
- Fm: Fundamental matrices with shape :math:`(*, 3, 3)`. Called Fm to
- avoid ambiguity with torch.nn.functional.
-
-    Returns:
-        the computed one-sided epipolar distance with shape :math:`(*, N)`.
- """
- import kornia
-
- if (len(Fm.shape) < 3) or not Fm.shape[-2:] == (3, 3):
- raise ValueError(f"Fm must be a (*, 3, 3) tensor. Got {Fm.shape}")
-
- if pts1.shape[-1] == 2:
- pts1 = kornia.geometry.convert_points_to_homogeneous(pts1)
-
- F_t = Fm.transpose(dim0=-2, dim1=-1)
- line1_in_2 = pts1 @ F_t
-
- return signed_point_line_distance(pts2, line1_in_2)
-
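-# Sanity sketch for the epipolar distance above (the matrix and points are made up, and
-# `kornia` is assumed to be installed since the function imports it): a right-image point
-# lying exactly on the epipolar line of its left-image correspondence has distance ~0.
-def _epipolar_distance_example():
-    Fm = torch.tensor([[[0.0, -1.0, 2.0],
-                        [1.0, 0.0, -3.0],
-                        [-2.0, 3.0, 0.0]]])          # a made-up "fundamental" matrix
-    pts1 = torch.tensor([[[4.0, 5.0]]])              # one left-image point
-    a, b, c = (Fm[0] @ torch.tensor([4.0, 5.0, 1.0])).tolist()   # epipolar line in image 2
-    pts2 = torch.tensor([[[1.0, -(c + a) / b]]])     # pick x = 1 and solve a*x + b*y + c = 0
-    return signed_left_to_right_epipolar_distance(pts1, pts2, Fm)   # ~0
-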
-
-def get_grid(b, h, w, device):
- grid = torch.meshgrid(
- *[torch.linspace(-1 + 1 / n, 1 - 1 / n, n, device=device) for n in (b, h, w)]
- )
- grid = torch.stack((grid[2], grid[1]), dim=-1).reshape(b, h, w, 2)
- return grid
diff --git a/spaces/Redgon/bingo/src/components/chat-history.tsx b/spaces/Redgon/bingo/src/components/chat-history.tsx
deleted file mode 100644
index feb81de66562edda8f40d3c0cc717202c92b6509..0000000000000000000000000000000000000000
--- a/spaces/Redgon/bingo/src/components/chat-history.tsx
+++ /dev/null
@@ -1,48 +0,0 @@
-import { IconEdit, IconTrash, IconMore, IconDownload } from "./ui/icons"
-
-export function ChatHistory() {
- return (
-
-
- 历史记录
-
-
-
-
-
-
-
-
-
-
无标题的聊天
-
-
上午1:42
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- )
-}
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/image/misc.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/image/misc.py
deleted file mode 100644
index 3e61f05e3b05e4c7b40de4eb6c8eb100e6da41d0..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/image/misc.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import numpy as np
-
-import annotator.uniformer.mmcv as mmcv
-
-try:
- import torch
-except ImportError:
- torch = None
-
-
-def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
- """Convert tensor to 3-channel images.
-
- Args:
- tensor (torch.Tensor): Tensor that contains multiple images, shape (
- N, C, H, W).
- mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0).
- std (tuple[float], optional): Standard deviation of images.
- Defaults to (1, 1, 1).
- to_rgb (bool, optional): Whether the tensor was converted to RGB
- format in the first place. If so, convert it back to BGR.
- Defaults to True.
-
- Returns:
- list[np.ndarray]: A list that contains multiple images.
- """
-
- if torch is None:
- raise RuntimeError('pytorch is not installed')
- assert torch.is_tensor(tensor) and tensor.ndim == 4
- assert len(mean) == 3
- assert len(std) == 3
-
- num_imgs = tensor.size(0)
- mean = np.array(mean, dtype=np.float32)
- std = np.array(std, dtype=np.float32)
- imgs = []
- for img_id in range(num_imgs):
- img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
- img = mmcv.imdenormalize(
- img, mean, std, to_bgr=to_rgb).astype(np.uint8)
- imgs.append(np.ascontiguousarray(img))
- return imgs
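-
-
-# Minimal usage sketch (the mean/std values below are the common ImageNet statistics on a
-# 0-255 scale and are only illustrative): undo the normalization on a batch and get back
-# uint8 images ready for OpenCV-style (BGR) visualization.
-def _tensor2imgs_example():
-    batch = torch.randn(2, 3, 64, 64)                # a normalized (N, C, H, W) batch
-    imgs = tensor2imgs(batch,
-                       mean=(123.675, 116.28, 103.53),
-                       std=(58.395, 57.12, 57.375),
-                       to_rgb=True)
-    return [im.shape for im in imgs]                 # [(64, 64, 3), (64, 64, 3)]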
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
deleted file mode 100644
index cfc838f23270a1ae4d70f90059b67a890850e981..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import torch
-from mmcv.runner import force_fp32
-
-from mmdet.models.builder import ROI_EXTRACTORS
-from .base_roi_extractor import BaseRoIExtractor
-
-
-@ROI_EXTRACTORS.register_module()
-class SingleRoIExtractor(BaseRoIExtractor):
- """Extract RoI features from a single level feature map.
-
- If there are multiple input feature levels, each RoI is mapped to a level
- according to its scale. The mapping rule is proposed in
- `FPN `_.
-
- Args:
- roi_layer (dict): Specify RoI layer type and arguments.
- out_channels (int): Output channels of RoI layers.
- featmap_strides (List[int]): Strides of input feature maps.
- finest_scale (int): Scale threshold of mapping to level 0. Default: 56.
- """
-
- def __init__(self,
- roi_layer,
- out_channels,
- featmap_strides,
- finest_scale=56):
- super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,
- featmap_strides)
- self.finest_scale = finest_scale
-
- def map_roi_levels(self, rois, num_levels):
- """Map rois to corresponding feature levels by scales.
-
- - scale < finest_scale * 2: level 0
- - finest_scale * 2 <= scale < finest_scale * 4: level 1
- - finest_scale * 4 <= scale < finest_scale * 8: level 2
- - scale >= finest_scale * 8: level 3
-
- Args:
- rois (Tensor): Input RoIs, shape (k, 5).
- num_levels (int): Total level number.
-
- Returns:
- Tensor: Level index (0-based) of each RoI, shape (k, )
- """
- scale = torch.sqrt(
- (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
- target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
- target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
- return target_lvls
-
- @force_fp32(apply_to=('feats', ), out_fp16=True)
- def forward(self, feats, rois, roi_scale_factor=None):
- """Forward function."""
- out_size = self.roi_layers[0].output_size
- num_levels = len(feats)
- expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])
- if torch.onnx.is_in_onnx_export():
- # Work around to export mask-rcnn to onnx
- roi_feats = rois[:, :1].clone().detach()
- roi_feats = roi_feats.expand(*expand_dims)
- roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)
- roi_feats = roi_feats * 0
- else:
- roi_feats = feats[0].new_zeros(
- rois.size(0), self.out_channels, *out_size)
- # TODO: remove this when parrots supports
- if torch.__version__ == 'parrots':
- roi_feats.requires_grad = True
-
- if num_levels == 1:
- if len(rois) == 0:
- return roi_feats
- return self.roi_layers[0](feats[0], rois)
-
- target_lvls = self.map_roi_levels(rois, num_levels)
-
- if roi_scale_factor is not None:
- rois = self.roi_rescale(rois, roi_scale_factor)
-
- for i in range(num_levels):
- mask = target_lvls == i
- if torch.onnx.is_in_onnx_export():
- # To keep all roi_align nodes exported to onnx
- # and skip nonzero op
- mask = mask.float().unsqueeze(-1).expand(*expand_dims).reshape(
- roi_feats.shape)
- roi_feats_t = self.roi_layers[i](feats[i], rois)
- roi_feats_t *= mask
- roi_feats += roi_feats_t
- continue
- inds = mask.nonzero(as_tuple=False).squeeze(1)
- if inds.numel() > 0:
- rois_ = rois[inds]
- roi_feats_t = self.roi_layers[i](feats[i], rois_)
- roi_feats[inds] = roi_feats_t
- else:
- # Sometimes some pyramid levels will not be used for RoI
- # feature extraction and this will cause an incomplete
- # computation graph in one GPU, which is different from those
- # in other GPUs and will cause a hanging error.
- # Therefore, we add it to ensure each feature pyramid is
- # included in the computation graph to avoid runtime bugs.
- roi_feats += sum(
- x.view(-1)[0]
- for x in self.parameters()) * 0. + feats[i].sum() * 0.
- return roi_feats
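-
-
-# Worked sketch of the `map_roi_levels` rule above (finest_scale = 56; the RoI sizes are
-# illustrative): floor(log2(scale / 56)) clamped to [0, num_levels - 1] sends an 80x80
-# RoI to level 0, 150x150 to level 1, 300x300 to level 2 and 600x600 to level 3.
-def _map_roi_levels_example(finest_scale=56, num_levels=4):
-    rois = torch.tensor([[0., 0., 0., 80., 80.],     # (batch_idx, x1, y1, x2, y2)
-                         [0., 0., 0., 150., 150.],
-                         [0., 0., 0., 300., 300.],
-                         [0., 0., 0., 600., 600.]])
-    scale = torch.sqrt((rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
-    target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
-    return target_lvls.clamp(min=0, max=num_levels - 1).long()   # tensor([0, 1, 2, 3])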
diff --git a/spaces/SUPERSHANKY/Finetuned_Diffusion_Max/utils.py b/spaces/SUPERSHANKY/Finetuned_Diffusion_Max/utils.py
deleted file mode 100644
index ff1c065d186347ca51b47d010a697dbe1814695c..0000000000000000000000000000000000000000
--- a/spaces/SUPERSHANKY/Finetuned_Diffusion_Max/utils.py
+++ /dev/null
@@ -1,6 +0,0 @@
-def is_google_colab():
- try:
- import google.colab
- return True
-    except ImportError:
- return False
\ No newline at end of file
diff --git a/spaces/Selim321/youtube-summarizer/app.py b/spaces/Selim321/youtube-summarizer/app.py
deleted file mode 100644
index fd21c6189b7ab6b9bb6049aab2cda8dc166b7bd5..0000000000000000000000000000000000000000
--- a/spaces/Selim321/youtube-summarizer/app.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import streamlit as st
-import requests
-from gtts import gTTS
-from urllib.parse import urlparse, parse_qs
-from youtube_transcript_api import YouTubeTranscriptApi
-import unicodedata
-from deepmultilingualpunctuation import PunctuationModel
-from transformers import pipeline
-
-
-def summarize_video(url):
- if "watch" in url:
- pass
- else:
- url = url.replace("youtu.be/", "www.youtube.com/watch?v=")
-
- parsed_url = urlparse(url)
- video_id = parse_qs(parsed_url.query)['v'][0]
-
- # Get the transcript
- transcript = YouTubeTranscriptApi.get_transcript(video_id)
-
-    # Combine all the transcript entries into one list
- text = []
- for i in range(0, len(transcript)):
- text.append(transcript[i]["text"])
-
- # Join list items into one paragraph
- video_transcript = " ".join(text)
- print("Text transcript created")
-
- print(video_transcript)
-
-    # Text normalization (keep the result so the normalized text is actually used below)
-    video_transcript = unicodedata.normalize('NFKD', video_transcript)
-    print("Text normalized")
-
-
- # Add punctuation
- model = PunctuationModel()
- result = model.restore_punctuation(video_transcript)
- print("Punctuation restored")
-
- # SUMMARIZATION
-
- # instantiate the summarization pipeline
- summarization_pipeline = pipeline(
- "summarization",
- model="t5-base", # you can choose a different model, depending on your requirements
- tokenizer="t5-base" # you can choose a different tokenizer, depending on your requirements
- )
-
- # define the input text to summarize
- input_text = result
-
- # split the input text into smaller chunks
- chunk_size = 5000
- chunks = [input_text[i:i+chunk_size] for i in range(0, len(input_text), chunk_size)]
-
- # summarize each chunk separately
- summaries = []
- for chunk in chunks:
- summary = summarization_pipeline(chunk, max_length=200, min_length=30, do_sample=False)
- summaries.append(summary[0]['summary_text'])
-
- # combine the summaries of all chunks into a single summary
- final_summary = " ".join(summaries)
-
- # print the generated summary
- return final_summary
-
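-# Chunking sketch (the 5000-character size mirrors the value used above and is purely a
-# heuristic): the transcript is split on raw character offsets, so boundaries may fall
-# mid-sentence; each chunk is summarized independently and the partial summaries joined.
-def _chunk_text(text, chunk_size=5000):
-    return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
-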
-# Define the Streamlit app
-st.title("YouTube Summarizer")
-
-# Define the input form
-form = st.form(key="input_form")
-
-# Get the video ID from the URL
-video_url = form.text_input("Enter a YouTube video URL")
-
-# Submit button
-submit_button = form.form_submit_button("Summarize Video")
-
-# Handle form submissions
-if submit_button:
- # Call the summarize_video function to get the summary
- summary = summarize_video(video_url)
-
- # Display the summary to the user
- st.subheader("Summary")
- st.write(summary)
-
- # Convert text summary into audio
- tts = gTTS(summary)
- print("converting text to audio")
- tts.save('Summary.mp3')
-
- # Download audio transcript
- with open('Summary.mp3', 'rb') as f:
- st.download_button('Download mp3', f, file_name='Summary.mp3')
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/Silentlin/DiffSinger/utils/pl_utils.py b/spaces/Silentlin/DiffSinger/utils/pl_utils.py
deleted file mode 100644
index 76a94ed6abe22e349c51c49afdbf052d52b8d98b..0000000000000000000000000000000000000000
--- a/spaces/Silentlin/DiffSinger/utils/pl_utils.py
+++ /dev/null
@@ -1,1618 +0,0 @@
-import matplotlib
-from torch.nn import DataParallel
-from torch.nn.parallel import DistributedDataParallel
-
-matplotlib.use('Agg')
-import glob
-import itertools
-import subprocess
-import threading
-import traceback
-
-from pytorch_lightning.callbacks import GradientAccumulationScheduler
-from pytorch_lightning.callbacks import ModelCheckpoint
-
-from functools import wraps
-from torch.cuda._utils import _get_device_index
-import numpy as np
-import torch.optim
-import torch.utils.data
-import copy
-import logging
-import os
-import re
-import sys
-import torch
-import torch.distributed as dist
-import torch.multiprocessing as mp
-import tqdm
-from torch.optim.optimizer import Optimizer
-
-
-def get_a_var(obj): # pragma: no cover
- if isinstance(obj, torch.Tensor):
- return obj
-
- if isinstance(obj, list) or isinstance(obj, tuple):
- for result in map(get_a_var, obj):
- if isinstance(result, torch.Tensor):
- return result
- if isinstance(obj, dict):
- for result in map(get_a_var, obj.items()):
- if isinstance(result, torch.Tensor):
- return result
- return None
-
-
-def data_loader(fn):
- """
-    Decorator that makes the wrapped dataloader method behave like a lazy, memoized property.
- :param fn:
- :return:
- """
-
-    attr_name = '_lazy_' + fn.__name__
-
-    @wraps(fn)
-    def _get_data_loader(self):
- try:
- value = getattr(self, attr_name)
- except AttributeError:
- try:
- value = fn(self) # Lazy evaluation, done only once.
- if (
- value is not None and
- not isinstance(value, list) and
- fn.__name__ in ['test_dataloader', 'val_dataloader']
- ):
- value = [value]
- except AttributeError as e:
- # Guard against AttributeError suppression. (Issue #142)
- traceback.print_exc()
- error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e)
- raise RuntimeError(error) from e
- setattr(self, attr_name, value) # Memoize evaluation.
- return value
-
- return _get_data_loader
-
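-
-# Minimal usage sketch (the class and loader below are hypothetical): the decorator
-# memoizes the wrapped dataloader method on first access and wraps val/test loaders
-# in a list, matching what the trainer code further down expects.
-class _DataLoaderExample:
-    @data_loader
-    def val_dataloader(self):
-        dataset = torch.utils.data.TensorDataset(torch.zeros(4, 1))
-        return torch.utils.data.DataLoader(dataset)   # returned to callers as [DataLoader]
-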
-
-def parallel_apply(modules, inputs, kwargs_tup=None, devices=None): # pragma: no cover
- r"""Applies each `module` in :attr:`modules` in parallel on arguments
- contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
- on each of :attr:`devices`.
-
- Args:
- modules (Module): modules to be parallelized
- inputs (tensor): inputs to the modules
- devices (list of int or torch.device): CUDA devices
-
- :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
- :attr:`devices` (if given) should all have same length. Moreover, each
- element of :attr:`inputs` can either be a single object as the only argument
- to a module, or a collection of positional arguments.
- """
- assert len(modules) == len(inputs)
- if kwargs_tup is not None:
- assert len(modules) == len(kwargs_tup)
- else:
- kwargs_tup = ({},) * len(modules)
- if devices is not None:
- assert len(modules) == len(devices)
- else:
- devices = [None] * len(modules)
- devices = list(map(lambda x: _get_device_index(x, True), devices))
- lock = threading.Lock()
- results = {}
- grad_enabled = torch.is_grad_enabled()
-
- def _worker(i, module, input, kwargs, device=None):
- torch.set_grad_enabled(grad_enabled)
- if device is None:
- device = get_a_var(input).get_device()
- try:
- with torch.cuda.device(device):
- # this also avoids accidental slicing of `input` if it is a Tensor
- if not isinstance(input, (list, tuple)):
- input = (input,)
-
- # ---------------
- # CHANGE
- if module.training:
- output = module.training_step(*input, **kwargs)
-
- elif module.testing:
- output = module.test_step(*input, **kwargs)
-
- else:
- output = module.validation_step(*input, **kwargs)
- # ---------------
-
- with lock:
- results[i] = output
- except Exception as e:
- with lock:
- results[i] = e
-
- # make sure each module knows what training state it's in...
- # fixes weird bug where copies are out of sync
- root_m = modules[0]
- for m in modules[1:]:
- m.training = root_m.training
- m.testing = root_m.testing
-
- if len(modules) > 1:
- threads = [threading.Thread(target=_worker,
- args=(i, module, input, kwargs, device))
- for i, (module, input, kwargs, device) in
- enumerate(zip(modules, inputs, kwargs_tup, devices))]
-
- for thread in threads:
- thread.start()
- for thread in threads:
- thread.join()
- else:
- _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])
-
- outputs = []
- for i in range(len(inputs)):
- output = results[i]
- if isinstance(output, Exception):
- raise output
- outputs.append(output)
- return outputs
-
-
-def _find_tensors(obj): # pragma: no cover
- r"""
- Recursively find all tensors contained in the specified object.
- """
- if isinstance(obj, torch.Tensor):
- return [obj]
- if isinstance(obj, (list, tuple)):
- return itertools.chain(*map(_find_tensors, obj))
- if isinstance(obj, dict):
- return itertools.chain(*map(_find_tensors, obj.values()))
- return []
-
-
-class DDP(DistributedDataParallel):
- """
- Override the forward call in lightning so it goes to training and validation step respectively
- """
-
- def parallel_apply(self, replicas, inputs, kwargs):
- return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
-
- def forward(self, *inputs, **kwargs): # pragma: no cover
- self._sync_params()
- if self.device_ids:
- inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
- if len(self.device_ids) == 1:
- # --------------
- # LIGHTNING MOD
- # --------------
- # normal
- # output = self.module(*inputs[0], **kwargs[0])
- # lightning
- if self.module.training:
- output = self.module.training_step(*inputs[0], **kwargs[0])
- elif self.module.testing:
- output = self.module.test_step(*inputs[0], **kwargs[0])
- else:
- output = self.module.validation_step(*inputs[0], **kwargs[0])
- else:
- outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
- output = self.gather(outputs, self.output_device)
- else:
- # normal
- output = self.module(*inputs, **kwargs)
-
- if torch.is_grad_enabled():
- # We'll return the output object verbatim since it is a freeform
- # object. We need to find any tensors in this object, though,
- # because we need to figure out which parameters were used during
- # this forward pass, to ensure we short circuit reduction for any
- # unused parameters. Only if `find_unused_parameters` is set.
- if self.find_unused_parameters:
- self.reducer.prepare_for_backward(list(_find_tensors(output)))
- else:
- self.reducer.prepare_for_backward([])
- return output
-
-
-class DP(DataParallel):
- """
- Override the forward call in lightning so it goes to training and validation step respectively
- """
-
- def forward(self, *inputs, **kwargs):
- if not self.device_ids:
- return self.module(*inputs, **kwargs)
-
- for t in itertools.chain(self.module.parameters(), self.module.buffers()):
- if t.device != self.src_device_obj:
- raise RuntimeError("module must have its parameters and buffers "
- "on device {} (device_ids[0]) but found one of "
- "them on device: {}".format(self.src_device_obj, t.device))
-
- inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
- if len(self.device_ids) == 1:
- # lightning
- if self.module.training:
- return self.module.training_step(*inputs[0], **kwargs[0])
- elif self.module.testing:
- return self.module.test_step(*inputs[0], **kwargs[0])
- else:
- return self.module.validation_step(*inputs[0], **kwargs[0])
-
- replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
- outputs = self.parallel_apply(replicas, inputs, kwargs)
- return self.gather(outputs, self.output_device)
-
- def parallel_apply(self, replicas, inputs, kwargs):
- return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
-
-
-class GradientAccumulationScheduler:
- def __init__(self, scheduling: dict):
- if scheduling == {}: # empty dict error
- raise TypeError("Empty dict cannot be interpreted correct")
-
- for key in scheduling.keys():
- if not isinstance(key, int) or not isinstance(scheduling[key], int):
- raise TypeError("All epoches and accumulation factor must be integers")
-
- minimal_epoch = min(scheduling.keys())
- if minimal_epoch < 1:
- msg = f"Epochs indexing from 1, epoch {minimal_epoch} cannot be interpreted correct"
- raise IndexError(msg)
-        elif minimal_epoch != 1:  # if the user didn't define an accumulation factor for the first epoch
- scheduling.update({1: 1})
-
- self.scheduling = scheduling
- self.epochs = sorted(scheduling.keys())
-
- def on_epoch_begin(self, epoch, trainer):
- epoch += 1 # indexing epochs from 1
- for i in reversed(range(len(self.epochs))):
- if epoch >= self.epochs[i]:
- trainer.accumulate_grad_batches = self.scheduling.get(self.epochs[i])
- break
-
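-# Minimal usage sketch (the schedule values are hypothetical, and `trainer` is any object
-# with an `accumulate_grad_batches` attribute such as the BaseTrainer below): accumulate
-# 1 batch for epochs 1-4, 4 batches for epochs 5-9 and 8 batches from epoch 10 onwards.
-def _grad_accumulation_example(trainer):
-    scheduler = GradientAccumulationScheduler({5: 4, 10: 8})   # 1-based epoch -> factor
-    scheduler.on_epoch_begin(epoch=6, trainer=trainer)          # 0-based epoch 6 == 1-based epoch 7
-    return trainer.accumulate_grad_batches                      # -> 4
-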
-
-class LatestModelCheckpoint(ModelCheckpoint):
- def __init__(self, filepath, monitor='val_loss', verbose=0, num_ckpt_keep=5,
- save_weights_only=False, mode='auto', period=1, prefix='model', save_best=True):
- super(ModelCheckpoint, self).__init__()
- self.monitor = monitor
- self.verbose = verbose
- self.filepath = filepath
- os.makedirs(filepath, exist_ok=True)
- self.num_ckpt_keep = num_ckpt_keep
- self.save_best = save_best
- self.save_weights_only = save_weights_only
- self.period = period
- self.epochs_since_last_check = 0
- self.prefix = prefix
- self.best_k_models = {}
- # {filename: monitor}
- self.kth_best_model = ''
- self.save_top_k = 1
- self.task = None
- if mode == 'min':
- self.monitor_op = np.less
- self.best = np.Inf
- self.mode = 'min'
- elif mode == 'max':
- self.monitor_op = np.greater
- self.best = -np.Inf
- self.mode = 'max'
- else:
- if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
- self.monitor_op = np.greater
- self.best = -np.Inf
- self.mode = 'max'
- else:
- self.monitor_op = np.less
- self.best = np.Inf
- self.mode = 'min'
- if os.path.exists(f'{self.filepath}/best_valid.npy'):
- self.best = np.load(f'{self.filepath}/best_valid.npy')[0]
-
- def get_all_ckpts(self):
- return sorted(glob.glob(f'{self.filepath}/{self.prefix}_ckpt_steps_*.ckpt'),
-                      key=lambda x: -int(re.findall(r'.*steps_(\d+)\.ckpt', x)[0]))
-
- def on_epoch_end(self, epoch, logs=None):
- logs = logs or {}
- self.epochs_since_last_check += 1
- best_filepath = f'{self.filepath}/{self.prefix}_ckpt_best.pt'
- if self.epochs_since_last_check >= self.period:
- self.epochs_since_last_check = 0
- filepath = f'{self.filepath}/{self.prefix}_ckpt_steps_{self.task.global_step}.ckpt'
- if self.verbose > 0:
- logging.info(f'Epoch {epoch:05d}@{self.task.global_step}: saving model to {filepath}')
- self._save_model(filepath)
- for old_ckpt in self.get_all_ckpts()[self.num_ckpt_keep:]:
- subprocess.check_call(f'rm -rf "{old_ckpt}"', shell=True)
- if self.verbose > 0:
- logging.info(f'Delete ckpt: {os.path.basename(old_ckpt)}')
- current = logs.get(self.monitor)
- if current is not None and self.save_best:
- if self.monitor_op(current, self.best):
- self.best = current
- if self.verbose > 0:
- logging.info(
- f'Epoch {epoch:05d}@{self.task.global_step}: {self.monitor} reached'
- f' {current:0.5f} (best {self.best:0.5f}), saving model to'
- f' {best_filepath} as top 1')
- self._save_model(best_filepath)
- np.save(f'{self.filepath}/best_valid.npy', [self.best])
-
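-# Minimal configuration sketch (the directory and monitored metric are hypothetical):
-# keep the five newest step checkpoints under `checkpoints/` and also track the best
-# `val_loss` in `model_ckpt_best.pt`.
-def _make_checkpoint_callback():
-    return LatestModelCheckpoint(filepath='checkpoints',
-                                 monitor='val_loss',
-                                 num_ckpt_keep=5,
-                                 mode='min',
-                                 save_best=True)
-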
-
-class BaseTrainer:
- def __init__(
- self,
- logger=True,
- checkpoint_callback=True,
- default_save_path=None,
- gradient_clip_val=0,
- process_position=0,
- gpus=-1,
- log_gpu_memory=None,
- show_progress_bar=True,
- track_grad_norm=-1,
- check_val_every_n_epoch=1,
- accumulate_grad_batches=1,
- max_updates=1000,
- min_epochs=1,
- val_check_interval=1.0,
- log_save_interval=100,
- row_log_interval=10,
- print_nan_grads=False,
- weights_summary='full',
- num_sanity_val_steps=5,
- resume_from_checkpoint=None,
- ):
- self.log_gpu_memory = log_gpu_memory
- self.gradient_clip_val = gradient_clip_val
- self.check_val_every_n_epoch = check_val_every_n_epoch
- self.track_grad_norm = track_grad_norm
- self.on_gpu = True if (gpus and torch.cuda.is_available()) else False
- self.process_position = process_position
- self.weights_summary = weights_summary
- self.max_updates = max_updates
- self.min_epochs = min_epochs
- self.num_sanity_val_steps = num_sanity_val_steps
- self.print_nan_grads = print_nan_grads
- self.resume_from_checkpoint = resume_from_checkpoint
- self.default_save_path = default_save_path
-
-        # training bookkeeping
- self.total_batch_idx = 0
- self.running_loss = []
- self.avg_loss = 0
- self.batch_idx = 0
- self.tqdm_metrics = {}
- self.callback_metrics = {}
- self.num_val_batches = 0
- self.num_training_batches = 0
- self.num_test_batches = 0
- self.get_train_dataloader = None
- self.get_test_dataloaders = None
- self.get_val_dataloaders = None
- self.is_iterable_train_dataloader = False
-
- # training state
- self.model = None
- self.testing = False
- self.disable_validation = False
- self.lr_schedulers = []
- self.optimizers = None
- self.global_step = 0
- self.current_epoch = 0
- self.total_batches = 0
-
- # configure checkpoint callback
- self.checkpoint_callback = checkpoint_callback
- self.checkpoint_callback.save_function = self.save_checkpoint
- self.weights_save_path = self.checkpoint_callback.filepath
-
- # accumulated grads
- self.configure_accumulated_gradients(accumulate_grad_batches)
-
- # allow int, string and gpu list
- self.data_parallel_device_ids = [
- int(x) for x in os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",") if x != '']
- if len(self.data_parallel_device_ids) == 0:
- self.root_gpu = None
- self.on_gpu = False
- else:
- self.root_gpu = self.data_parallel_device_ids[0]
- self.on_gpu = True
-
- # distributed backend choice
- self.use_ddp = False
- self.use_dp = False
- self.single_gpu = False
- self.distributed_backend = 'ddp' if self.num_gpus > 0 else 'dp'
- self.set_distributed_mode(self.distributed_backend)
-
- self.proc_rank = 0
- self.world_size = 1
- self.node_rank = 0
-
- # can't init progress bar here because starting a new process
- # means the progress_bar won't survive pickling
- self.show_progress_bar = show_progress_bar
-
- # logging
- self.log_save_interval = log_save_interval
- self.val_check_interval = val_check_interval
- self.logger = logger
- self.logger.rank = 0
- self.row_log_interval = row_log_interval
-
- @property
- def num_gpus(self):
- gpus = self.data_parallel_device_ids
- if gpus is None:
- return 0
- else:
- return len(gpus)
-
- @property
- def data_parallel(self):
- return self.use_dp or self.use_ddp
-
- def get_model(self):
- is_dp_module = isinstance(self.model, (DDP, DP))
- model = self.model.module if is_dp_module else self.model
- return model
-
- # -----------------------------
- # MODEL TRAINING
- # -----------------------------
- def fit(self, model):
- if self.use_ddp:
- mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,))
- else:
- model.model = model.build_model()
- if not self.testing:
- self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
- if self.use_dp:
- model.cuda(self.root_gpu)
- model = DP(model, device_ids=self.data_parallel_device_ids)
- elif self.single_gpu:
- model.cuda(self.root_gpu)
- self.run_pretrain_routine(model)
- return 1
-
- def init_optimizers(self, optimizers):
-
- # single optimizer
- if isinstance(optimizers, Optimizer):
- return [optimizers], []
-
- # two lists
- elif len(optimizers) == 2 and isinstance(optimizers[0], list):
- optimizers, lr_schedulers = optimizers
- return optimizers, lr_schedulers
-
- # single list or tuple
- elif isinstance(optimizers, list) or isinstance(optimizers, tuple):
- return optimizers, []
-
- def run_pretrain_routine(self, model):
- """Sanity check a few things before starting actual training.
-
- :param model:
- """
- ref_model = model
- if self.data_parallel:
- ref_model = model.module
-
- # give model convenience properties
- ref_model.trainer = self
-
- # set local properties on the model
- self.copy_trainer_model_properties(ref_model)
-
- # link up experiment object
- if self.logger is not None:
- ref_model.logger = self.logger
- self.logger.save()
-
- if self.use_ddp:
- dist.barrier()
-
- # set up checkpoint callback
- # self.configure_checkpoint_callback()
-
- # transfer data loaders from model
- self.get_dataloaders(ref_model)
-
- # track model now.
- # if cluster resets state, the model will update with the saved weights
- self.model = model
-
- # restore training and model before hpc call
- self.restore_weights(model)
-
- # when testing requested only run test and return
- if self.testing:
- self.run_evaluation(test=True)
- return
-
- # check if we should run validation during training
- self.disable_validation = self.num_val_batches == 0
-
- # run tiny validation (if validation defined)
- # to make sure program won't crash during val
- ref_model.on_sanity_check_start()
- ref_model.on_train_start()
- if not self.disable_validation and self.num_sanity_val_steps > 0:
- # init progress bars for validation sanity check
- pbar = tqdm.tqdm(desc='Validation sanity check',
- total=self.num_sanity_val_steps * len(self.get_val_dataloaders()),
- leave=False, position=2 * self.process_position,
- disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch')
- self.main_progress_bar = pbar
- # dummy validation progress bar
- self.val_progress_bar = tqdm.tqdm(disable=True)
-
- self.evaluate(model, self.get_val_dataloaders(), self.num_sanity_val_steps, self.testing)
-
- # close progress bars
- self.main_progress_bar.close()
- self.val_progress_bar.close()
-
- # init progress bar
- pbar = tqdm.tqdm(leave=True, position=2 * self.process_position,
- disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
- file=sys.stdout)
- self.main_progress_bar = pbar
-
- # clear cache before training
- if self.on_gpu:
- torch.cuda.empty_cache()
-
- # CORE TRAINING LOOP
- self.train()
-
- def test(self, model):
- self.testing = True
- self.fit(model)
-
- @property
- def training_tqdm_dict(self):
- tqdm_dict = {
- 'step': '{}'.format(self.global_step),
- }
- tqdm_dict.update(self.tqdm_metrics)
- return tqdm_dict
-
- # --------------------
- # restore ckpt
- # --------------------
- def restore_weights(self, model):
- """
- To restore weights we have two cases.
- First, attempt to restore hpc weights. If successful, don't restore
- other weights.
-
-        Otherwise, try to restore the actual weights.
- :param model:
- :return:
- """
- # clear cache before restore
- if self.on_gpu:
- torch.cuda.empty_cache()
-
- if self.resume_from_checkpoint is not None:
- self.restore(self.resume_from_checkpoint, on_gpu=self.on_gpu)
- else:
- # restore weights if same exp version
- self.restore_state_if_checkpoint_exists(model)
-
- # wait for all models to restore weights
- if self.use_ddp:
- # wait for all processes to catch up
- dist.barrier()
-
- # clear cache after restore
- if self.on_gpu:
- torch.cuda.empty_cache()
-
- def restore_state_if_checkpoint_exists(self, model):
- did_restore = False
-
-        # do nothing if there's no dir or callback
- no_ckpt_callback = (self.checkpoint_callback is None) or (not self.checkpoint_callback)
- if no_ckpt_callback or not os.path.exists(self.checkpoint_callback.filepath):
- return did_restore
-
- # restore trainer state and model if there is a weight for this experiment
- last_steps = -1
- last_ckpt_name = None
-
- # find last epoch
- checkpoints = os.listdir(self.checkpoint_callback.filepath)
- for name in checkpoints:
- if '.ckpt' in name and not name.endswith('part'):
- if 'steps_' in name:
- steps = name.split('steps_')[1]
- steps = int(re.sub('[^0-9]', '', steps))
-
- if steps > last_steps:
- last_steps = steps
- last_ckpt_name = name
-
- # restore last checkpoint
- if last_ckpt_name is not None:
- last_ckpt_path = os.path.join(self.checkpoint_callback.filepath, last_ckpt_name)
- self.restore(last_ckpt_path, self.on_gpu)
- logging.info(f'model and trainer restored from checkpoint: {last_ckpt_path}')
- did_restore = True
-
- return did_restore
-
- def restore(self, checkpoint_path, on_gpu):
- checkpoint = torch.load(checkpoint_path, map_location='cpu')
-
- # load model state
- model = self.get_model()
-
- # load the state_dict on the model automatically
- model.load_state_dict(checkpoint['state_dict'], strict=False)
- if on_gpu:
- model.cuda(self.root_gpu)
- # load training state (affects trainer only)
- self.restore_training_state(checkpoint)
- model.global_step = self.global_step
- del checkpoint
-
- try:
- if dist.is_initialized() and dist.get_rank() > 0:
- return
- except Exception as e:
- print(e)
- return
-
- def restore_training_state(self, checkpoint):
- """
- Restore trainer state.
-        Model will get its chance to update
- :param checkpoint:
- :return:
- """
- if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
- self.checkpoint_callback.best = checkpoint['checkpoint_callback_best']
-
- self.global_step = checkpoint['global_step']
- self.current_epoch = checkpoint['epoch']
-
- if self.testing:
- return
-
- # restore the optimizers
- optimizer_states = checkpoint['optimizer_states']
- for optimizer, opt_state in zip(self.optimizers, optimizer_states):
- if optimizer is None:
- return
- optimizer.load_state_dict(opt_state)
-
-            # move the optimizer state to the GPU one tensor at a time
-            # to avoid OOM
- if self.root_gpu is not None:
- for state in optimizer.state.values():
- for k, v in state.items():
- if isinstance(v, torch.Tensor):
- state[k] = v.cuda(self.root_gpu)
-
- # restore the lr schedulers
- lr_schedulers = checkpoint['lr_schedulers']
- for scheduler, lrs_state in zip(self.lr_schedulers, lr_schedulers):
- scheduler.load_state_dict(lrs_state)
-
- # --------------------
- # MODEL SAVE CHECKPOINT
- # --------------------
- def _atomic_save(self, checkpoint, filepath):
- """Saves a checkpoint atomically, avoiding the creation of incomplete checkpoints.
-
-        This will create a temporary checkpoint with a suffix of ``.part``, then atomically move it to the
-        final location once saving is finished.
-
- Args:
- checkpoint (object): The object to save.
- Built to be used with the ``dump_checkpoint`` method, but can deal with anything which ``torch.save``
- accepts.
- filepath (str|pathlib.Path): The path to which the checkpoint will be saved.
- This points to the file that the checkpoint will be stored in.
- """
- tmp_path = str(filepath) + ".part"
- torch.save(checkpoint, tmp_path)
- os.replace(tmp_path, filepath)
-
- def save_checkpoint(self, filepath):
- checkpoint = self.dump_checkpoint()
- self._atomic_save(checkpoint, filepath)
-
- def dump_checkpoint(self):
-
- checkpoint = {
- 'epoch': self.current_epoch,
- 'global_step': self.global_step
- }
-
- if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
- checkpoint['checkpoint_callback_best'] = self.checkpoint_callback.best
-
- # save optimizers
- optimizer_states = []
- for i, optimizer in enumerate(self.optimizers):
- if optimizer is not None:
- optimizer_states.append(optimizer.state_dict())
-
- checkpoint['optimizer_states'] = optimizer_states
-
- # save lr schedulers
- lr_schedulers = []
- for i, scheduler in enumerate(self.lr_schedulers):
- lr_schedulers.append(scheduler.state_dict())
-
- checkpoint['lr_schedulers'] = lr_schedulers
-
- # add the hparams and state_dict from the model
- model = self.get_model()
- checkpoint['state_dict'] = model.state_dict()
- # give the model a chance to add a few things
- model.on_save_checkpoint(checkpoint)
-
- return checkpoint
-
- def copy_trainer_model_properties(self, model):
- if isinstance(model, DP):
- ref_model = model.module
- elif isinstance(model, DDP):
- ref_model = model.module
- else:
- ref_model = model
-
- for m in [model, ref_model]:
- m.trainer = self
- m.on_gpu = self.on_gpu
- m.use_dp = self.use_dp
- m.use_ddp = self.use_ddp
- m.testing = self.testing
- m.single_gpu = self.single_gpu
-
- def transfer_batch_to_gpu(self, batch, gpu_id):
- # base case: object can be directly moved using `cuda` or `to`
- if callable(getattr(batch, 'cuda', None)):
- return batch.cuda(gpu_id, non_blocking=True)
-
- elif callable(getattr(batch, 'to', None)):
- return batch.to(torch.device('cuda', gpu_id), non_blocking=True)
-
- # when list
- elif isinstance(batch, list):
- for i, x in enumerate(batch):
- batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
- return batch
-
- # when tuple
- elif isinstance(batch, tuple):
- batch = list(batch)
- for i, x in enumerate(batch):
- batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
- return tuple(batch)
-
- # when dict
- elif isinstance(batch, dict):
- for k, v in batch.items():
- batch[k] = self.transfer_batch_to_gpu(v, gpu_id)
-
- return batch
-
- # nothing matches, return the value as is without transform
- return batch
-
- def set_distributed_mode(self, distributed_backend):
- # skip for CPU
- if self.num_gpus == 0:
- return
-
- # single GPU case
- # in single gpu case we allow ddp so we can train on multiple
- # nodes, 1 gpu per node
- elif self.num_gpus == 1:
- self.single_gpu = True
- self.use_dp = False
- self.use_ddp = False
- self.root_gpu = 0
- self.data_parallel_device_ids = [0]
- else:
- if distributed_backend is not None:
- self.use_dp = distributed_backend == 'dp'
- self.use_ddp = distributed_backend == 'ddp'
- elif distributed_backend is None:
- self.use_dp = True
- self.use_ddp = False
-
- logging.info(f'gpu available: {torch.cuda.is_available()}, used: {self.on_gpu}')
-
- def ddp_train(self, gpu_idx, model):
- """
-        Entry point into a DDP training process
- :param gpu_idx:
- :param model:
- :param cluster_obj:
- :return:
- """
- # otherwise default to node rank 0
- self.node_rank = 0
-
- # show progressbar only on progress_rank 0
- self.show_progress_bar = self.show_progress_bar and self.node_rank == 0 and gpu_idx == 0
-
- # determine which process we are and world size
- if self.use_ddp:
- self.proc_rank = self.node_rank * self.num_gpus + gpu_idx
- self.world_size = self.num_gpus
-
- # let the exp know the rank to avoid overwriting logs
- if self.logger is not None:
- self.logger.rank = self.proc_rank
-
- # set up server using proc 0's ip address
- # try to init for 20 times at max in case ports are taken
- # where to store ip_table
- model.trainer = self
- model.init_ddp_connection(self.proc_rank, self.world_size)
-
- # CHOOSE OPTIMIZER
- # allow for lr schedulers as well
- model.model = model.build_model()
- if not self.testing:
- self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
-
- # MODEL
- # copy model to each gpu
- if self.distributed_backend == 'ddp':
- torch.cuda.set_device(gpu_idx)
- model.cuda(gpu_idx)
-
- # set model properties before going into wrapper
- self.copy_trainer_model_properties(model)
-
- # override root GPU
- self.root_gpu = gpu_idx
-
- if self.distributed_backend == 'ddp':
- device_ids = [gpu_idx]
- else:
- device_ids = None
-
- # allow user to configure ddp
- model = model.configure_ddp(model, device_ids)
-
- # continue training routine
- self.run_pretrain_routine(model)
-
- def resolve_root_node_address(self, root_node):
- if '[' in root_node:
- name = root_node.split('[')[0]
- number = root_node.split(',')[0]
- if '-' in number:
- number = number.split('-')[0]
-
- number = re.sub('[^0-9]', '', number)
- root_node = name + number
-
- return root_node
-
- def log_metrics(self, metrics, grad_norm_dic, step=None):
- """Logs the metric dict passed in.
-
- :param metrics:
- :param grad_norm_dic:
- """
- # added metrics by Lightning for convenience
- metrics['epoch'] = self.current_epoch
-
- # add norms
- metrics.update(grad_norm_dic)
-
- # turn all tensors to scalars
- scalar_metrics = self.metrics_to_scalars(metrics)
-
- step = step if step is not None else self.global_step
- # log actual metrics
- if self.proc_rank == 0 and self.logger is not None:
- self.logger.log_metrics(scalar_metrics, step=step)
- self.logger.save()
-
- def add_tqdm_metrics(self, metrics):
- for k, v in metrics.items():
- if type(v) is torch.Tensor:
- v = v.item()
-
- self.tqdm_metrics[k] = v
-
- def metrics_to_scalars(self, metrics):
- new_metrics = {}
- for k, v in metrics.items():
- if isinstance(v, torch.Tensor):
- v = v.item()
-
- if type(v) is dict:
- v = self.metrics_to_scalars(v)
-
- new_metrics[k] = v
-
- return new_metrics
-
- def process_output(self, output, train=False):
- """Reduces output according to the training mode.
-
- Separates loss from logging and tqdm metrics
- :param output:
- :return:
- """
- # ---------------
- # EXTRACT CALLBACK KEYS
- # ---------------
- # all keys not progress_bar or log are candidates for callbacks
- callback_metrics = {}
- for k, v in output.items():
- if k not in ['progress_bar', 'log', 'hiddens']:
- callback_metrics[k] = v
-
- if train and self.use_dp:
- num_gpus = self.num_gpus
- callback_metrics = self.reduce_distributed_output(callback_metrics, num_gpus)
-
- for k, v in callback_metrics.items():
- if isinstance(v, torch.Tensor):
- callback_metrics[k] = v.item()
-
- # ---------------
- # EXTRACT PROGRESS BAR KEYS
- # ---------------
- try:
- progress_output = output['progress_bar']
-
- # reduce progress metrics for tqdm when using dp
- if train and self.use_dp:
- num_gpus = self.num_gpus
- progress_output = self.reduce_distributed_output(progress_output, num_gpus)
-
- progress_bar_metrics = progress_output
- except Exception:
- progress_bar_metrics = {}
-
- # ---------------
- # EXTRACT LOGGING KEYS
- # ---------------
- # extract metrics to log to experiment
- try:
- log_output = output['log']
-
- # reduce progress metrics for tqdm when using dp
- if train and self.use_dp:
- num_gpus = self.num_gpus
- log_output = self.reduce_distributed_output(log_output, num_gpus)
-
- log_metrics = log_output
- except Exception:
- log_metrics = {}
-
- # ---------------
- # EXTRACT LOSS
- # ---------------
- # if output dict doesn't have the keyword loss
- # then assume the output=loss if scalar
- loss = None
- if train:
- try:
- loss = output['loss']
- except Exception:
- if type(output) is torch.Tensor:
- loss = output
- else:
- raise RuntimeError(
- 'No `loss` value in the dictionary returned from `model.training_step()`.'
- )
-
- # when using dp need to reduce the loss
- if self.use_dp:
- loss = self.reduce_distributed_output(loss, self.num_gpus)
-
- # ---------------
- # EXTRACT HIDDEN
- # ---------------
- hiddens = output.get('hiddens')
-
- # use every metric passed in as a candidate for callback
- callback_metrics.update(progress_bar_metrics)
- callback_metrics.update(log_metrics)
-
- # convert tensors to numpy
- for k, v in callback_metrics.items():
- if isinstance(v, torch.Tensor):
- callback_metrics[k] = v.item()
-
- return loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens
-
- def reduce_distributed_output(self, output, num_gpus):
- if num_gpus <= 1:
- return output
-
- # when using DP, we get one output per gpu
- # average outputs and return
- if type(output) is torch.Tensor:
- return output.mean()
-
- for k, v in output.items():
-            # recurse on nested dicts
- if isinstance(output[k], dict):
- output[k] = self.reduce_distributed_output(output[k], num_gpus)
-
- # do nothing when there's a scalar
- elif isinstance(output[k], torch.Tensor) and output[k].dim() == 0:
- pass
-
- # reduce only metrics that have the same number of gpus
- elif output[k].size(0) == num_gpus:
- reduced = torch.mean(output[k])
- output[k] = reduced
- return output
-
- def clip_gradients(self):
- if self.gradient_clip_val > 0:
- model = self.get_model()
- torch.nn.utils.clip_grad_norm_(model.parameters(), self.gradient_clip_val)
-
- def print_nan_gradients(self):
- model = self.get_model()
- for param in model.parameters():
- if (param.grad is not None) and torch.isnan(param.grad.float()).any():
- logging.info(param, param.grad)
-
- def configure_accumulated_gradients(self, accumulate_grad_batches):
- self.accumulate_grad_batches = None
-
- if isinstance(accumulate_grad_batches, dict):
- self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches)
- elif isinstance(accumulate_grad_batches, int):
- schedule = {1: accumulate_grad_batches}
- self.accumulation_scheduler = GradientAccumulationScheduler(schedule)
- else:
- raise TypeError("Gradient accumulation supports only int and dict types")
-
- def get_dataloaders(self, model):
- if not self.testing:
- self.init_train_dataloader(model)
- self.init_val_dataloader(model)
- else:
- self.init_test_dataloader(model)
-
- if self.use_ddp:
- dist.barrier()
- if not self.testing:
- self.get_train_dataloader()
- self.get_val_dataloaders()
- else:
- self.get_test_dataloaders()
-
- def init_train_dataloader(self, model):
- self.fisrt_epoch = True
- self.get_train_dataloader = model.train_dataloader
- if isinstance(self.get_train_dataloader(), torch.utils.data.DataLoader):
- self.num_training_batches = len(self.get_train_dataloader())
- self.num_training_batches = int(self.num_training_batches)
- else:
- self.num_training_batches = float('inf')
- self.is_iterable_train_dataloader = True
- if isinstance(self.val_check_interval, int):
- self.val_check_batch = self.val_check_interval
- else:
- self._percent_range_check('val_check_interval')
- self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
- self.val_check_batch = max(1, self.val_check_batch)
-
- def init_val_dataloader(self, model):
- self.get_val_dataloaders = model.val_dataloader
- self.num_val_batches = 0
- if self.get_val_dataloaders() is not None:
- if isinstance(self.get_val_dataloaders()[0], torch.utils.data.DataLoader):
- self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())
- self.num_val_batches = int(self.num_val_batches)
- else:
- self.num_val_batches = float('inf')
-
- def init_test_dataloader(self, model):
- self.get_test_dataloaders = model.test_dataloader
- if self.get_test_dataloaders() is not None:
- if isinstance(self.get_test_dataloaders()[0], torch.utils.data.DataLoader):
- self.num_test_batches = sum(len(dataloader) for dataloader in self.get_test_dataloaders())
- self.num_test_batches = int(self.num_test_batches)
- else:
- self.num_test_batches = float('inf')
-
- def evaluate(self, model, dataloaders, max_batches, test=False):
- """Run evaluation code.
-
- :param model: PT model
- :param dataloaders: list of PT dataloaders
- :param max_batches: Scalar
- :param test: boolean
- :return:
- """
- # enable eval mode
- model.zero_grad()
- model.eval()
-
- # copy properties for forward overrides
- self.copy_trainer_model_properties(model)
-
- # disable gradients to save memory
- torch.set_grad_enabled(False)
-
- if test:
- self.get_model().test_start()
- # bookkeeping
- outputs = []
-
- # run training
- for dataloader_idx, dataloader in enumerate(dataloaders):
- dl_outputs = []
- for batch_idx, batch in enumerate(dataloader):
-
- if batch is None: # pragma: no cover
- continue
-
- # stop short when on fast_dev_run (sets max_batch=1)
- if batch_idx >= max_batches:
- break
-
- # -----------------
- # RUN EVALUATION STEP
- # -----------------
- output = self.evaluation_forward(model,
- batch,
- batch_idx,
- dataloader_idx,
- test)
-
- # track outputs for collation
- dl_outputs.append(output)
-
- # batch done
- if test:
- self.test_progress_bar.update(1)
- else:
- self.val_progress_bar.update(1)
- outputs.append(dl_outputs)
-
- # with a single dataloader don't pass an array
- if len(dataloaders) == 1:
- outputs = outputs[0]
-
- # give model a chance to do something with the outputs (and method defined)
- model = self.get_model()
- if test:
- eval_results_ = model.test_end(outputs)
- else:
- eval_results_ = model.validation_end(outputs)
- eval_results = eval_results_
-
- # enable train mode again
- model.train()
-
-        # re-enable gradients after evaluation
- torch.set_grad_enabled(True)
-
- return eval_results
-
- def run_evaluation(self, test=False):
- # when testing make sure user defined a test step
- model = self.get_model()
- model.on_pre_performance_check()
-
- # select dataloaders
- if test:
- dataloaders = self.get_test_dataloaders()
- max_batches = self.num_test_batches
- else:
- # val
- dataloaders = self.get_val_dataloaders()
- max_batches = self.num_val_batches
-
- # init validation or test progress bar
- # main progress bar will already be closed when testing so initial position is free
- position = 2 * self.process_position + (not test)
- desc = 'Testing' if test else 'Validating'
- pbar = tqdm.tqdm(desc=desc, total=max_batches, leave=test, position=position,
- disable=not self.show_progress_bar, dynamic_ncols=True,
- unit='batch', file=sys.stdout)
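- # stored as self.test_progress_bar or self.val_progress_bar depending on the mode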
- setattr(self, f'{"test" if test else "val"}_progress_bar', pbar)
-
- # run evaluation
- eval_results = self.evaluate(self.model,
- dataloaders,
- max_batches,
- test)
- if eval_results is not None:
- _, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(
- eval_results)
-
- # add metrics to prog bar
- self.add_tqdm_metrics(prog_bar_metrics)
-
- # log metrics
- self.log_metrics(log_metrics, {})
-
- # track metrics for callbacks
- self.callback_metrics.update(callback_metrics)
-
- # hook
- model.on_post_performance_check()
-
- # add model specific metrics
- tqdm_metrics = self.training_tqdm_dict
- if not test:
- self.main_progress_bar.set_postfix(**tqdm_metrics)
-
- # close progress bar
- if test:
- self.test_progress_bar.close()
- else:
- self.val_progress_bar.close()
-
- # model checkpointing
- if self.proc_rank == 0 and self.checkpoint_callback is not None and not test:
- self.checkpoint_callback.on_epoch_end(epoch=self.current_epoch,
- logs=self.callback_metrics)
-
- def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test=False):
- # make dataloader_idx arg in validation_step optional
- args = [batch, batch_idx]
-
- if test and len(self.get_test_dataloaders()) > 1:
- args.append(dataloader_idx)
-
- elif not test and len(self.get_val_dataloaders()) > 1:
- args.append(dataloader_idx)
-
- # handle DP, DDP forward
- if self.use_ddp or self.use_dp:
- output = model(*args)
- return output
-
- # single GPU
- if self.single_gpu:
- # for single GPU put inputs on gpu manually
- root_gpu = 0
- if isinstance(self.data_parallel_device_ids, list):
- root_gpu = self.data_parallel_device_ids[0]
- batch = self.transfer_batch_to_gpu(batch, root_gpu)
- args[0] = batch
-
- # run the step on CPU or single GPU
- if test:
- output = model.test_step(*args)
- else:
- output = model.validation_step(*args)
-
- return output
-
- def train(self):
- model = self.get_model()
- # run all epochs
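- # the loop bound is effectively "run forever"; training is ended from
- # run_training_epoch once global_step exceeds max_updates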
- for epoch in range(self.current_epoch, 1000000):
- # set seed for distributed sampler (enables shuffling for each epoch)
- if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):
- self.get_train_dataloader().sampler.set_epoch(epoch)
-
- # get model
- model = self.get_model()
-
- # update training progress in trainer and model
- model.current_epoch = epoch
- self.current_epoch = epoch
-
- total_val_batches = 0
- if not self.disable_validation:
- # val can be checked multiple times in epoch
- is_val_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
- val_checks_per_epoch = self.num_training_batches // self.val_check_batch
- val_checks_per_epoch = val_checks_per_epoch if is_val_epoch else 0
- total_val_batches = self.num_val_batches * val_checks_per_epoch
-
- # total batches includes multiple val checks
- self.total_batches = self.num_training_batches + total_val_batches
- self.batch_loss_value = 0 # accumulated grads
-
- if self.is_iterable_train_dataloader:
- # for iterable train loader, the progress bar never ends
- num_iterations = None
- else:
- num_iterations = self.total_batches
-
- # reset the main progress bar description for the new epoch
- desc = f'Epoch {epoch + 1}' if not self.is_iterable_train_dataloader else ''
- self.main_progress_bar.set_description(desc)
-
- # update the gradient accumulation factor according to the accumulation_scheduler
- self.accumulation_scheduler.on_epoch_begin(epoch, self)
-
- # -----------------
- # RUN TRAINING EPOCH
- # -----------------
- self.run_training_epoch()
-
- # update LR schedulers
- if self.lr_schedulers is not None:
- for lr_scheduler in self.lr_schedulers:
- lr_scheduler.step(epoch=self.current_epoch)
-
- self.main_progress_bar.close()
-
- model.on_train_end()
-
- if self.logger is not None:
- self.logger.finalize("success")
-
- def run_training_epoch(self):
- # before epoch hook
- if self.is_function_implemented('on_epoch_start'):
- model = self.get_model()
- model.on_epoch_start()
-
- # run epoch
- for batch_idx, batch in enumerate(self.get_train_dataloader()):
- # stop epoch if we limited the number of training batches
- if batch_idx >= self.num_training_batches:
- break
-
- self.batch_idx = batch_idx
-
- model = self.get_model()
- model.global_step = self.global_step
-
- # ---------------
- # RUN TRAIN STEP
- # ---------------
- output = self.run_training_batch(batch, batch_idx)
- batch_result, grad_norm_dic, batch_step_metrics = output
-
- # a -1 return from run_training_batch (e.g. when on_batch_start returns -1) ends the epoch early
- early_stop_epoch = batch_result == -1
-
- # ---------------
- # RUN VAL STEP
- # ---------------
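- # validate every val_check_batch global steps, but skip the check that would
- # otherwise fire on the very first training batch (first_epoch flag)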
- should_check_val = (
- not self.disable_validation and self.global_step % self.val_check_batch == 0 and not self.first_epoch)
- self.first_epoch = False
-
- if should_check_val:
- self.run_evaluation(test=self.testing)
-
- # when logs should be saved
- should_save_log = (batch_idx + 1) % self.log_save_interval == 0 or early_stop_epoch
- if should_save_log:
- if self.proc_rank == 0 and self.logger is not None:
- self.logger.save()
-
- # when metrics should be logged
- should_log_metrics = batch_idx % self.row_log_interval == 0 or early_stop_epoch
- if should_log_metrics:
- # logs user requested information to logger
- self.log_metrics(batch_step_metrics, grad_norm_dic)
-
- self.global_step += 1
- self.total_batch_idx += 1
-
- # end epoch early
- # stop when the flag is changed or we've gone past the amount
- # requested in the batches
- if early_stop_epoch:
- break
- if self.global_step > self.max_updates:
- print("| Training end..")
- exit()
-
- # epoch end hook
- if self.is_function_implemented('on_epoch_end'):
- model = self.get_model()
- model.on_epoch_end()
-
- def run_training_batch(self, batch, batch_idx):
- # track grad norms
- grad_norm_dic = {}
-
- # track all metrics for callbacks
- all_callback_metrics = []
-
- # track metrics to log
- all_log_metrics = []
-
- if batch is None:
- return 0, grad_norm_dic, {}
-
- # hook
- if self.is_function_implemented('on_batch_start'):
- model_ref = self.get_model()
- response = model_ref.on_batch_start(batch)
-
- if response == -1:
- return -1, grad_norm_dic, {}
-
- splits = [batch]
- self.hiddens = None
- for split_idx, split_batch in enumerate(splits):
- self.split_idx = split_idx
-
- # call training_step once per optimizer
- for opt_idx, optimizer in enumerate(self.optimizers):
- if optimizer is None:
- continue
- # make sure only the gradients of the current optimizer's parameters are calculated
- # in the training step to prevent dangling gradients in multiple-optimizer setup.
- if len(self.optimizers) > 1:
- for param in self.get_model().parameters():
- param.requires_grad = False
- for group in optimizer.param_groups:
- for param in group['params']:
- param.requires_grad = True
-
- # wrap the forward step in a closure so second order methods work
- def optimizer_closure():
- # forward pass
- output = self.training_forward(
- split_batch, batch_idx, opt_idx, self.hiddens)
-
- closure_loss = output[0]
- progress_bar_metrics = output[1]
- log_metrics = output[2]
- callback_metrics = output[3]
- self.hiddens = output[4]
- if closure_loss is None:
- return None
-
- # accumulate loss
- # (if accumulate_grad_batches = 1 no effect)
- closure_loss = closure_loss / self.accumulate_grad_batches
-
- # backward pass
- model_ref = self.get_model()
- if closure_loss.requires_grad:
- model_ref.backward(closure_loss, optimizer)
-
- # track metrics for callbacks
- all_callback_metrics.append(callback_metrics)
-
- # track progress bar metrics
- self.add_tqdm_metrics(progress_bar_metrics)
- all_log_metrics.append(log_metrics)
-
- # run the on_after_backward hook if the model implements it
- if self.is_function_implemented('on_after_backward'):
- model_ref = self.get_model()
- model_ref.on_after_backward()
-
- return closure_loss
-
- # calculate loss
- loss = optimizer_closure()
- if loss is None:
- continue
-
- # nan grads
- if self.print_nan_grads:
- self.print_nan_gradients()
-
- # track total loss for logging (avoid mem leaks)
- self.batch_loss_value += loss.item()
-
- # gradient update with accumulated gradients
- if (self.batch_idx + 1) % self.accumulate_grad_batches == 0:
-
- # track gradient norms when requested
- if batch_idx % self.row_log_interval == 0:
- if self.track_grad_norm > 0:
- model = self.get_model()
- grad_norm_dic = model.grad_norm(
- self.track_grad_norm)
-
- # clip gradients
- self.clip_gradients()
-
- # calls .step(), .zero_grad()
- # override function to modify this behavior
- model = self.get_model()
- model.optimizer_step(self.current_epoch, batch_idx, optimizer, opt_idx)
-
- # calculate running loss for display
- self.running_loss.append(self.batch_loss_value)
- self.batch_loss_value = 0
- self.avg_loss = np.mean(self.running_loss[-100:])
-
- # activate batch end hook
- if self.is_function_implemented('on_batch_end'):
- model = self.get_model()
- model.on_batch_end()
-
- # update progress bar
- self.main_progress_bar.update(1)
- self.main_progress_bar.set_postfix(**self.training_tqdm_dict)
-
- # collapse all metrics into one dict
- all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}
-
- # track all metrics for callbacks
- self.callback_metrics.update({k: v for d in all_callback_metrics for k, v in d.items()})
-
- return 0, grad_norm_dic, all_log_metrics
-
- def training_forward(self, batch, batch_idx, opt_idx, hiddens):
- """
- Handle the forward pass for each training case (distributed, single GPU, CPU).
- :param batch: current training batch
- :param batch_idx: index of the batch within the epoch
- :param opt_idx: index of the optimizer taking this step
- :param hiddens: hidden state carried between batch splits
- :return: processed training_step output
- """
- # ---------------
- # FORWARD
- # ---------------
- # pass the optimizer index along with the batch to training_step
- args = [batch, batch_idx, opt_idx]
-
- # distributed forward
- if self.use_ddp or self.use_dp:
- output = self.model(*args)
- # single GPU forward
- elif self.single_gpu:
- gpu_id = 0
- if isinstance(self.data_parallel_device_ids, list):
- gpu_id = self.data_parallel_device_ids[0]
- batch = self.transfer_batch_to_gpu(copy.copy(batch), gpu_id)
- args[0] = batch
- output = self.model.training_step(*args)
- # CPU forward
- else:
- output = self.model.training_step(*args)
-
- # allow any model to define training_end
- model_ref = self.get_model()
- output_ = model_ref.training_end(output)
- if output_ is not None:
- output = output_
-
- # format and reduce outputs accordingly
- output = self.process_output(output, train=True)
-
- return output
-
- # ---------------
- # Utils
- # ---------------
- def is_function_implemented(self, f_name):
- model = self.get_model()
- f_op = getattr(model, f_name, None)
- return callable(f_op)
-
- def _percent_range_check(self, name):
- value = getattr(self, name)
- msg = f"`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}."
- if name == "val_check_interval":
- msg += " If you want to disable validation set `val_percent_check` to 0.0 instead."
-
- if not 0. <= value <= 1.:
- raise ValueError(msg)
diff --git a/spaces/Siyuan0730/revise_IELTS_writting/README.md b/spaces/Siyuan0730/revise_IELTS_writting/README.md
deleted file mode 100644
index c833df8f84a222dc6ab29b98b081d2128ce4ccc0..0000000000000000000000000000000000000000
--- a/spaces/Siyuan0730/revise_IELTS_writting/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Revise IELTS Writting
-emoji: 🏃
-colorFrom: pink
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.28.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SkalskiP/SAM_and_ProPainter/Dockerfile b/spaces/SkalskiP/SAM_and_ProPainter/Dockerfile
deleted file mode 100644
index e2b0f324d5f91129698a2061588e1c850bfe355f..0000000000000000000000000000000000000000
--- a/spaces/SkalskiP/SAM_and_ProPainter/Dockerfile
+++ /dev/null
@@ -1,60 +0,0 @@
-FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
-
-ENV DEBIAN_FRONTEND=noninteractive
-
-#RUN apt-get update && apt-get install -y \
-# git wget libgl1-mesa-glx libglib2.0-0 ffmpeg libx264-dev \
-# && rm -rf /var/lib/apt/lists/*
-
-RUN apt-get update && apt-get install -y \
- git make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev \
- libsqlite3-dev wget curl llvm libncursesw5-dev xz-utils tk-dev libxml2-dev \
- libxmlsec1-dev libffi-dev liblzma-dev git-lfs ffmpeg libsm6 libxext6 cmake \
- libgl1-mesa-glx \
- && rm -rf /var/lib/apt/lists/* && git lfs install
-
-RUN useradd -m -u 1000 user
-
-USER user
-
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH \
- PYTHONPATH=$HOME/app \
- PYTHONUNBUFFERED=1 \
- GRADIO_ALLOW_FLAGGING=never \
- GRADIO_NUM_PORTS=1 \
- GRADIO_SERVER_NAME=0.0.0.0 \
- GRADIO_THEME=huggingface \
- GRADIO_SHARE=False \
- SYSTEM=spaces
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Clone your repository or add your code to the container
-RUN git clone https://github.com/sczhou/ProPainter.git $HOME/app
-
-# Install specific versions of PyTorch and TorchVision
-RUN pip install torch==2.0.1+cu117 torchvision==0.15.2+cu117 -f https://download.pytorch.org/whl/torch_stable.html
-
-# Install dependencies
-RUN pip install --no-cache-dir -r requirements.txt \
- gradio==3.50.2 opencv-python transformers supervision
-
-# Download weights
-RUN mkdir -p $HOME/app/weights
-RUN wget -c -O $HOME/app/weights/i3d_rgb_imagenet.pt https://huggingface.co/camenduru/ProPainter/resolve/main/i3d_rgb_imagenet.pt
-RUN wget -c -O $HOME/app/weights/raft-things.pth https://huggingface.co/camenduru/ProPainter/resolve/main/raft-things.pth
-RUN wget -c -O $HOME/app/weights/recurrent_flow_completion.pth https://huggingface.co/camenduru/ProPainter/resolve/main/recurrent_flow_completion.pth
-RUN wget -c -O $HOME/app/weights/ProPainter.pth https://huggingface.co/camenduru/ProPainter/resolve/main/ProPainter.pth
-
-COPY app.py .
-
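-# List the app directory contents (useful when debugging the image build)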
-RUN find $HOME/app
-
-# Set the environment variable to specify the GPU device
-ENV CUDA_DEVICE_ORDER=PCI_BUS_ID
-ENV CUDA_VISIBLE_DEVICES=0
-
-# Run your app.py script
-CMD ["python", "app.py"]
diff --git a/spaces/SuYuanS/AudioCraft_Plus/scripts/templates/login.html b/spaces/SuYuanS/AudioCraft_Plus/scripts/templates/login.html
deleted file mode 100644
index dd89ac654bceca14a9dec7d1a7f8206d1425a7a1..0000000000000000000000000000000000000000
--- a/spaces/SuYuanS/AudioCraft_Plus/scripts/templates/login.html
+++ /dev/null
@@ -1,20 +0,0 @@
-{% extends "base.html" %}
-{% block content %}
-
-
- You must identify yourself first! We use a highly secured protocol
- where you just decide your username, and that's it. No password, no encryption,
- just pure trust.
-