diff --git a/spaces/101-5/gpt4free/testing/binghuan/BingHuan.py b/spaces/101-5/gpt4free/testing/binghuan/BingHuan.py
deleted file mode 100644
index 8c859c080a9ac63ea90fec07c6640486c657cb05..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/testing/binghuan/BingHuan.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import os,sys
-import json
-import subprocess
-# from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://b.ai-huan.xyz'
-model = ['gpt-3.5-turbo', 'gpt-4']
-supports_stream = True
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    path = os.path.dirname(os.path.realpath(__file__))
-    config = json.dumps({
-        'messages': messages,
-        'model': model}, separators=(',', ':'))
-    cmd = ['python', f'{path}/helpers/binghuan.py', config]
-
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
-    for line in iter(p.stdout.readline, b''):
-        yield line.decode('cp1252')
-
-
-
-# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-#     '(%s)' % ', '.join(
-#         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
-
-
-# Temporary For ChatCompletion Class
-class ChatCompletion:
-    @staticmethod
-    def create(model: str, messages: list, provider: None or str, stream: bool = False, auth: str = False, **kwargs):
-        kwargs['auth'] = auth
-
-        if provider and needs_auth and not auth:
-            print(
-                f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
-            sys.exit(1)
-
-        try:
-            return (_create_completion(model, messages, stream, **kwargs)
-                    if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
-        except TypeError as e:
-            print(e)
-            arg: str = str(e).split("'")[1]
-            print(
-                f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
-            sys.exit(1)
\ No newline at end of file
diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/app.py b/spaces/123Kumar/vits-uma-genshin-honkai123/app.py
deleted file mode 100644
index 92ddafdcd240434f58569b0e6964ef331a971dcf..0000000000000000000000000000000000000000
--- a/spaces/123Kumar/vits-uma-genshin-honkai123/app.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import time
-import gradio as gr
-import utils
-import commons
-from models import SynthesizerTrn
-from text import text_to_sequence
-from torch import no_grad, LongTensor
-import torch
-
-hps_ms = utils.get_hparams_from_file(r'./model/config.json')
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-net_g_ms = SynthesizerTrn(
-    len(hps_ms.symbols),
-    hps_ms.data.filter_length // 2 + 1,
-    hps_ms.train.segment_size // hps_ms.data.hop_length,
-    n_speakers=hps_ms.data.n_speakers,
-    **hps_ms.model).to(device)
-_ = net_g_ms.eval()
-speakers = hps_ms.speakers
-model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None)
-
-def get_text(text, hps):
-    text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
-    if hps.data.add_blank:
-        text_norm = commons.intersperse(text_norm, 0)
-    text_norm = LongTensor(text_norm)
-    return text_norm, clean_text
-
-def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale):
-    start = time.perf_counter()
-    if not len(text):
-        return "输入文本不能为空！", None, None
-    text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
-    if len(text) > 500:
-        return f"输入文字过长！{len(text)}>500", None, None
-    if language == 0:
-        text = f"[ZH]{text}[ZH]"
-    elif language == 1:
-        text = f"[JA]{text}[JA]"
-    else:
-        text = f"{text}"
-    stn_tst, clean_text = get_text(text, hps_ms)
-    with no_grad():
-        x_tst = stn_tst.unsqueeze(0)
-        x_tst_lengths = LongTensor([stn_tst.size(0)])
-        speaker_id = LongTensor([speaker_id])
-        audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
-                               length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
-
-    return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s"
-
-def search_speaker(search_value):
-    for s in speakers:
-        if search_value == s:
-            return s
-    for s in speakers:
-        if search_value in s:
-            return s
-
-def change_lang(language):
-    if language == 0:
-        return 0.6, 0.668, 1.2
-    else:
-        return 0.6, 0.668, 1.1
-
-download_audio_js = """
-() =>{{
-    let root = document.querySelector("body > gradio-app");
-    if (root.shadowRoot != null)
-        root = root.shadowRoot;
-    let audio = root.querySelector("#tts-audio").querySelector("audio");
-    let text = root.querySelector("#input-text").querySelector("textarea");
-    if (audio == undefined)
-        return;
-    text = text.value;
-    if (text == undefined)
-        text = Math.floor(Math.random()*100000000);
-    audio = audio.src;
-    let oA = document.createElement("a");
-    oA.download = text.substr(0, 20)+'.wav';
-    oA.href = audio;
-    document.body.appendChild(oA);
-    oA.click();
-    oA.remove();
-}}
-"""
-
-if __name__ == '__main__':
-    with gr.Blocks() as app:
-        gr.Markdown(
-            "#
-Cricket Wireless is a prepaid wireless service provider that offers a variety of plans and features for customers who want to bring their own device (BYOD) to the network. However, not all devices are compatible with Cricket's network, so you need to check your phone's compatibility before you switch.
-Download File ✦ https://byltly.com/2uKzbu
-In this article, we will explain how to check if your phone is compatible with Cricket's BYOD program, what the requirements and benefits of using your own device on Cricket are, and which compatible devices you can bring to Cricket.
-The easiest way to check if your phone is compatible with Cricket's network is to use their online IMEI checker tool. IMEI stands for International Mobile Equipment Identity, and it is a unique 15-digit number that identifies your device. You can find your IMEI by dialing *#06# on your phone's keypad, or by looking in your phone's settings or on the back of your device.
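As a quick aside on IMEIs: the 15th digit is a Luhn check digit, so you can sanity-check a number for typos before submitting it to the tool. The Python sketch below is only a format check, not a Cricket compatibility check, and the sample number is the standard documentation example rather than a real device.

```python
def is_valid_imei(imei: str) -> bool:
    """Return True if the string is 15 digits whose Luhn checksum is valid."""
    if len(imei) != 15 or not imei.isdigit():
        return False
    total = 0
    for i, ch in enumerate(imei):
        d = int(ch)
        if i % 2 == 1:   # double every second digit, left to right
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0

print(is_valid_imei("490154203237518"))  # True for this well-known sample IMEI
```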
-Once you have your IMEI, go to https://www.cricketwireless.com/cell-phones/bring-your-phone and enter it in the box. The tool will tell you if your phone is compatible with Cricket's network, and if it is eligible for HD Voice, which is a feature that enhances the quality and clarity of voice calls.
-If your phone is not compatible, you may need to unlock it from your current carrier, or buy a new device that works on Cricket's network. You can also check out Cricket's list of compatible devices here.
-To bring your own device to Cricket, you need to meet the following requirements:
-
-By bringing your own device to Cricket, you can enjoy the following benefits:
-Cricket has a wide range of compatible devices that you can bring to their network, including smartphones, feature phones, tablets, and data-only devices. Here are some examples of compatible devices that you can bring to Cricket:
-| Brand | Model |
-| --- | --- |
-| Apple | iPhone 6 and later |
-| Google | Pixel 4 and later |
-| Samsung | Galaxy S9 and later |
-| LG | G8 ThinQ and later |
-| Moto | G Power and later |
-| Nokia | C5 Endi and later |
-| TCL | TCL 10 Pro and later |
-| Z | |
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Diablo 2 Fury Within 1.09 A Mod Based on the Classic Patch 1.09 Version.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Diablo 2 Fury Within 1.09 A Mod Based on the Classic Patch 1.09 Version.md
deleted file mode 100644
index fe00faef9d89ebd9622966d90f2d9fd28f416fe6..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Diablo 2 Fury Within 1.09 A Mod Based on the Classic Patch 1.09 Version.md
+++ /dev/null
@@ -1,166 +0,0 @@
-
-Download Diablo 2 Fury Within 1.09: A Guide for Fans of the Classic Action RPG
-If you are a fan of Diablo 2, one of the most popular and influential action role-playing games of all time, you might be interested in trying out a mod that adds new content, features, and challenges to the game. Diablo 2 Fury Within 1.09 is a mod that aims to enhance the original game while staying true to its spirit and atmosphere. In this article, we will show you how to download, install, and play this mod, as well as some tips and tricks to make the most of it.
-download diablo 2 fury within 1.09
-Download ———>>> https://byltly.com/2uKyDq
-What is Diablo 2 Fury Within 1.09?
-Diablo 2 Fury Within 1.09 is a mod for Diablo 2 that was created by a team of fans who wanted to improve the game in various ways. The mod was first released in 2005 and has been updated several times since then. The latest version, 1.09, was released in 2019.
-The mod adds new content such as classes, skills, items, monsters, quests, maps, music, sounds, graphics, and more. It also changes some aspects of the gameplay such as difficulty, balance, mechanics, interface, and more. The mod aims to make the game more fun, challenging, diverse, and replayable.
-How to download Diablo 2 Fury Within 1.09?
-To download Diablo 2 Fury Within 1.09, you will need a few things:
-
-Once you have these things ready, you can follow these steps:
-Requirements and compatibility
-Before you download the mod, you should check if your system meets the minimum requirements to run it. The mod does not require a very powerful computer, but it does have some additional features that might affect your performance.
-The minimum requirements are:
-
-You should also check if your version of Diablo 2 is compatible with the mod. The mod works best with version 1.10 or higher of both Diablo 2 and the Lord of Destruction expansion. If you have an older version, you might encounter some issues or bugs.
-To check your version of Diablo 2, you can open the game launcher and look at the bottom left corner of the screen. You should see something like "Version x.xx". If you have an older version than 1.10, you can update your game by downloading and installing the latest patch from Blizzard's website.
-How to download diablo 2 fury within 1.09 for free
-Download links and sources
-Once you have verified your requirements and compatibility, you can proceed to download the mod files. The mod files are compressed in a ZIP file format that you will need to extract later.
-The official source for downloading the mod is its website: http://furywithin.org/. Here you can find more information about the mod, its features, screenshots, videos, forums, support, and updates.
-The direct link for downloading the mod file is: http://furywithin.org/download/FuryWithin109.zip. The file size is about 800 MB.
-You should always download the mod from its official source or from trusted websites that host it. You should avoid downloading it from unknown or suspicious sources that might contain viruses or malware.
-You should also verify the authenticity of the file by checking its checksum value. A checksum is a unique code that identifies a file based on its content. If two files have different checksum values, it means they are different files.
-The official checksum value for the mod file is:
-
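To make the verification step concrete, here is a minimal Python sketch that computes the SHA-256 digest of the downloaded archive locally, as an alternative to the online tools mentioned next; the file name is a placeholder for wherever you saved the download.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MB chunks so a large download never has to fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Compare this output against the value published on the mod's website.
print(sha256_of("FuryWithin109.zip"))
```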
-You can use online tools such as https://md5file.com/calculator or https://emn178.github.io/online-tools/sha256_checksum.html to calculate the checksum value of your downloaded file and compare it with the official one.
-Installation process
-After you have downloaded and verified the mod file, you can proceed to install it in your Diablo 2 folder. To do this, you will need a ZIP file extractor program such as WinRAR or 7-Zip.
-You can follow these steps:
-
-How to play Diablo 2 Fury Within 1.09?
-To play Diablo 2 Fury Within 1.09, you just need to launch your Diablo 2 game as usual. You should see a new splash screen with the mod logo and version number.
-You can create a new character or use an existing one to play the mod. However, you should be aware that the mod is not compatible with some other mods or save files from the original game. You might encounter some errors or crashes if you try to use them.
-You should also back up your save files before playing the mod, in case you want to revert to the original game or switch to another mod. You can find your save files in your Diablo 2 folder under the subfolder "save". You can copy them to another location for safekeeping.
-Once you are in the game, you can enjoy the mod and its features. Here are some tips and tricks to help you:
-New features and changes
-The mod adds a lot of new content and changes to the game. Some of the main ones are:
-
-You can access these options by clicking on the icons on the bottom right corner of the screen or by pressing the corresponding hotkeys (F1-F12).
-New classes and skills
-The mod adds six new classes to the game, each with its own unique skills and playstyle. They are:
-
-You can choose one of these classes when creating a new character or use a Rebirth option to change your existing character's class. You can also use a Respec option to change your skill point allocation at any time.
-Each class has three skill trees with 10 skills each. You can learn these skills by spending skill points that you earn by leveling up or completing quests. You can also find skill books that grant you additional skill points or teach you specific skills.
-Some skills have synergies with other skills, meaning they become more powerful when combined together. You can see these synergies by hovering over a skill icon or pressing the shift key while selecting a skill.
-New items and crafting
-, materials, recipes, formulas, catalysts, and more. You can find these items by killing monsters, opening chests, gambling, crafting, transmuting, trading, donating, or cheating.
-Some items have special properties such as prefixes, suffixes, set bonuses, unique effects, ethereal quality, socketed slots, and more. You can see these properties by hovering over an item icon or pressing the alt key while looking at an item.
-Some items can be upgraded or modified using other items such as runes, charms, jewels, gems, materials, recipes, formulas, catalysts, and more. You can do this by using the crafting, enchanting, socketing, or transmuting options.
-Crafting is a new feature that allows you to create new items using materials and recipes. Materials are items that can be used as ingredients for crafting. Recipes are items that can be used as instructions for crafting. You can find materials and recipes by killing monsters, opening chests, gambling, transmuting, trading, donating, or cheating.
-To craft an item, you need to have the required materials and recipe in your inventory. Then you need to click on the crafting icon or press the F6 key to open the crafting window. Here you can see the list of available recipes and their requirements. You can select a recipe and click on the craft button to create the item.
-, set bonuses, unique effects, ethereal quality, socketed slots, and more.
-How to troubleshoot Diablo 2 Fury Within 1.09?
-Diablo 2 Fury Within 1.09 is a mod that modifies the original game in many ways. As such, it might cause some issues or problems for some players. Here are some common issues and solutions for playing the mod:
-Compatibility issues
-The mod might not work well with other mods, patches, or versions of Diablo 2. If you have installed or used any other mods or patches before or after installing the mod, you might encounter some errors or crashes.
-To fix this, you should uninstall or remove any other mods or patches from your Diablo 2 folder. You should also make sure that your version of Diablo 2 and the Lord of Destruction expansion is 1.10 or higher. You can update your game by downloading and installing the latest patch from Blizzard's website.
-Performance issues
-The mod might affect your game performance in terms of speed, graphics, sound, or stability. If you experience any lag, stuttering, freezing, crashing, or other performance issues while playing the mod, you might need to optimize your settings and system.
-To fix this, you should lower your game resolution, quality, and sound options in the game menu. You should also close any unnecessary programs or processes running in the background of your computer. You should also scan your computer for viruses or malware that might slow it down.
-Bug reports and feedback
-The mod might have some bugs or glitches that affect your gameplay experience. If you encounter any bugs or glitches while playing the mod, you should report them to the mod developers and community.
-To do this, you should visit the mod website: http://furywithin.org/. Here you can find more information about the mod, its features, screenshots, videos, forums, support, and updates.
-, logs, system specifications, and steps to reproduce the issue. You should also be polite and respectful when reporting or giving feedback.
-Conclusion
-Diablo 2 Fury Within 1.09 is a mod that enhances the original game in various ways. It adds new content such as classes, skills, items, monsters, quests, maps, music, sounds, graphics, and more. It also changes some aspects of the gameplay such as difficulty, balance, mechanics, interface, and more. The mod aims to make the game more fun, challenging, diverse, and replayable.
-To download and play the mod, you need to have a copy of Diablo 2 and the Lord of Destruction expansion with version 1.10 or higher. You also need to download the mod file from its official website and extract and copy it to your Diablo 2 folder. You can then launch your game as usual and enjoy the mod and its features.
-If you encounter any issues or problems while playing the mod, you can try to fix them by checking your requirements and compatibility, optimizing your settings and system, or reporting them to the mod developers and community.
-If you are a fan of Diablo 2 and want to experience a new and improved version of the game, you should definitely try out Diablo 2 Fury Within 1.09. It is one of the best mods for Diablo 2 and will keep you entertained for hours.
-FAQs
-Here are some frequently asked questions about Diablo 2 Fury Within 1.09:
-
-, fun is subjective and depends on your personal preferences and tastes. You might like or dislike the mod for different reasons. The best way to find out if you like the mod is to try it yourself.
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Winrar Mac [CRACKED].md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Winrar Mac [CRACKED].md
deleted file mode 100644
index 27febaddbefecff4e2717fdd1d6e3a95d967e967..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Winrar Mac [CRACKED].md
+++ /dev/null
@@ -1,33 +0,0 @@
-
-How to Get WinRAR for Free on Mac
-WinRAR is a popular file compression and archiving software that can handle various formats such as RAR, ZIP, CAB, ARJ, LZH, TAR, GZip, UUE, ISO, BZIP2, Z and 7-Zip. WinRAR can help you reduce the size of your files, save disk space, and speed up file transfer. WinRAR also offers features such as encryption, password protection, split archives, and recovery of damaged files.
-free winrar mac
-Download ---> https://byltly.com/2uKzMt
-However, WinRAR is not available for Mac as a graphical user interface (GUI) application. If you want to use WinRAR on Mac, you have to use the command-line version, which requires some technical skills and may not be convenient for most users. Alternatively, you can use one of the many WinRAR alternatives for Mac that offer similar or better functionality and user experience.
-In this article, we will show you how to get WinRAR for free on Mac by using one of the best WinRAR alternatives: Bandizip. Bandizip is a freemium file compression and archiving software that supports various formats such as RAR, ZIP, 7Z, TAR, GZ, ISO, and more. Bandizip also offers features such as encryption, password protection, split archives, preview files, and extraction of multiple archives at once.
-Bandizip is easy to use and has a simple and intuitive interface. You can download Bandizip for free from its official website or from the Mac App Store. Bandizip works on macOS 10.10 or later and requires a 64-bit processor. Here are the steps to get WinRAR for free on Mac by using Bandizip:
-
-To extract a RAR archive using Bandizip, you can follow these steps:
-
-As you can see, Bandizip is a powerful and easy-to-use file compression and archiving software that can help you get WinRAR for free on Mac. Bandizip also has many other features and options that you can explore and customize according to your preferences. Bandizip is a great WinRAR alternative for Mac that you should try today!
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Complete Book Of Olympics Pdf Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/Complete Book Of Olympics Pdf Download.md
deleted file mode 100644
index 0c26ef1645a18cb4b2392891429d34b2f2702cbb..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Complete Book Of Olympics Pdf Download.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-How to Download the Complete Book of the Olympics PDF for Free
-If you are a fan of the Olympic Games and want to learn more about their history, records, and trivia, you might be interested in reading The Complete Book of the Olympics by David Wallechinsky. This book is a comprehensive guide to every edition of the modern Olympics, from Athens 1896 to Tokyo 2020. It covers all the sports, events, athletes, medals, controversies, and stories that have shaped the Olympic movement.
-Complete Book Of Olympics Pdf Download
-Download File · https://imgfil.com/2uy0Ct
-However, this book is not easy to find in print or online. It is out of stock at most bookstores and libraries, and there is no official digital version available. So how can you download the complete book of the Olympics PDF for free?
-The answer is simple: you can use the Internet Archive. The Internet Archive is a non-profit organization that preserves and provides access to millions of books, movies, music, websites, and other digital media. It has a huge collection of public domain and out-of-print books that you can download or read online for free.
-One of these books is The Complete Book of the Olympics by David Wallechinsky. The Internet Archive has a scanned copy of the 1988 edition of this book, which covers the Olympics from 1896 to 1988. You can access this book by visiting this link: https://archive.org/details/completebookofol00wall. On this page, you can see a preview of the book, read it online, or download it as a PDF file.
-
-To download the complete book of the Olympics PDF for free, you need to follow these steps:
-
-That's it! You have successfully downloaded the complete book of the Olympics PDF for free. You can now enjoy reading this amazing book and learn more about the Olympic Games.
-
-If you are wondering why you should read The Complete Book of the Olympics by David Wallechinsky, here are some reasons:
-
-So what are you waiting for? Download the complete book of the Olympics PDF for free today and enjoy reading this masterpiece of Olympic literature.
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dinosaur Sim APK and Become a Prehistoric Beast.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dinosaur Sim APK and Become a Prehistoric Beast.md
deleted file mode 100644
index 1234763ec9ff057b10bb87c1a94de2824173f220..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dinosaur Sim APK and Become a Prehistoric Beast.md
+++ /dev/null
@@ -1,126 +0,0 @@
-
-Dinosaur Sim APK: A Fun and Educational Game for Dinosaur Lovers
-Do you love dinosaurs? Do you want to play as one of them and explore a realistic 3D environment? Do you want to learn more about these amazing creatures and their fossils? If you answered yes to any of these questions, then you should try Dinosaur Sim APK, a game that allows you to play as one of 25 popular dinosaurs and experience their life in different game modes. In this article, we will tell you what Dinosaur Sim APK is, what features it has, how to download and install it, and what its pros and cons are.
-dinosaur sim apk
-Download ⭐ https://urlin.us/2uSUQv
-What is Dinosaur Sim APK?
-Dinosaur Sim APK is an Android game developed by 3583 Bytes, a studio that specializes in creating simulation games. It is a game that lets you play as one of 25 realistic dinosaurs, each with its own animations and sounds. You can fight your way to the top of the food chain or play as a peaceful herbivore in a realistic 3D environment. You can also learn about each of the dinosaurs in the game, color your favorite dinosaurs, and learn about fossils and dinosaur bones in different game modes. Dinosaur Sim APK is a game that is action-packed but also educational, making it a perfect mix for dinosaur lovers of all ages.
-Features of Dinosaur Sim APK
-Dinosaur Sim APK has many features that make it an enjoyable and informative game. Here are some of them:
-25 Playable Dinosaurs
-You can choose from 25 different dinosaurs to play as, each with its own characteristics, abilities, and challenges. You can play as carnivores, herbivores, or omnivores, and experience their life in the wild. Some of the dinosaurs you can play as are Tyrannosaurus Rex, Triceratops, Velociraptor, Stegosaurus, Brachiosaurus, Spinosaurus, and more.
-4 Game Modes
-You can play Dinosaur Sim APK in four different game modes, each with its own objectives and features. They are:
-
-Realistic 3D Graphics and Animations
-Dinosaur Sim APK has stunning 3D graphics and animations that make the game look realistic and immersive. The dinosaurs are beautifully modeled and textured, and they move and sound like real animals. The environment is also detailed and varied, with different terrains, plants, weather effects, day and night cycles, and more.
-dinosaur sim apk download
-Educational Content
-Dinosaur Sim APK is not only a fun game but also an educational one. It teaches you about dinosaurs and their history in an engaging way. You can learn about their anatomy, evolution, classification, behavior, diet, habitat, and more. You can also learn about fossils and how they are formed and studied. The game has a lot of educational content that will enrich your knowledge and curiosity about dinosaurs.
-How to Download and Install Dinosaur Sim APK?
-If you want to play Dinosaur Sim APK on your Android device, you need to download and install it first. Here are the requirements and steps to do so:
-Requirements
-To play Dinosaur Sim APK, you need to have an Android device that meets the following requirements:
-
-Steps
-To download and install Dinosaur Sim APK, you need to follow these steps:
-
-Pros and Cons of Dinosaur Sim APK
-Dinosaur Sim APK is a game that has many advantages but also some disadvantages. Here are some of them:
-Pros
-
-Cons
-
-Conclusion
-Dinosaur Sim APK is a game that lets you play as one of the 25 realistic dinosaurs and experience their life in different game modes. It is a game that is action-packed but also educational, making it a perfect mix for dinosaur lovers of all ages. You can download and install it for free on your Android device and enjoy playing as a dinosaur. However, you should also be aware of its pros and cons before playing it. We hope this article has helped you learn more about Dinosaur Sim APK and how to play it. If you have any questions or feedback, feel free to leave them in the comments section below.
-FAQs
-Here are some frequently asked questions about Dinosaur Sim APK:
-
-The latest version of Dinosaur Sim APK is 1.5.0, which was released on June 15, 2023. It added new dinosaurs, new features, bug fixes, and performance improvements.
-You can play Dinosaur Sim APK offline in some game modes, such as Dino Simulator mode and Dino Paint mode. However, you need an internet connection for some features, such as saving your progress or sharing your creations.
-You can play Dinosaur Sim APK on PC by using an Android emulator, such as BlueStacks or NoxPlayer. However, you may experience some compatibility issues or performance issues depending on your PC specifications.
-You can update Dinosaur Sim APK by downloading the latest version from the official website or by checking for updates in the game settings. You should always update the game to enjoy the latest features and bug fixes.
-Dinosaur Sim APK is safe to download and install on your Android device, as long as you download it from the official website or a trusted source. However, you should always be careful when installing apps from unknown sources and check the permissions and reviews before installing them.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Parking Master Multiplayer 2 Mod Apk for Free - No Ads Unlimited Rewards.md b/spaces/1phancelerku/anime-remove-background/Download Parking Master Multiplayer 2 Mod Apk for Free - No Ads Unlimited Rewards.md
deleted file mode 100644
index 2020ae450bec4f008abfa88f4b0b77d37b236a26..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Parking Master Multiplayer 2 Mod Apk for Free - No Ads Unlimited Rewards.md
+++ /dev/null
@@ -1,114 +0,0 @@
-
-Parking Master Multiplayer 2 Mod APK 2023: The Ultimate Parking Game
-Introduction
-Do you love parking games? Do you want to test your driving skills and challenge your friends in a realistic and fun parking simulator? If yes, then you should try Parking Master Multiplayer 2, the best parking game for Android devices.
-parking master multiplayer 2 mod apk 2023
-Download ———>>> https://jinyurl.com/2uNNll
-What is Parking Master Multiplayer 2?
-Parking Master Multiplayer 2 is a parking game developed by Games Studio, a popular game developer that has created many other successful games such as Racing Fever and Drift Max. In this game, you can choose from a variety of cars, from sports cars to trucks, and park them in different scenarios, such as city streets, parking lots, airports, and more. You can also customize your cars with different colors, stickers, wheels, and accessories.
-Why do you need Parking Master Multiplayer 2 Mod APK 2023?
-Parking Master Multiplayer 2 is a free game, but it has some limitations that can affect your gaming experience. For example, you need to watch ads to get more fuel or unlock new cars. You also need to earn coins and gems to upgrade your cars or buy new ones. These things can be frustrating and time-consuming, especially if you want to enjoy the game without any interruptions or restrictions.
-That's why you need Parking Master Multiplayer 2 Mod APK 2023, a modified version of the game that gives you unlimited fuel, no ads, all cars unlocked, and more. With this mod apk, you can play the game as much as you want, without worrying about running out of fuel or watching annoying ads. You can also access all the cars in the game, from the cheapest to the most expensive ones, and customize them to your liking.
-You can also enjoy the realistic graphics and physics of the game, as well as the multiplayer mode that lets you compete with other players online.
-Features of Parking Master Multiplayer 2 Mod APK 2023
-Unlimited Fuel
-One of the main features of Parking Master Multiplayer 2 Mod APK 2023 is unlimited fuel. In the original game, you have a limited amount of fuel that decreases as you drive your car. When you run out of fuel, you have to watch an ad or pay with gems to refill it. This can be annoying and interrupt your gameplay.
-With Parking Master Multiplayer 2 Mod APK 2023, you don't have to worry about fuel anymore. You have unlimited fuel that never runs out, no matter how long or how far you drive your car. You can play the game without any interruptions or limitations.
-No Ads
-Another feature of Parking Master Multiplayer 2 Mod APK 2023 is no ads. In the original game, you have to watch ads to get more fuel, unlock new cars, or get extra rewards. These ads can be boring and waste your time.
-With Parking Master Multiplayer 2 Mod APK 2023, you don't have to watch any ads at all. You can play the game without any distractions or delays. You can also save your mobile data and battery life by avoiding unnecessary ads.
-All Cars Unlocked
-A third feature of Parking Master Multiplayer 2 Mod APK 2023 is all cars unlocked. In the original game, you have to earn coins and gems to unlock new cars or buy them with real money. There are many cars in the game, from sports cars to trucks, but they are not all available at the beginning. You have to complete levels and missions to unlock them or pay for them.
-With Parking Master Multiplayer 2 Mod APK 2023, you don't have to do any of that. You can access all the cars in the game from the start, without spending any coins, gems, or money. You can choose any car you want and enjoy its features and performance.
-parking master multiplayer 2 mod apk 2023 download
-Realistic Graphics and Physics
-A fourth feature of Parking Master Multiplayer 2 Mod APK 2023 is realistic graphics and physics. The game has amazing graphics that make you feel like you are driving a real car in a real environment. The game also has realistic physics that simulate the behavior of the car and the environment, such as gravity, friction, inertia, and collision.
-With Parking Master Multiplayer 2 Mod APK 2023, you can enjoy the same graphics and physics as the original game, but with better performance and smoother gameplay. You can also adjust the graphics settings to suit your device and preference.
-Multiplayer Mode
-A fifth feature of Parking Master Multiplayer 2 Mod APK 2023 is multiplayer mode. The game has a multiplayer mode that lets you play with other players online. You can join or create a room and invite your friends or random players to join you. You can also chat with them and see their scores and rankings.
-With Parking Master Multiplayer 2 Mod APK 2023, you can enjoy the multiplayer mode without any limitations or problems. You can play with anyone you want, without worrying about lagging or disconnecting. You can also have more fun and challenge by competing with other players who have the same mod apk as you.
-How to download and install Parking Master Multiplayer 2 Mod APK 2023?
-Step 1: Download the APK file from the link below
-The first step to download and install Parking Master Multiplayer 2 Mod APK 2023 is to download the APK file from the link below. The link will take you to a secure and reliable website where you can download the file safely and quickly.
-Download Parking Master Multiplayer 2 Mod APK 2023 here
-Step 2: Enable unknown sources on your device
-The second step to download and install Parking Master Multiplayer 2 Mod APK 2023 is to enable unknown sources on your device. This is necessary because the mod apk is not from the official Google Play Store, so you need to allow your device to install apps from other sources.
-To enable unknown sources, go to your device settings, then security, then unknown sources. Turn on the switch or check the box to enable it. You may also see a pop-up message asking for your permission. Tap on OK or Allow to confirm it.
-Step 3: Install the APK file and enjoy the game
-The third step to download and install Parking Master Multiplayer 2 Mod APK 2023 is to install the APK file and enjoy the game. To install the APK file, go to your file manager or downloads folder and find the file you downloaded. Tap on it and follow the instructions on the screen to install it.
-Once the installation is done, you can open the game and start playing it. You will see that you have unlimited fuel, no ads, all cars unlocked, realistic graphics and physics, and multiplayer mode. You can also customize your cars and settings as you wish.
-Conclusion
-Parking Master Multiplayer 2 is a parking game that tests your driving skills and challenges your friends in a realistic and fun parking simulator. It has many features that make it one of the best parking games for Android devices.
-However, if you want to enjoy the game without any limitations or interruptions, you should download Parking Master Multiplayer 2 Mod APK 2023, a modified version of the game that gives you unlimited fuel, no ads, all cars unlocked, and more.
-To download Parking Master Multiplayer 2 Mod APK 2023, just follow these three simple steps:
-
-Parking Master Multiplayer 2 Mod APK 2023 is a great way to have more fun and challenge in parking games. Download it now and see for yourself!
-FAQs
-Here are some frequently asked questions about Parking Master Multiplayer 2 Mod APK 2023:
-
- - \ No newline at end of file diff --git a/spaces/2ndelement/voicevox/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md b/spaces/2ndelement/voicevox/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md deleted file mode 100644 index 32a15afd7544b8cfecb727231432376aa8c9917e..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md +++ /dev/null @@ -1,3 +0,0 @@ -dummy3 policy - -https://voicevox.hiroshiba.jp/ diff --git a/spaces/30Kanika/Animal_Image_Classifier/README.md b/spaces/30Kanika/Animal_Image_Classifier/README.md deleted file mode 100644 index 6af4859a98fd15fa34682e51d0ea614b75606bce..0000000000000000000000000000000000000000 --- a/spaces/30Kanika/Animal_Image_Classifier/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Animal Image Classifier -emoji: 🌍 -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/52Hz/CMFNet_dehazing/model/CMFNet.py b/spaces/52Hz/CMFNet_dehazing/model/CMFNet.py deleted file mode 100644 index 99dd5ced088d6d8c11c2fb46c0778c69286685f1..0000000000000000000000000000000000000000 --- a/spaces/52Hz/CMFNet_dehazing/model/CMFNet.py +++ /dev/null @@ -1,191 +0,0 @@ -import torch -import torch.nn as nn -from model.block import SAB, CAB, PAB, conv, SAM, conv3x3, conv_down - -########################################################################## -## U-Net -bn = 2 # block number-1 - -class Encoder(nn.Module): - def __init__(self, n_feat, kernel_size, reduction, act, bias, scale_unetfeats, block): - super(Encoder, self).__init__() - if block == 'CAB': - self.encoder_level1 = [CAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level2 = [CAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level3 = [CAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - elif block == 'PAB': - self.encoder_level1 = [PAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level2 = [PAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level3 = [PAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - elif block == 'SAB': - self.encoder_level1 = [SAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level2 = [SAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level3 = [SAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)] - self.encoder_level1 = nn.Sequential(*self.encoder_level1) - self.encoder_level2 = nn.Sequential(*self.encoder_level2) - self.encoder_level3 = nn.Sequential(*self.encoder_level3) - self.down12 = DownSample(n_feat, scale_unetfeats) - self.down23 = DownSample(n_feat + scale_unetfeats, scale_unetfeats) - - def forward(self, x): - enc1 = self.encoder_level1(x) - x = self.down12(enc1) - enc2 = self.encoder_level2(x) - x = self.down23(enc2) - enc3 = self.encoder_level3(x) - return [enc1, enc2, enc3] - -class Decoder(nn.Module): - def __init__(self, n_feat, kernel_size, reduction, act, bias, scale_unetfeats, block): - super(Decoder, self).__init__() - if block == 'CAB': - 
-            self.decoder_level1 = [CAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
-            self.decoder_level2 = [CAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
-            self.decoder_level3 = [CAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
-        elif block == 'PAB':
-            self.decoder_level1 = [PAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
-            self.decoder_level2 = [PAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
-            self.decoder_level3 = [PAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
-        elif block == 'SAB':
-            self.decoder_level1 = [SAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
-            self.decoder_level2 = [SAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
-            self.decoder_level3 = [SAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
-        self.decoder_level1 = nn.Sequential(*self.decoder_level1)
-        self.decoder_level2 = nn.Sequential(*self.decoder_level2)
-        self.decoder_level3 = nn.Sequential(*self.decoder_level3)
-        if block == 'CAB':
-            self.skip_attn1 = CAB(n_feat, kernel_size, reduction, bias=bias, act=act)
-            self.skip_attn2 = CAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act)
-        if block == 'PAB':
-            self.skip_attn1 = PAB(n_feat, kernel_size, reduction, bias=bias, act=act)
-            self.skip_attn2 = PAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act)
-        if block == 'SAB':
-            self.skip_attn1 = SAB(n_feat, kernel_size, reduction, bias=bias, act=act)
-            self.skip_attn2 = SAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act)
-        self.up21 = SkipUpSample(n_feat, scale_unetfeats)
-        self.up32 = SkipUpSample(n_feat + scale_unetfeats, scale_unetfeats)
-
-    def forward(self, outs):
-        enc1, enc2, enc3 = outs
-        dec3 = self.decoder_level3(enc3)
-        x = self.up32(dec3, self.skip_attn2(enc2))
-        dec2 = self.decoder_level2(x)
-        x = self.up21(dec2, self.skip_attn1(enc1))
-        dec1 = self.decoder_level1(x)
-        return [dec1, dec2, dec3]
-
-##########################################################################
-##---------- Resizing Modules ----------
-class DownSample(nn.Module):
-    def __init__(self, in_channels, s_factor):
-        super(DownSample, self).__init__()
-        self.down = nn.Sequential(nn.Upsample(scale_factor=0.5, mode='bilinear', align_corners=False),
-                                  nn.Conv2d(in_channels, in_channels + s_factor, 1, stride=1, padding=0, bias=False))
-
-    def forward(self, x):
-        x = self.down(x)
-        return x
-
-class UpSample(nn.Module):
-    def __init__(self, in_channels, s_factor):
-        super(UpSample, self).__init__()
-        self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
-                                nn.Conv2d(in_channels + s_factor, in_channels, 1, stride=1, padding=0, bias=False))
-
-    def forward(self, x):
-        x = self.up(x)
-        return x
-
-class SkipUpSample(nn.Module):
-    def __init__(self, in_channels, s_factor):
-        super(SkipUpSample, self).__init__()
-        self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
-                                nn.Conv2d(in_channels + s_factor, in_channels, 1, stride=1, padding=0, bias=False))
-
-    def forward(self, x, y):
-        x = self.up(x)
-        x = x + y
-        return x
-
-##########################################################################
-# Mixed Residual Module
-class Mix(nn.Module):
-    def __init__(self, m=1):
-        super(Mix, self).__init__()
-        w = nn.Parameter(torch.FloatTensor([m]), requires_grad=True)
-        w = nn.Parameter(w, requires_grad=True)
-        self.w = w
-        self.mix_block = nn.Sigmoid()
-
-    def forward(self, fea1, fea2, feat3):
-        factor = self.mix_block(self.w)
-        other = (1 - factor)/2
-        output = fea1 * other.expand_as(fea1) + fea2 * factor.expand_as(fea2) + feat3 * other.expand_as(feat3)
-        return output, factor
-
-##########################################################################
-# Architecture
-class CMFNet(nn.Module):
-    def __init__(self, in_c=3, out_c=3, n_feat=96, scale_unetfeats=48, kernel_size=3, reduction=4, bias=False):
-        super(CMFNet, self).__init__()
-
-        p_act = nn.PReLU()
-        self.shallow_feat1 = nn.Sequential(conv(in_c, n_feat // 2, kernel_size, bias=bias), p_act,
-                                           conv(n_feat // 2, n_feat, kernel_size, bias=bias))
-        self.shallow_feat2 = nn.Sequential(conv(in_c, n_feat // 2, kernel_size, bias=bias), p_act,
-                                           conv(n_feat // 2, n_feat, kernel_size, bias=bias))
-        self.shallow_feat3 = nn.Sequential(conv(in_c, n_feat // 2, kernel_size, bias=bias), p_act,
-                                           conv(n_feat // 2, n_feat, kernel_size, bias=bias))
-
-        self.stage1_encoder = Encoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'CAB')
-        self.stage1_decoder = Decoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'CAB')
-
-        self.stage2_encoder = Encoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'PAB')
-        self.stage2_decoder = Decoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'PAB')
-
-        self.stage3_encoder = Encoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'SAB')
-        self.stage3_decoder = Decoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'SAB')
-
-        self.sam1o = SAM(n_feat, kernel_size=3, bias=bias)
-        self.sam2o = SAM(n_feat, kernel_size=3, bias=bias)
-        self.sam3o = SAM(n_feat, kernel_size=3, bias=bias)
-
-        self.mix = Mix(1)
-        self.add123 = conv(out_c, out_c, kernel_size, bias=bias)
-        self.concat123 = conv(n_feat*3, n_feat, kernel_size, bias=bias)
-        self.tail = conv(n_feat, out_c, kernel_size, bias=bias)
-
-
-    def forward(self, x):
-        ## Compute Shallow Features
-        shallow1 = self.shallow_feat1(x)
-        shallow2 = self.shallow_feat2(x)
-        shallow3 = self.shallow_feat3(x)
-
-        ## Enter the UNet-CAB
-        x1 = self.stage1_encoder(shallow1)
-        x1_D = self.stage1_decoder(x1)
-        ## Apply SAM
-        x1_out, x1_img = self.sam1o(x1_D[0], x)
-
-        ## Enter the UNet-PAB
-        x2 = self.stage2_encoder(shallow2)
-        x2_D = self.stage2_decoder(x2)
-        ## Apply SAM
-        x2_out, x2_img = self.sam2o(x2_D[0], x)
-
-        ## Enter the UNet-SAB
-        x3 = self.stage3_encoder(shallow3)
-        x3_D = self.stage3_decoder(x3)
-        ## Apply SAM
-        x3_out, x3_img = self.sam3o(x3_D[0], x)
-
-        ## Aggregate SAM features of Stage 1, Stage 2 and Stage 3
-        mix_r = self.mix(x1_img, x2_img, x3_img)
-        mixed_img = self.add123(mix_r[0])
-
-        ## Concat SAM features of Stage 1, Stage 2 and Stage 3
-        concat_feat = self.concat123(torch.cat([x1_out, x2_out, x3_out], 1))
-        x_final = self.tail(concat_feat)
-
-        return x_final + mixed_img
\ No newline at end of file
diff --git a/spaces/AIZ2H/08-Search-Streamlit-Session-State-QueryParameters/README.md b/spaces/AIZ2H/08-Search-Streamlit-Session-State-QueryParameters/README.md
deleted file mode 100644
index e4821a5e6f0e0b1c4b70260eca077c703ca7c75c..0000000000000000000000000000000000000000
--- a/spaces/AIZ2H/08-Search-Streamlit-Session-State-QueryParameters/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 08 Search Streamlit Session State QueryParameters
-emoji: 🔎🧠 -colorFrom: pink -colorTo: purple -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIZero2Hero4Health/1-ASRLiveSpeechRecognition-GR/app.py b/spaces/AIZero2Hero4Health/1-ASRLiveSpeechRecognition-GR/app.py deleted file mode 100644 index 56000ede939b2328cb4aea13c5827dc072c9fe0e..0000000000000000000000000000000000000000 --- a/spaces/AIZero2Hero4Health/1-ASRLiveSpeechRecognition-GR/app.py +++ /dev/null @@ -1,169 +0,0 @@ -import gradio as gr -import torch -import time -import librosa -import soundfile -import nemo.collections.asr as nemo_asr -import tempfile -import os -import uuid - -from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration -import torch - -# PersistDataset ----- -import os -import csv -import gradio as gr -from gradio import inputs, outputs -import huggingface_hub -from huggingface_hub import Repository, hf_hub_download, upload_file -from datetime import datetime - -# --------------------------------------------- -# Dataset and Token links - change awacke1 to your own HF id, and add a HF_TOKEN copy to your repo for write permissions -# This should allow you to save your results to your own Dataset hosted on HF. --- -#DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/Carddata.csv" -#DATASET_REPO_ID = "awacke1/Carddata.csv" -#DATA_FILENAME = "Carddata.csv" -#DATA_FILE = os.path.join("data", DATA_FILENAME) -#HF_TOKEN = os.environ.get("HF_TOKEN") -#SCRIPT = """ - -# -#""" - -#try: -# hf_hub_download( -# repo_id=DATASET_REPO_ID, -# filename=DATA_FILENAME, -# cache_dir=DATA_DIRNAME, -# force_filename=DATA_FILENAME -# ) -#except: -# print("file not found") -#repo = Repository( -# local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -#) - -#def store_message(name: str, message: str): -# if name and message: -# with open(DATA_FILE, "a") as csvfile: -# writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"]) -# writer.writerow( -# {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())} -# ) -# # uncomment line below to begin saving - -# commit_url = repo.push_to_hub() -# return "" - -#iface = gr.Interface( -# store_message, -# [ -# inputs.Textbox(placeholder="Your name"), -# inputs.Textbox(placeholder="Your message", lines=2), -# ], -# "html", -# css=""" -# .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; } -# """, -# title="Reading/writing to a HuggingFace dataset repo from Spaces", -# description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.", -# article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})", -#) - - -# main ------------------------- -mname = "facebook/blenderbot-400M-distill" -model = BlenderbotForConditionalGeneration.from_pretrained(mname) -tokenizer = BlenderbotTokenizer.from_pretrained(mname) - -def take_last_tokens(inputs, note_history, history): - """Filter the last 128 tokens""" - if inputs['input_ids'].shape[1] > 128: - inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()]) - inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()]) - note_history = [' " + body + " \r\n\t\r\n\r\n",
- error: ({ status, message }) => "\n\n\t\n\t\t\n\t\t\n\t\t\t" + status + "\n\t\t\t \n\t\t \n\t\n\n"
- },
- version_hash: "r3vpsq"
-};
-
-export function get_hooks() {
- return import("../../../src/hooks.server.ts");
-}
-
-export { set_assets, set_building, set_private_env, set_public_env };
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.d.ts
deleted file mode 100644
index c4c51077af8f6ac7a90522915c567e39e6e8e75f..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.d.ts
+++ /dev/null
@@ -1,59 +0,0 @@
-import ColorInputBase from '../colorinputbase/ColorInputBase';
-import RoundRectangle from '../../roundrectangle/RoundRectangle';
-import ColorComponents from '../colorcomponents/ColorComponents';
-import CanvasInput from '../../canvasinput/CanvasInput';
-
-export default ColorInput;
-
-declare namespace ColorInput {
- type TransitCallbackType = (
- gameObject: Phaser.GameObjects.GameObject,
- duration: number
- ) => void;
-
- interface IConfig extends ColorInputBase.IConfig {
- colorPicker?: {
- width?: number, height?: number,
-
- background?: RoundRectangle.IConfig,
- createBackgroundCallback: (
- scene: Phaser.Scene,
- ) => Phaser.GameObjects.GameObject,
-
- hPalettePosition?: 0 | 1 | 2 | 3 | 'bottom' | 'left' | 'top' | 'right',
-
- expandDirection?: 0 | 1 | 'down' | 'up',
-
- easeIn?: number, easeOut?: number,
-
- transitIn?: TransitCallbackType,
- transitOut?: TransitCallbackType,
-
- bounds?: Phaser.Geom.Rectangle;
-
- space?: {
- left?: number, right?: number, top?: number, bottom?: number,
- item?: number,
- }
- },
-
- colorComponents?: {
- height?: number,
-
- formatLabel?: ColorComponents.IFormatLabelConfig,
-
- inputText?: CanvasInput.IConfig,
-
- space?: {
- left?: number, right?: number, top?: number, bottom?: number,
- },
- }
- }
-}
-
-declare class ColorInput extends ColorInputBase {
- constructor(
- scene: Phaser.Scene,
- config?: ColorInput.IConfig
- );
-}
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.js
deleted file mode 100644
index 94439e7a9a4e84776817d958d8392761d597257c..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.js
+++ /dev/null
@@ -1,13 +0,0 @@
-import RoundRectangle from './RoundRectangle.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('roundRectangle', function (x, y, width, height, radiusConfig, fillColor, fillAlpha) {
- var gameObject = new RoundRectangle(this.scene, x, y, width, height, radiusConfig, fillColor, fillAlpha);
- this.scene.add.existing(gameObject);
- return gameObject;
-});
-
-SetValue(window, 'RexPlugins.UI.RoundRectangle', RoundRectangle);
-
-export default RoundRectangle;
\ No newline at end of file
diff --git a/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/keypoint_detector.py b/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/keypoint_detector.py
deleted file mode 100644
index a39a19458c75449c65d3e7810974eededb9d2d67..0000000000000000000000000000000000000000
--- a/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/keypoint_detector.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from torch import nn
-import torch
-from torchvision import models
-
-class KPDetector(nn.Module):
- """
- Predict K*5 keypoints.
- """
-
- def __init__(self, num_tps, **kwargs):
- super(KPDetector, self).__init__()
- self.num_tps = num_tps
-
- self.fg_encoder = models.resnet18(pretrained=False)
- num_features = self.fg_encoder.fc.in_features
- self.fg_encoder.fc = nn.Linear(num_features, num_tps*5*2)
-
-
- def forward(self, image):
-
- fg_kp = self.fg_encoder(image)
- bs, _, = fg_kp.shape
- fg_kp = torch.sigmoid(fg_kp)
- fg_kp = fg_kp * 2 - 1
- out = {'fg_kp': fg_kp.view(bs, self.num_tps*5, -1)}
-
- return out
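Since the detector above was deleted without any usage notes, here is a minimal sketch of how it could be exercised; the import path mirrors the repo layout shown in the diff, and the batch size and `num_tps` value are illustrative assumptions.

```python
import torch
from modules.keypoint_detector import KPDetector  # path as laid out in this repo

detector = KPDetector(num_tps=10)        # 10 TPS transforms x 5 keypoints each
detector.eval()

image = torch.randn(2, 3, 256, 256)      # dummy batch of RGB frames
with torch.no_grad():
    out = detector(image)

# Keypoints come back in (-1, 1) normalized coordinates.
print(out['fg_kp'].shape)                # torch.Size([2, 50, 2])
```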
diff --git a/spaces/AlexKozachuk/anything-v3.0/README.md b/spaces/AlexKozachuk/anything-v3.0/README.md
deleted file mode 100644
index d2e09658fa22b5fdc59854bde8a4ffb008f84df3..0000000000000000000000000000000000000000
--- a/spaces/AlexKozachuk/anything-v3.0/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Anything V3.0
-emoji: 🏃
-colorFrom: gray
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.10.1
-app_file: app.py
-pinned: false
-duplicated_from: yuessiah/anything-v3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Allakhazam/Home/README.md b/spaces/Allakhazam/Home/README.md
deleted file mode 100644
index 18d737702c56bc72f1bb74db10d167c23e28b23f..0000000000000000000000000000000000000000
--- a/spaces/Allakhazam/Home/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Home Prompts
-emoji: 🏆
-colorFrom: indigo
-colorTo: purple
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/clip_loss.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/clip_loss.py
deleted file mode 100644
index 18176ee8eb0d992d69d5b951d7f36e2efa92a37b..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/clip_loss.py
+++ /dev/null
@@ -1,17 +0,0 @@
-
-import torch
-import clip
-
-
-class CLIPLoss(torch.nn.Module):
-
- def __init__(self, opts):
- super(CLIPLoss, self).__init__()
- self.model, self.preprocess = clip.load("ViT-B/32", device="cuda")
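- # upsample x7 then average-pool with kernel stylegan_size // 32: an SxS StyleGAN image becomes 7S / (S/32) = 224 px, the input size CLIP expects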
- self.upsample = torch.nn.Upsample(scale_factor=7)
- self.avg_pool = torch.nn.AvgPool2d(kernel_size=opts.stylegan_size // 32)
-
- def forward(self, image, text):
- image = self.avg_pool(self.upsample(image))
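- # CLIP returns cosine similarity scaled by its logit scale (~100); 1 - logits / 100 converts similarity into a loss to minimize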
- similarity = 1 - self.model(image, text)[0] / 100
- return similarity
\ No newline at end of file
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py
deleted file mode 100644
index e58952aa207fc6b6211f3e8faf6f93992d576acf..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy as np
-import torch
-import tqdm
-
-from ...models.unet_1d import UNet1DModel
-from ...pipelines import DiffusionPipeline
-from ...utils import randn_tensor
-from ...utils.dummy_pt_objects import DDPMScheduler
-
-
-class ValueGuidedRLPipeline(DiffusionPipeline):
- r"""
- Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states.
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
- Parameters:
- value_function ([`UNet1DModel`]):
- A specialized UNet for fine-tuning trajectories based on reward.
- unet ([`UNet1DModel`]):
- UNet architecture to denoise the encoded trajectories.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this
- application is [`DDPMScheduler`].
- env ():
- An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models.
- """
-
- def __init__(
- self,
- value_function: UNet1DModel,
- unet: UNet1DModel,
- scheduler: DDPMScheduler,
- env,
- ):
- super().__init__()
- self.value_function = value_function
- self.unet = unet
- self.scheduler = scheduler
- self.env = env
- self.data = env.get_dataset()
- self.means = {}
- for key in self.data.keys():
- try:
- self.means[key] = self.data[key].mean()
- except: # noqa: E722
- pass
- self.stds = {}
- for key in self.data.keys():
- try:
- self.stds[key] = self.data[key].std()
- except: # noqa: E722
- pass
- self.state_dim = env.observation_space.shape[0]
- self.action_dim = env.action_space.shape[0]
-
- def normalize(self, x_in, key):
- return (x_in - self.means[key]) / self.stds[key]
-
- def de_normalize(self, x_in, key):
- return x_in * self.stds[key] + self.means[key]
-
- def to_torch(self, x_in):
- if type(x_in) is dict:
- return {k: self.to_torch(v) for k, v in x_in.items()}
- elif torch.is_tensor(x_in):
- return x_in.to(self.unet.device)
- return torch.tensor(x_in, device=self.unet.device)
-
- def reset_x0(self, x_in, cond, act_dim):
- for key, val in cond.items():
- x_in[:, key, act_dim:] = val.clone()
- return x_in
-
- def run_diffusion(self, x, conditions, n_guide_steps, scale):
- batch_size = x.shape[0]
- y = None
- for i in tqdm.tqdm(self.scheduler.timesteps):
- # create batch of timesteps to pass into model
- timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
- for _ in range(n_guide_steps):
- with torch.enable_grad():
- x.requires_grad_()
-
- # permute to match dimension for pre-trained models
- y = self.value_function(x.permute(0, 2, 1), timesteps).sample
- grad = torch.autograd.grad([y.sum()], [x])[0]
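- # gradient of the predicted return w.r.t. the noisy trajectory; scaled by the posterior std below and added to x to steer sampling toward high-value plans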
-
- posterior_variance = self.scheduler._get_variance(i)
- model_std = torch.exp(0.5 * posterior_variance)
- grad = model_std * grad
-
- grad[timesteps < 2] = 0
- x = x.detach()
- x = x + scale * grad
- x = self.reset_x0(x, conditions, self.action_dim)
-
- prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
-
- # TODO: verify deprecation of this kwarg
- x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
-
- # apply conditions to the trajectory (set the initial state)
- x = self.reset_x0(x, conditions, self.action_dim)
- x = self.to_torch(x)
- return x, y
-
- def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
- # normalize the observations and create batch dimension
- obs = self.normalize(obs, "observations")
- obs = obs[None].repeat(batch_size, axis=0)
-
- conditions = {0: self.to_torch(obs)}
- shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
-
- # generate initial noise and apply our conditions (to make the trajectories start at current state)
- x1 = randn_tensor(shape, device=self.unet.device)
- x = self.reset_x0(x1, conditions, self.action_dim)
- x = self.to_torch(x)
-
- # run the diffusion process
- x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
-
- # sort output trajectories by value
- sorted_idx = y.argsort(0, descending=True).squeeze()
- sorted_values = x[sorted_idx]
- actions = sorted_values[:, :, : self.action_dim]
- actions = actions.detach().cpu().numpy()
- denorm_actions = self.de_normalize(actions, key="actions")
-
- # select the action with the highest value
- if y is not None:
- selected_index = 0
- else:
- # if we didn't run value guiding, select a random action
- selected_index = np.random.randint(0, batch_size)
-
- denorm_actions = denorm_actions[selected_index, 0]
- return denorm_actions
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py
deleted file mode 100644
index b4eabb9e3a0e18dd71a445bb8960b27d8699daac..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import numpy as np
-import torch
-
-from diffusers import VersatileDiffusionImageVariationPipeline
-from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
-
-
-torch.backends.cuda.matmul.allow_tf32 = False
-
-
-class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
- pass
-
-
-@slow
-@require_torch_gpu
-class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
- def test_inference_image_variations(self):
- pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
- pipe.to(torch_device)
- pipe.set_progress_bar_config(disable=None)
-
- image_prompt = load_image(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
- )
- generator = torch.manual_seed(0)
- image = pipe(
- image=image_prompt,
- generator=generator,
- guidance_scale=7.5,
- num_inference_steps=50,
- output_type="numpy",
- ).images
-
- image_slice = image[0, 253:256, 253:256, -1]
-
- assert image.shape == (1, 512, 512, 3)
- expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
-
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py
deleted file mode 100644
index 816d206f5735c008cd6bca6e3cbf7a81fdd9b619..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py
+++ /dev/null
@@ -1,140 +0,0 @@
-_base_ = [
- '../_base_/models/cascade_mask_rcnn_swin_fpn.py',
- '../_base_/datasets/coco_instance.py',
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]
-
-model = dict(
- backbone=dict(
- embed_dim=96,
- depths=[2, 2, 18, 2],
- num_heads=[3, 6, 12, 24],
- window_size=7,
- ape=False,
- drop_path_rate=0.2,
- patch_norm=True,
- use_checkpoint=False
- ),
- neck=dict(in_channels=[96, 192, 384, 768]),
- roi_head=dict(
- bbox_head=[
- dict(
- type='ConvFCBBoxHead',
- num_shared_convs=4,
- num_shared_fcs=1,
- in_channels=256,
- conv_out_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=80,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0., 0., 0., 0.],
- target_stds=[0.1, 0.1, 0.2, 0.2]),
- reg_class_agnostic=False,
- reg_decoded_bbox=True,
- norm_cfg=dict(type='SyncBN', requires_grad=True),
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
- dict(
- type='ConvFCBBoxHead',
- num_shared_convs=4,
- num_shared_fcs=1,
- in_channels=256,
- conv_out_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=80,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0., 0., 0., 0.],
- target_stds=[0.05, 0.05, 0.1, 0.1]),
- reg_class_agnostic=False,
- reg_decoded_bbox=True,
- norm_cfg=dict(type='SyncBN', requires_grad=True),
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
- dict(
- type='ConvFCBBoxHead',
- num_shared_convs=4,
- num_shared_fcs=1,
- in_channels=256,
- conv_out_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=80,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0., 0., 0., 0.],
- target_stds=[0.033, 0.033, 0.067, 0.067]),
- reg_class_agnostic=False,
- reg_decoded_bbox=True,
- norm_cfg=dict(type='SyncBN', requires_grad=True),
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
- ]))
-
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-# augmentation strategy originates from DETR / Sparse RCNN
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='AutoAugment',
- policies=[
- [
- dict(type='Resize',
- img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
- (608, 1333), (640, 1333), (672, 1333), (704, 1333),
- (736, 1333), (768, 1333), (800, 1333)],
- multiscale_mode='value',
- keep_ratio=True)
- ],
- [
- dict(type='Resize',
- img_scale=[(400, 1333), (500, 1333), (600, 1333)],
- multiscale_mode='value',
- keep_ratio=True),
- dict(type='RandomCrop',
- crop_type='absolute_range',
- crop_size=(384, 600),
- allow_negative_crop=True),
- dict(type='Resize',
- img_scale=[(480, 1333), (512, 1333), (544, 1333),
- (576, 1333), (608, 1333), (640, 1333),
- (672, 1333), (704, 1333), (736, 1333),
- (768, 1333), (800, 1333)],
- multiscale_mode='value',
- override=True,
- keep_ratio=True)
- ]
- ]),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-data = dict(train=dict(pipeline=train_pipeline))
-
-optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
- paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
- 'relative_position_bias_table': dict(decay_mult=0.),
- 'norm': dict(decay_mult=0.)}))
-lr_config = dict(step=[27, 33])
-runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
-
-# do not use mmdet version fp16
-fp16 = None
-optimizer_config = dict(
- type="DistOptimizerHook",
- update_interval=1,
- grad_clip=None,
- coalesce=True,
- bucket_size_mb=-1,
- use_fp16=True,
-)
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py
deleted file mode 100644
index f7b07c4f47629c07faa013b9d1eae3462d898c6f..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py
+++ /dev/null
@@ -1,12 +0,0 @@
-_base_ = [
- '../_base_/models/dnl_r50-d8.py',
- '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_80k.py'
-]
-model = dict(
- decode_head=dict(align_corners=True),
- auxiliary_head=dict(align_corners=True),
- test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
-optimizer = dict(
- paramwise_cfg=dict(
- custom_keys=dict(theta=dict(wd_mult=0.), phi=dict(wd_mult=0.))))
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py
deleted file mode 100644
index fb7c3d55d57b09296ea24889b218f9a0fb997463..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './pspnet_r50-d8_512x512_80k_ade20k.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/Ariharasudhan/YoloV5/utils/segment/loss.py b/spaces/Ariharasudhan/YoloV5/utils/segment/loss.py
deleted file mode 100644
index b45b2c27e0a05c275cbc50064288aece3ae3e856..0000000000000000000000000000000000000000
--- a/spaces/Ariharasudhan/YoloV5/utils/segment/loss.py
+++ /dev/null
@@ -1,186 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ..general import xywh2xyxy
-from ..loss import FocalLoss, smooth_BCE
-from ..metrics import bbox_iou
-from ..torch_utils import de_parallel
-from .general import crop_mask
-
-
-class ComputeLoss:
- # Compute losses
- def __init__(self, model, autobalance=False, overlap=False):
- self.sort_obj_iou = False
- self.overlap = overlap
- device = next(model.parameters()).device # get model device
- h = model.hyp # hyperparameters
- self.device = device
-
- # Define criteria
- BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
- BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
-
- # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
- self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
-
- # Focal loss
- g = h['fl_gamma'] # focal loss gamma
- if g > 0:
- BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
-
- m = de_parallel(model).model[-1] # Detect() module
- self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7
- self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index
- self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
- self.na = m.na # number of anchors
- self.nc = m.nc # number of classes
- self.nl = m.nl # number of layers
- self.nm = m.nm # number of masks
- self.anchors = m.anchors
- self.device = device
-
- def __call__(self, preds, targets, masks): # predictions, targets, model
- p, proto = preds
- bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width
- lcls = torch.zeros(1, device=self.device)
- lbox = torch.zeros(1, device=self.device)
- lobj = torch.zeros(1, device=self.device)
- lseg = torch.zeros(1, device=self.device)
- tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets
-
- # Losses
- for i, pi in enumerate(p): # layer index, layer predictions
- b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
- tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj
-
- n = b.shape[0] # number of targets
- if n:
- pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions
-
- # Box regression
- pxy = pxy.sigmoid() * 2 - 0.5
- pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
- pbox = torch.cat((pxy, pwh), 1) # predicted box
- iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target)
- lbox += (1.0 - iou).mean() # iou loss
-
- # Objectness
- iou = iou.detach().clamp(0).type(tobj.dtype)
- if self.sort_obj_iou:
- j = iou.argsort()
- b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]
- if self.gr < 1:
- iou = (1.0 - self.gr) + self.gr * iou
- tobj[b, a, gj, gi] = iou # iou ratio
-
- # Classification
- if self.nc > 1: # cls loss (only if multiple classes)
- t = torch.full_like(pcls, self.cn, device=self.device) # targets
- t[range(n), tcls[i]] = self.cp
- lcls += self.BCEcls(pcls, t) # BCE
-
- # Mask regression
- if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample
- masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0]
- marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized
- mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device))
- for bi in b.unique():
- j = b == bi # matching index
- if self.overlap:
- mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0)
- else:
- mask_gti = masks[tidxs[i]][j]
- lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j])
-
- obji = self.BCEobj(pi[..., 4], tobj)
- lobj += obji * self.balance[i] # obj loss
- if self.autobalance:
- self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
-
- if self.autobalance:
- self.balance = [x / self.balance[self.ssi] for x in self.balance]
- lbox *= self.hyp["box"]
- lobj *= self.hyp["obj"]
- lcls *= self.hyp["cls"]
- lseg *= self.hyp["box"] / bs
-
- loss = lbox + lobj + lcls + lseg
- return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach()
-
- def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
- # Mask loss for one image
- pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80)
- loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none")
- return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean()
-
- def build_targets(self, p, targets):
- # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
- na, nt = self.na, targets.shape[0] # number of anchors, targets
- tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], []
- gain = torch.ones(8, device=self.device) # normalized to gridspace gain
- ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
- if self.overlap:
- batch = p[0].shape[0]
- ti = []
- for i in range(batch):
- num = (targets[:, 0] == i).sum() # find number of targets of each image
- ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num)
- ti = torch.cat(ti, 1) # (na, nt)
- else:
- ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1)
- targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices
-
- g = 0.5 # bias
- off = torch.tensor(
- [
- [0, 0],
- [1, 0],
- [0, 1],
- [-1, 0],
- [0, -1], # j,k,l,m
- # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
- ],
- device=self.device).float() * g # offsets
-
- for i in range(self.nl):
- anchors, shape = self.anchors[i], p[i].shape
- gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain
-
- # Match targets to anchors
- t = targets * gain # shape(3,n,7)
- if nt:
- # Matches
- r = t[..., 4:6] / anchors[:, None] # wh ratio
- j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare
- # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
- t = t[j] # filter
-
- # Offsets
- gxy = t[:, 2:4] # grid xy
- gxi = gain[[2, 3]] - gxy # inverse
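- # j,k (left/top) and l,m (right/bottom) flag targets whose centre lies within g=0.5 of a cell border, so the adjacent grid cells are assigned the target too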
- j, k = ((gxy % 1 < g) & (gxy > 1)).T
- l, m = ((gxi % 1 < g) & (gxi > 1)).T
- j = torch.stack((torch.ones_like(j), j, k, l, m))
- t = t.repeat((5, 1, 1))[j]
- offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
- else:
- t = targets[0]
- offsets = 0
-
- # Define
- bc, gxy, gwh, at = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors
- (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class
- gij = (gxy - offsets).long()
- gi, gj = gij.T # grid indices
-
- # Append
- indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid
- tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
- anch.append(anchors[a]) # anchors
- tcls.append(c) # class
- tidxs.append(tidx)
- xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized
-
- return tcls, tbox, indices, anch, tidxs, xywhn
diff --git a/spaces/ArkanDash/rvc-models/infer_pack/commons.py b/spaces/ArkanDash/rvc-models/infer_pack/commons.py
deleted file mode 100644
index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000
--- a/spaces/ArkanDash/rvc-models/infer_pack/commons.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size * dilation - dilation) / 2)
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
- """KL(P||Q)"""
- kl = (logs_q - logs_p) - 0.5
- kl += (
- 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
- )
- return kl
-
-
-def rand_gumbel(shape):
- """Sample from the Gumbel distribution, protect from overflows."""
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
- return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
- return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, :, idx_str:idx_end]
- return ret
-
-
-def slice_segments2(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, idx_str:idx_end]
- return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
-
-
-def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
- position = torch.arange(length, dtype=torch.float)
- num_timescales = channels // 2
- log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
- num_timescales - 1
- )
- inv_timescales = min_timescale * torch.exp(
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
- )
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
- signal = F.pad(signal, [0, 0, 0, channels % 2])
- signal = signal.view(1, channels, length)
- return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
- return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
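- # WaveNet-style gated activation: the first n_channels act as the tanh "filter", the remainder as the sigmoid "gate"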
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def shift_1d(x):
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
- return x
-
-
-def sequence_mask(length, max_length=None):
- if max_length is None:
- max_length = length.max()
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
- return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
- """
- duration: [b, 1, t_x]
- mask: [b, 1, t_y, t_x]
- """
- device = duration.device
-
- b, _, t_y, t_x = mask.shape
- cum_duration = torch.cumsum(duration, -1)
-
- cum_duration_flat = cum_duration.view(b * t_x)
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
- path = path.view(b, t_x, t_y)
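- # subtracting the mask shifted by one step along t_x leaves 1s only over each token's own duration span, a hard monotonic alignment path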
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
- path = path.unsqueeze(1).transpose(2, 3) * mask
- return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
- if isinstance(parameters, torch.Tensor):
- parameters = [parameters]
- parameters = list(filter(lambda p: p.grad is not None, parameters))
- norm_type = float(norm_type)
- if clip_value is not None:
- clip_value = float(clip_value)
-
- total_norm = 0
- for p in parameters:
- param_norm = p.grad.data.norm(norm_type)
- total_norm += param_norm.item() ** norm_type
- if clip_value is not None:
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
- total_norm = total_norm ** (1.0 / norm_type)
- return total_norm
diff --git a/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/resnet.py b/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/resnet.py
deleted file mode 100644
index bf621e53dd2b467d5dcef4817fed0482c94ae458..0000000000000000000000000000000000000000
--- a/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/resnet.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from einops import rearrange
-
-
-class InflatedConv3d(nn.Conv2d):
- def forward(self, x):
- video_length = x.shape[2]
-
- x = rearrange(x, "b c f h w -> (b f) c h w")
- x = super().forward(x)
- x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
-
- return x
-
-
-class Upsample3D(nn.Module):
- def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_conv_transpose = use_conv_transpose
- self.name = name
-
- conv = None
- if use_conv_transpose:
- raise NotImplementedError
- elif use_conv:
- conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)
-
- if name == "conv":
- self.conv = conv
- else:
- self.Conv2d_0 = conv
-
- def forward(self, hidden_states, output_size=None):
- assert hidden_states.shape[1] == self.channels
-
- if self.use_conv_transpose:
- raise NotImplementedError
-
- # Cast to float32 as the 'upsample_nearest2d_out_frame' op does not support bfloat16
- dtype = hidden_states.dtype
- if dtype == torch.bfloat16:
- hidden_states = hidden_states.to(torch.float32)
-
- # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
- if hidden_states.shape[0] >= 64:
- hidden_states = hidden_states.contiguous()
-
- # if `output_size` is passed we force the interpolation output
- # size and do not make use of `scale_factor=2`
- if output_size is None:
- hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest")
- else:
- hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
-
- # If the input is bfloat16, we cast back to bfloat16
- if dtype == torch.bfloat16:
- hidden_states = hidden_states.to(dtype)
-
- if self.use_conv:
- if self.name == "conv":
- hidden_states = self.conv(hidden_states)
- else:
- hidden_states = self.Conv2d_0(hidden_states)
-
- return hidden_states
-
-
-class Downsample3D(nn.Module):
- def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.padding = padding
- stride = 2
- self.name = name
-
- if use_conv:
- conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
- else:
- raise NotImplementedError
-
- if name == "conv":
- self.Conv2d_0 = conv
- self.conv = conv
- elif name == "Conv2d_0":
- self.conv = conv
- else:
- self.conv = conv
-
- def forward(self, hidden_states):
- assert hidden_states.shape[1] == self.channels
- if self.use_conv and self.padding == 0:
- raise NotImplementedError
-
- assert hidden_states.shape[1] == self.channels
- hidden_states = self.conv(hidden_states)
-
- return hidden_states
-
-
-class ResnetBlock3D(nn.Module):
- def __init__(
- self,
- *,
- in_channels,
- out_channels=None,
- conv_shortcut=False,
- dropout=0.0,
- temb_channels=512,
- groups=32,
- groups_out=None,
- pre_norm=True,
- eps=1e-6,
- non_linearity="swish",
- time_embedding_norm="default",
- output_scale_factor=1.0,
- use_in_shortcut=None,
- ):
- super().__init__()
- self.pre_norm = pre_norm
- self.pre_norm = True
- self.in_channels = in_channels
- out_channels = in_channels if out_channels is None else out_channels
- self.out_channels = out_channels
- self.use_conv_shortcut = conv_shortcut
- self.time_embedding_norm = time_embedding_norm
- self.output_scale_factor = output_scale_factor
-
- if groups_out is None:
- groups_out = groups
-
- self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
-
- self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
-
- if temb_channels is not None:
- if self.time_embedding_norm == "default":
- time_emb_proj_out_channels = out_channels
- elif self.time_embedding_norm == "scale_shift":
- time_emb_proj_out_channels = out_channels * 2
- else:
- raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ")
-
- self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)
- else:
- self.time_emb_proj = None
-
- self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
- self.dropout = torch.nn.Dropout(dropout)
- self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
-
- if non_linearity == "swish":
- self.nonlinearity = lambda x: F.silu(x)
- elif non_linearity == "mish":
- self.nonlinearity = Mish()
- elif non_linearity == "silu":
- self.nonlinearity = nn.SiLU()
-
- self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
-
- self.conv_shortcut = None
- if self.use_in_shortcut:
- self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
-
- def forward(self, input_tensor, temb):
- hidden_states = input_tensor
-
- hidden_states = self.norm1(hidden_states)
- hidden_states = self.nonlinearity(hidden_states)
-
- hidden_states = self.conv1(hidden_states)
-
- if temb is not None:
- temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]
-
- if temb is not None and self.time_embedding_norm == "default":
- hidden_states = hidden_states + temb
-
- hidden_states = self.norm2(hidden_states)
-
- if temb is not None and self.time_embedding_norm == "scale_shift":
- scale, shift = torch.chunk(temb, 2, dim=1)
- hidden_states = hidden_states * (1 + scale) + shift
-
- hidden_states = self.nonlinearity(hidden_states)
-
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.conv2(hidden_states)
-
- if self.conv_shortcut is not None:
- input_tensor = self.conv_shortcut(input_tensor)
-
- output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
-
- return output_tensor
-
-
-class Mish(torch.nn.Module):
- def forward(self, hidden_states):
- return hidden_states * torch.tanh(torch.nn.functional.softplus(hidden_states))
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/unicode.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/unicode.py
deleted file mode 100644
index 06526203911de55da3c2a8c5ae73f48024c3f018..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/unicode.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# unicode.py
-
-import sys
-from itertools import filterfalse
-from typing import List, Tuple, Union
-
-
-class _lazyclassproperty:
- def __init__(self, fn):
- self.fn = fn
- self.__doc__ = fn.__doc__
- self.__name__ = fn.__name__
-
- def __get__(self, obj, cls):
- if cls is None:
- cls = type(obj)
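- # give each class its own cache dict: an "_intern" inherited from a superclass must not be shared, so detect and replace it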
- if not hasattr(cls, "_intern") or any(
- cls._intern is getattr(superclass, "_intern", [])
- for superclass in cls.__mro__[1:]
- ):
- cls._intern = {}
- attrname = self.fn.__name__
- if attrname not in cls._intern:
- cls._intern[attrname] = self.fn(cls)
- return cls._intern[attrname]
-
-
-UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
-
-
-class unicode_set:
- """
- A set of Unicode characters, for language-specific strings for
- ``alphas``, ``nums``, ``alphanums``, and ``printables``.
- A unicode_set is defined by a list of ranges in the Unicode character
- set, in a class attribute ``_ranges``. Ranges can be specified using
- 2-tuples or a 1-tuple, such as::
-
- _ranges = [
- (0x0020, 0x007e),
- (0x00a0, 0x00ff),
- (0x0100,),
- ]
-
- Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
-
- A unicode set can also be defined using multiple inheritance of other unicode sets::
-
- class CJK(Chinese, Japanese, Korean):
- pass
- """
-
- _ranges: UnicodeRangeList = []
-
- @_lazyclassproperty
- def _chars_for_ranges(cls):
- ret = []
- for cc in cls.__mro__:
- if cc is unicode_set:
- break
- for rr in getattr(cc, "_ranges", ()):
- ret.extend(range(rr[0], rr[-1] + 1))
- return [chr(c) for c in sorted(set(ret))]
-
- @_lazyclassproperty
- def printables(cls):
- "all non-whitespace characters in this range"
- return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
-
- @_lazyclassproperty
- def alphas(cls):
- "all alphabetic characters in this range"
- return "".join(filter(str.isalpha, cls._chars_for_ranges))
-
- @_lazyclassproperty
- def nums(cls):
- "all numeric digit characters in this range"
- return "".join(filter(str.isdigit, cls._chars_for_ranges))
-
- @_lazyclassproperty
- def alphanums(cls):
- "all alphanumeric characters in this range"
- return cls.alphas + cls.nums
-
- @_lazyclassproperty
- def identchars(cls):
- "all characters in this range that are valid identifier characters, plus underscore '_'"
- return "".join(
- sorted(
- set(
- "".join(filter(str.isidentifier, cls._chars_for_ranges))
- + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
- + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
- + "_"
- )
- )
- )
-
- @_lazyclassproperty
- def identbodychars(cls):
- """
- all characters in this range that are valid identifier body characters,
- plus the digits 0-9
- """
- return "".join(
- sorted(
- set(
- cls.identchars
- + "0123456789"
- + "".join(
- [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
- )
- )
- )
- )
-
-
-class pyparsing_unicode(unicode_set):
- """
- A namespace class for defining common language unicode_sets.
- """
-
- # fmt: off
-
- # define ranges in language character sets
- _ranges: UnicodeRangeList = [
- (0x0020, sys.maxunicode),
- ]
-
- class BasicMultilingualPlane(unicode_set):
- "Unicode set for the Basic Multilingual Plane"
- _ranges: UnicodeRangeList = [
- (0x0020, 0xFFFF),
- ]
-
- class Latin1(unicode_set):
- "Unicode set for Latin-1 Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0020, 0x007E),
- (0x00A0, 0x00FF),
- ]
-
- class LatinA(unicode_set):
- "Unicode set for Latin-A Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0100, 0x017F),
- ]
-
- class LatinB(unicode_set):
- "Unicode set for Latin-B Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0180, 0x024F),
- ]
-
- class Greek(unicode_set):
- "Unicode set for Greek Unicode Character Ranges"
- _ranges: UnicodeRangeList = [
- (0x0342, 0x0345),
- (0x0370, 0x0377),
- (0x037A, 0x037F),
- (0x0384, 0x038A),
- (0x038C,),
- (0x038E, 0x03A1),
- (0x03A3, 0x03E1),
- (0x03F0, 0x03FF),
- (0x1D26, 0x1D2A),
- (0x1D5E,),
- (0x1D60,),
- (0x1D66, 0x1D6A),
- (0x1F00, 0x1F15),
- (0x1F18, 0x1F1D),
- (0x1F20, 0x1F45),
- (0x1F48, 0x1F4D),
- (0x1F50, 0x1F57),
- (0x1F59,),
- (0x1F5B,),
- (0x1F5D,),
- (0x1F5F, 0x1F7D),
- (0x1F80, 0x1FB4),
- (0x1FB6, 0x1FC4),
- (0x1FC6, 0x1FD3),
- (0x1FD6, 0x1FDB),
- (0x1FDD, 0x1FEF),
- (0x1FF2, 0x1FF4),
- (0x1FF6, 0x1FFE),
- (0x2129,),
- (0x2719, 0x271A),
- (0xAB65,),
- (0x10140, 0x1018D),
- (0x101A0,),
- (0x1D200, 0x1D245),
- (0x1F7A1, 0x1F7A7),
- ]
-
- class Cyrillic(unicode_set):
- "Unicode set for Cyrillic Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0400, 0x052F),
- (0x1C80, 0x1C88),
- (0x1D2B,),
- (0x1D78,),
- (0x2DE0, 0x2DFF),
- (0xA640, 0xA672),
- (0xA674, 0xA69F),
- (0xFE2E, 0xFE2F),
- ]
-
- class Chinese(unicode_set):
- "Unicode set for Chinese Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x2E80, 0x2E99),
- (0x2E9B, 0x2EF3),
- (0x31C0, 0x31E3),
- (0x3400, 0x4DB5),
- (0x4E00, 0x9FEF),
- (0xA700, 0xA707),
- (0xF900, 0xFA6D),
- (0xFA70, 0xFAD9),
- (0x16FE2, 0x16FE3),
- (0x1F210, 0x1F212),
- (0x1F214, 0x1F23B),
- (0x1F240, 0x1F248),
- (0x20000, 0x2A6D6),
- (0x2A700, 0x2B734),
- (0x2B740, 0x2B81D),
- (0x2B820, 0x2CEA1),
- (0x2CEB0, 0x2EBE0),
- (0x2F800, 0x2FA1D),
- ]
-
- class Japanese(unicode_set):
- "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
- _ranges: UnicodeRangeList = []
-
- class Kanji(unicode_set):
- "Unicode set for Kanji Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x4E00, 0x9FBF),
- (0x3000, 0x303F),
- ]
-
- class Hiragana(unicode_set):
- "Unicode set for Hiragana Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x3041, 0x3096),
- (0x3099, 0x30A0),
- (0x30FC,),
- (0xFF70,),
- (0x1B001,),
- (0x1B150, 0x1B152),
- (0x1F200,),
- ]
-
- class Katakana(unicode_set):
- "Unicode set for Katakana Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x3099, 0x309C),
- (0x30A0, 0x30FF),
- (0x31F0, 0x31FF),
- (0x32D0, 0x32FE),
- (0xFF65, 0xFF9F),
- (0x1B000,),
- (0x1B164, 0x1B167),
- (0x1F201, 0x1F202),
- (0x1F213,),
- ]
-
- class Hangul(unicode_set):
- "Unicode set for Hangul (Korean) Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x1100, 0x11FF),
- (0x302E, 0x302F),
- (0x3131, 0x318E),
- (0x3200, 0x321C),
- (0x3260, 0x327B),
- (0x327E,),
- (0xA960, 0xA97C),
- (0xAC00, 0xD7A3),
- (0xD7B0, 0xD7C6),
- (0xD7CB, 0xD7FB),
- (0xFFA0, 0xFFBE),
- (0xFFC2, 0xFFC7),
- (0xFFCA, 0xFFCF),
- (0xFFD2, 0xFFD7),
- (0xFFDA, 0xFFDC),
- ]
-
- Korean = Hangul
-
- class CJK(Chinese, Japanese, Hangul):
- "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
-
- class Thai(unicode_set):
- "Unicode set for Thai Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0E01, 0x0E3A),
- (0x0E3F, 0x0E5B)
- ]
-
- class Arabic(unicode_set):
- "Unicode set for Arabic Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0600, 0x061B),
- (0x061E, 0x06FF),
- (0x0700, 0x077F),
- ]
-
- class Hebrew(unicode_set):
- "Unicode set for Hebrew Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0591, 0x05C7),
- (0x05D0, 0x05EA),
- (0x05EF, 0x05F4),
- (0xFB1D, 0xFB36),
- (0xFB38, 0xFB3C),
- (0xFB3E,),
- (0xFB40, 0xFB41),
- (0xFB43, 0xFB44),
- (0xFB46, 0xFB4F),
- ]
-
- class Devanagari(unicode_set):
- "Unicode set for Devanagari Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0900, 0x097F),
- (0xA8E0, 0xA8FF)
- ]
-
- # fmt: on
-
-
-pyparsing_unicode.Japanese._ranges = (
- pyparsing_unicode.Japanese.Kanji._ranges
- + pyparsing_unicode.Japanese.Hiragana._ranges
- + pyparsing_unicode.Japanese.Katakana._ranges
-)
-
-pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
-
-# add language identifiers using language Unicode
-pyparsing_unicode.العربية = pyparsing_unicode.Arabic
-pyparsing_unicode.中文 = pyparsing_unicode.Chinese
-pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
-pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
-pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
-pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
-pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
-pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
-pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
-pyparsing_unicode.한국어 = pyparsing_unicode.Korean
-pyparsing_unicode.ไทย = pyparsing_unicode.Thai
-pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py
deleted file mode 100644
index 71f66bd03cb713a2190853bdf7170c4ea80d2425..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import types
-import functools
-
-
-# from jaraco.functools 3.3
-def method_cache(method, cache_wrapper=None):
- """
- Wrap lru_cache to support storing the cache data in the object instances.
-
- Abstracts the common paradigm where the method explicitly saves an
- underscore-prefixed protected property on first call and returns that
- subsequently.
-
- >>> class MyClass:
- ... calls = 0
- ...
- ... @method_cache
- ... def method(self, value):
- ... self.calls += 1
- ... return value
-
- >>> a = MyClass()
- >>> a.method(3)
- 3
- >>> for x in range(75):
- ... res = a.method(x)
- >>> a.calls
- 75
-
- Note that the apparent behavior will be exactly like that of lru_cache
- except that the cache is stored on each instance, so values in one
- instance will not flush values from another, and when an instance is
- deleted, so are the cached values for that instance.
-
- >>> b = MyClass()
- >>> for x in range(35):
- ... res = b.method(x)
- >>> b.calls
- 35
- >>> a.method(0)
- 0
- >>> a.calls
- 75
-
- Note that if method had been decorated with ``functools.lru_cache()``,
- a.calls would have been 76 (due to the cached value of 0 having been
- flushed by the 'b' instance).
-
- Clear the cache with ``.cache_clear()``
-
- >>> a.method.cache_clear()
-
- Same for a method that hasn't yet been called.
-
- >>> c = MyClass()
- >>> c.method.cache_clear()
-
- Another cache wrapper may be supplied:
-
- >>> cache = functools.lru_cache(maxsize=2)
- >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
- >>> a = MyClass()
- >>> a.method2()
- 3
-
- Caution - do not subsequently wrap the method with another decorator, such
- as ``@property``, which changes the semantics of the function.
-
- See also
- http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
- for another implementation and additional justification.
- """
- cache_wrapper = cache_wrapper or functools.lru_cache()
-
- def wrapper(self, *args, **kwargs):
- # it's the first call, replace the method with a cached, bound method
- bound_method = types.MethodType(method, self)
- cached_method = cache_wrapper(bound_method)
- setattr(self, method.__name__, cached_method)
- return cached_method(*args, **kwargs)
-
- # Support cache clear even before cache has been created.
- wrapper.cache_clear = lambda: None
-
- return wrapper
-
-
-# From jaraco.functools 3.3
-def pass_none(func):
- """
- Wrap func so it's not called if its first param is None
-
- >>> print_text = pass_none(print)
- >>> print_text('text')
- text
- >>> print_text(None)
- """
-
- @functools.wraps(func)
- def wrapper(param, *args, **kwargs):
- if param is not None:
- return func(param, *args, **kwargs)
-
- return wrapper
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/bdist_egg.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/bdist_egg.py
deleted file mode 100644
index 11a1c6be28ad008b7c083c229bb0df644ec58a0e..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/bdist_egg.py
+++ /dev/null
@@ -1,457 +0,0 @@
-"""setuptools.command.bdist_egg
-
-Build .egg distributions"""
-
-from distutils.dir_util import remove_tree, mkpath
-from distutils import log
-from types import CodeType
-import sys
-import os
-import re
-import textwrap
-import marshal
-
-from pkg_resources import get_build_platform, Distribution
-from setuptools.extension import Library
-from setuptools import Command
-from .._path import ensure_directory
-
-from sysconfig import get_path, get_python_version
-
-
-def _get_purelib():
- return get_path("purelib")
-
-
-def strip_module(filename):
- if '.' in filename:
- filename = os.path.splitext(filename)[0]
- if filename.endswith('module'):
- filename = filename[:-6]
- return filename
-
-
-def sorted_walk(dir):
- """Do os.walk in a reproducible way,
- independent of nondeterministic filesystem readdir order
- """
- for base, dirs, files in os.walk(dir):
- dirs.sort()
- files.sort()
- yield base, dirs, files
-
-
-def write_stub(resource, pyfile):
- _stub_template = textwrap.dedent("""
- def __bootstrap__():
- global __bootstrap__, __loader__, __file__
- import sys, pkg_resources, importlib.util
- __file__ = pkg_resources.resource_filename(__name__, %r)
- __loader__ = None; del __bootstrap__, __loader__
- spec = importlib.util.spec_from_file_location(__name__,__file__)
- mod = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(mod)
- __bootstrap__()
- """).lstrip()
- with open(pyfile, 'w') as f:
- f.write(_stub_template % resource)
-
-
-class bdist_egg(Command):
- description = "create an \"egg\" distribution"
-
- user_options = [
- ('bdist-dir=', 'b',
- "temporary directory for creating the distribution"),
- ('plat-name=', 'p', "platform name to embed in generated filenames "
- "(default: %s)" % get_build_platform()),
- ('exclude-source-files', None,
- "remove all .py files from the generated egg"),
- ('keep-temp', 'k',
- "keep the pseudo-installation tree around after " +
- "creating the distribution archive"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ]
-
- boolean_options = [
- 'keep-temp', 'skip-build', 'exclude-source-files'
- ]
-
- def initialize_options(self):
- self.bdist_dir = None
- self.plat_name = None
- self.keep_temp = 0
- self.dist_dir = None
- self.skip_build = 0
- self.egg_output = None
- self.exclude_source_files = None
-
- def finalize_options(self):
- ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
- self.egg_info = ei_cmd.egg_info
-
- if self.bdist_dir is None:
- bdist_base = self.get_finalized_command('bdist').bdist_base
- self.bdist_dir = os.path.join(bdist_base, 'egg')
-
- if self.plat_name is None:
- self.plat_name = get_build_platform()
-
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
- if self.egg_output is None:
-
- # Compute filename of the output egg
- basename = Distribution(
- None, None, ei_cmd.egg_name, ei_cmd.egg_version,
- get_python_version(),
- self.distribution.has_ext_modules() and self.plat_name
- ).egg_name()
-
- self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
-
- def do_install_data(self):
- # Hack for packages that install data to install's --install-lib
- self.get_finalized_command('install').install_lib = self.bdist_dir
-
- site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
- old, self.distribution.data_files = self.distribution.data_files, []
-
- for item in old:
- if isinstance(item, tuple) and len(item) == 2:
- if os.path.isabs(item[0]):
- realpath = os.path.realpath(item[0])
- normalized = os.path.normcase(realpath)
- if normalized == site_packages or normalized.startswith(
- site_packages + os.sep
- ):
- item = realpath[len(site_packages) + 1:], item[1]
- # XXX else: raise ???
- self.distribution.data_files.append(item)
-
- try:
- log.info("installing package data to %s", self.bdist_dir)
- self.call_command('install_data', force=0, root=None)
- finally:
- self.distribution.data_files = old
-
- def get_outputs(self):
- return [self.egg_output]
-
- def call_command(self, cmdname, **kw):
- """Invoke reinitialized command `cmdname` with keyword args"""
- for dirname in INSTALL_DIRECTORY_ATTRS:
- kw.setdefault(dirname, self.bdist_dir)
- kw.setdefault('skip_build', self.skip_build)
- kw.setdefault('dry_run', self.dry_run)
- cmd = self.reinitialize_command(cmdname, **kw)
- self.run_command(cmdname)
- return cmd
-
- def run(self): # noqa: C901 # is too complex (14) # FIXME
- # Generate metadata first
- self.run_command("egg_info")
- # We run install_lib before install_data, because some data hacks
- # pull their data path from the install_lib command.
- log.info("installing library code to %s", self.bdist_dir)
- instcmd = self.get_finalized_command('install')
- old_root = instcmd.root
- instcmd.root = None
- if self.distribution.has_c_libraries() and not self.skip_build:
- self.run_command('build_clib')
- cmd = self.call_command('install_lib', warn_dir=0)
- instcmd.root = old_root
-
- all_outputs, ext_outputs = self.get_ext_outputs()
- self.stubs = []
- to_compile = []
- for (p, ext_name) in enumerate(ext_outputs):
- filename, ext = os.path.splitext(ext_name)
- pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
- '.py')
- self.stubs.append(pyfile)
- log.info("creating stub loader for %s", ext_name)
- if not self.dry_run:
- write_stub(os.path.basename(ext_name), pyfile)
- to_compile.append(pyfile)
- ext_outputs[p] = ext_name.replace(os.sep, '/')
-
- if to_compile:
- cmd.byte_compile(to_compile)
- if self.distribution.data_files:
- self.do_install_data()
-
- # Make the EGG-INFO directory
- archive_root = self.bdist_dir
- egg_info = os.path.join(archive_root, 'EGG-INFO')
- self.mkpath(egg_info)
- if self.distribution.scripts:
- script_dir = os.path.join(egg_info, 'scripts')
- log.info("installing scripts to %s", script_dir)
- self.call_command('install_scripts', install_dir=script_dir,
- no_ep=1)
-
- self.copy_metadata_to(egg_info)
- native_libs = os.path.join(egg_info, "native_libs.txt")
- if all_outputs:
- log.info("writing %s", native_libs)
- if not self.dry_run:
- ensure_directory(native_libs)
- libs_file = open(native_libs, 'wt')
- libs_file.write('\n'.join(all_outputs))
- libs_file.write('\n')
- libs_file.close()
- elif os.path.isfile(native_libs):
- log.info("removing %s", native_libs)
- if not self.dry_run:
- os.unlink(native_libs)
-
- write_safety_flag(
- os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
- )
-
- if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
- log.warn(
- "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
- "Use the install_requires/extras_require setup() args instead."
- )
-
- if self.exclude_source_files:
- self.zap_pyfiles()
-
- # Make the archive
- make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
- dry_run=self.dry_run, mode=self.gen_header())
- if not self.keep_temp:
- remove_tree(self.bdist_dir, dry_run=self.dry_run)
-
- # Add to 'Distribution.dist_files' so that the "upload" command works
- getattr(self.distribution, 'dist_files', []).append(
- ('bdist_egg', get_python_version(), self.egg_output))
-
- def zap_pyfiles(self):
- log.info("Removing .py files from temporary directory")
- for base, dirs, files in walk_egg(self.bdist_dir):
- for name in files:
- path = os.path.join(base, name)
-
- if name.endswith('.py'):
- log.debug("Deleting %s", path)
- os.unlink(path)
-
- if base.endswith('__pycache__'):
- path_old = path
-
- pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'

# Agar.io Indir Apk: How to Download and Play the Popular Online Game

Are you looking for a fun, addictive online game you can play on your Android device? If so, you may want to try Agar.io, a massively multiplayer online action game with millions of fans around the world. In this article we cover what Agar.io is, why you should download its apk file, how to download and install it, how to play it online with friends, and what the game's reviews say. Let's get started!

## What is Agar.io?

Agar.io is a game created by the Brazilian developer Matheus Valadares in 2015. It is based on the idea of eating agar, a substance used to grow bacteria in a Petri dish. In the game you control a circular cell that can eat smaller cells and agar pellets to grow bigger, while avoiding the larger cells that can eat you. Its simple but addictive gameplay appeals to players of all ages and backgrounds.

Download: https://bltlly.com/2v6K4H

### The gameplay of Agar.io

The gameplay of Agar.io is easy to learn but hard to master. You start with a small cell that you move around the map with your finger or mouse. You can eat the agar pellets scattered randomly across the map to increase your mass slightly, or eat other cells smaller than you to increase your mass significantly. You also have to watch out for cells bigger than you, since they can eat you and end your game.

You can also use two buttons to improve your play. The split button divides your cell into two smaller cells that move faster and can catch smaller cells more easily; splitting, however, also makes you more vulnerable to bigger cells, which can eat your smaller pieces. The eject button expels some mass from your cell in the direction you are pointing, which can be used to feed other cells, shoot viruses at them, or escape from them.

### The features of Agar.io
## Why download the Agar.io apk?

If you want to play Agar.io on your Android device, you may wonder why you should download its apk file instead of installing the game from the Google Play Store. There are several reasons why downloading the Agar.io apk is the better option for you.

### The benefits of downloading the Agar.io apk

Downloading the Agar.io apk has many benefits, such as:

### The requirements for downloading the Agar.io apk

Before downloading the Agar.io apk, you need to make sure your device meets the following requirements:

## How to download and install the Agar.io apk?

Now that you know why you should download the Agar.io apk, you may wonder how to do it. Don't worry, it is quick and simple. Just follow these steps:

### The steps to download and install the Agar.io apk

### Tips and tricks for playing the Agar.io apk

If you want to improve your skills and have more fun playing the Agar.io apk, you may want to learn a few tips and tricks, such as:

## How to play Agar.io online with friends?

If you want to play Agar.io online with your friends, you may wonder how to do it. It is very easy. Just follow these steps:

### The modes of Agar.io online

Agar.io online offers different modes to choose from, depending on your preference and mood. Some of these modes are:

### Strategies for Agar.io online

If you want to sharpen your skills and have more fun playing Agar.io online, you may want to learn some strategies that can help you, such as:
## What are the reviews of the Agar.io apk?

If you want to know what other players think of the Agar.io apk, you may want to read some reviews of the game. Here are a few examples of positive and negative reviews from real users.

### Positive reviews of the Agar.io apk

| Name | Rating | Review |
| --- | --- | --- |
| Alice | 5 stars | I love this game! It is very fun and addictive. I play it every day with my friends and we have a great time. The graphics are simple but cute, the gameplay is smooth and fast, and the modes are varied and exciting. I recommend this game to anyone who likes online games. |
| Charlie | 5 stars | This game is amazing! It is very simple yet addictive. I like how you can customize your cell with different skins and names, and chat with other players in the game. The game is very social and friendly. The best part is that it is free and easy to download and install. |

### Negative reviews of the Agar.io apk

| Name | Rating | Review |
| --- | --- | --- |
| Dave | 2 stars | This game is boring! It is very repetitive and frustrating. I don't like how you can be eaten by bigger cells or viruses in a second and lose all your progress. The game is very unfair and random. The worst part is that it has too many ads and in-app purchases. |
|  |  | This game is terrible! It is very glitchy and slow. I don't like how the game freezes or crashes all the time, making me lose my connection or my progress. The game is very buggy and unstable. The worst part is that it has too many hackers and cheaters who ruin the game for everyone else. |
| Frank | 3 stars | This game is okay. It is very simple and easy to play. I like how you can play with other players online, but also offline if you want. The game is very casual and relaxing. The only thing I don't like is that it is too basic and lacks depth. It could use more features and modes to make it more interesting and fun. |