parquet-converter committed
Commit ac0524b · 1 Parent(s): 6797e3e

Update parquet files (step 36 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Examples/All In All Azhagu Raja TOP Full Movie Download Utorrent.md +0 -47
  2. spaces/1gistliPinn/ChatGPT4/Examples/Download Kurukshetra 2 Full Movie HD 1080p Everything You Need to Know About the Star-Studded Cast and Crew.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Elstat Ems 55 Advanced User Manual.md +0 -7
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 2018 Malayalam Movie Songs MP3 Songs Online for Free.md +0 -117
  5. spaces/1phancelerku/anime-remove-background/Benefits of Using Facebook Lite APK 4 on Your Android Device - Faster Smaller and Cheaper.md +0 -153
  6. spaces/1phancelerku/anime-remove-background/Download MIR M and Master Your Professions and Skills.md +0 -103
  7. spaces/1phancelerku/anime-remove-background/Engineering Drawing Practice for Schools and Colleges - SP 46 (2003) PDF.md +0 -61
  8. spaces/4Taps/SadTalker/src/facerender/sync_batchnorm/__init__.py +0 -12
  9. spaces/801artistry/RVC801/diffq/diffq.py +0 -286
  10. spaces/AIFILMS/generate_human_motion/VQ-Trans/checkpoints/train_vq.py +0 -171
  11. spaces/AIGText/GlyphControl/ldm/modules/distributions/__init__.py +0 -0
  12. spaces/Abhishek92kumar/layoutlmv3-finetuned-cord_100/README.md +0 -13
  13. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/login/callback/+page.server.ts +0 -39
  14. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/utils.py +0 -189
  15. spaces/AdithyaSNair/Dog_breed_predictor/app.py +0 -40
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/methods/InputMethods.js +0 -26
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/GetEaseConfig.js +0 -10
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/Factory.d.ts +0 -5
  19. spaces/AhmedKhairullah/dmo/README.md +0 -12
  20. spaces/Alcedo/yunmedia/server.js +0 -287
  21. spaces/Amrrs/DragGan-Inversion/PTI/criteria/__init__.py +0 -0
  22. spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/stylegan2/model.py +0 -674
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/tutorials/basic_training.md +0 -416
  24. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/random_sampler.py +0 -78
  25. spaces/Andy1621/uniformer_image_detection/mmdet/models/__init__.py +0 -16
  26. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/test_mixins.py +0 -348
  27. spaces/Andy1621/uniformer_image_detection/mmdet/models/utils/__init__.py +0 -16
  28. spaces/Arthur678/vits-uma-genshin-honkai/mel_processing.py +0 -101
  29. spaces/Banbri/zcvzcv/tailwind.config.js +0 -72
  30. spaces/BartPoint/VoiceChange/README.md +0 -11
  31. spaces/BetterAPI/BetterChat_new/src/lib/utils/streamToAsyncIterable.ts +0 -15
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/certifi/core.py +0 -108
  33. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/text.py +0 -1307
  34. spaces/BorisovMaksim/denoising/app.py +0 -131
  35. spaces/Brainclub5000/wesley7137-Llama-2-13B-Nous-Hermes-vicuna-uncensored-mastermod-spych/README.md +0 -12
  36. spaces/CarlDennis/HYTTS/text/korean.py +0 -205
  37. spaces/ChandraMohanNayal/AutoGPT/README.md +0 -13
  38. spaces/Chris4K/llms_compare/Ek Villain 2014 Full Movie In Hindi Download.md +0 -88
  39. spaces/CikeyQI/meme-api/docs/develop.md +0 -135
  40. spaces/CofAI/chat/client/html/index.html +0 -135
  41. spaces/CornSnakeID/CornSnakeMorphID/README.md +0 -65
  42. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/filelock/_windows.py +0 -64
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_version.py +0 -16
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/huggingface_cli.py +0 -49
  45. spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/config/defaults.py +0 -289
  46. spaces/DeeeTeeee01/VODAFONE-CUSTOMER-CHURN-PREDICTION-APP/README.md +0 -12
  47. spaces/Dinoking/Guccio-AI-Designer/models/stylegan2/stylegan2-pytorch/lpips/pretrained_networks.py +0 -181
  48. spaces/Dusan/clickbaitonator/fudge/data.py +0 -415
  49. spaces/EXPOSUREEE/Ai-Image-Enhancer/README.md +0 -35
  50. spaces/Eddycrack864/Applio-Inference/tools/infer_batch_rvc.py +0 -72
spaces/1gistliPinn/ChatGPT4/Examples/All In All Azhagu Raja TOP Full Movie Download Utorrent.md DELETED
@@ -1,47 +0,0 @@
1
- <br />
2
- <h1>All In All Azhagu Raja Full Movie Download Utorrent: Is It Worth It?</h1>
3
-
4
- <p>All In All Azhagu Raja is a 2013 Tamil comedy-romance film directed by M. Rajesh and starring Karthi, Kajal Aggarwal and Santhanam. The film revolves around Azhagu Raja, who owns a cable channel, and his love interest Priya, who faces many failures in life. The film also features a subplot involving their families' bitter past and how it affects their relationship.</p>
5
- <h2>All In All Azhagu Raja Full Movie Download Utorrent</h2><br /><p><b><b>Download File</b> &#128505; <a href="https://imgfil.com/2uxX2V">https://imgfil.com/2uxX2V</a></b></p><br /><br />
6
-
7
- <p>The film was released on November 2, 2013 and received mixed to negative reviews from critics and audiences. The film was criticized for its weak plot, poor comedy, lack of originality and excessive length. The film was also a commercial failure at the box office, failing to recover its budget of ₹25 crore.</p>
8
-
9
- <p>Despite its poor performance, the film has gained some popularity among online viewers who are looking for a light-hearted entertainer. Some of them may be tempted to download the film using torrent sites like Utorrent. However, this is not a wise decision as it may have some serious consequences.</p>
10
-
11
- <h2>Why You Should Not Download All In All Azhagu Raja Full Movie Using Utorrent</h2>
12
-
13
- <p>Downloading movies using torrent sites like Utorrent is illegal and unethical. It violates the copyright laws and deprives the filmmakers of their rightful earnings. It also exposes you to various risks such as:</p>
14
-
15
- <ul>
16
- <li>Virus and malware infection: Torrent files may contain harmful software that can damage your device or steal your personal information.</li>
17
- <li>Legal action: You may face legal action from the authorities or the content owners for downloading or distributing pirated content.</li>
18
- <li>Poor quality: The torrent files may not have the original quality of the film and may have issues such as low resolution, poor audio, missing subtitles, etc.</li>
19
- <li>Unreliable sources: The torrent sites may not have the complete or authentic version of the film and may have fake or corrupted files.</li>
20
- </ul>
21
-
22
- <p>Therefore, it is better to avoid downloading All In All Azhagu Raja full movie using Utorrent and instead watch it legally on streaming platforms like Prime Video[^3^] or Sun Nxt[^2^] where you can enjoy the film in high quality and with subtitles.</p>
23
-
24
- <h2>Conclusion</h2>
25
-
26
- <p>All In All Azhagu Raja is a Tamil comedy-romance film that failed to impress the critics and audiences when it was released in 2013. However, some online viewers may still find it entertaining and want to watch it. If you are one of them, do not download the film using torrent sites like Utorrent as it is illegal and risky. Instead, watch it legally on streaming platforms like Prime Video or Sun Nxt where you can enjoy the film in high quality and with subtitles.</p>
27
- <p></p>
28
-
29
- <h2>More About All In All Azhagu Raja</h2>
30
-
31
- <p>If you are curious to know more about the film, here are some facts and trivia that you may find interesting:</p>
32
-
33
- <ul>
34
- <li>The film is the third collaboration between director M. Rajesh and actor Santhanam, after Siva Manasula Sakthi (2009) and Boss Engira Bhaskaran (2010).</li>
35
- <li>The film is also the second collaboration between Karthi and Kajal Aggarwal, after Naan Mahaan Alla (2010).</li>
36
- <li>The film features a cameo appearance by Subbu Panchu as a director who wants to cast Azhagu Raja in his film.</li>
37
- <li>The film's music was composed by S. Thaman and the lyrics were written by Na. Muthukumar. The film has six songs, including the title track "All In All" sung by Suchith Suresan.</li>
38
- <li>The film's trailer was released on October 10, 2013 and received over 1 million views on YouTube within a week.</li>
39
- </ul>
40
-
41
- <p>Despite these factors, the film failed to live up to the expectations of the fans and critics and was declared a flop at the box office. The film was also dubbed in Hindi as Hero No. Zero 2 and released on YouTube in 2018.</p>
42
-
43
- <h2>Final Words</h2>
44
-
45
- <p>All In All Azhagu Raja is a Tamil comedy-romance film that may appeal to some viewers who are looking for a light-hearted entertainer. However, the film has many flaws and drawbacks that make it a disappointing watch for most. If you want to watch the film, do not download it using torrent sites like Utorrent as it is illegal and risky. Instead, watch it legally on streaming platforms like Prime Video or Sun Nxt where you can enjoy the film in high quality and with subtitles.</p> d5da3c52bf<br />
46
- <br />
47
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Download Kurukshetra 2 Full Movie HD 1080p Everything You Need to Know About the Star-Studded Cast and Crew.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>download Kurukshetra 2 full movie hd 1080p</h2><br /><p><b><b>DOWNLOAD</b> &#10038;&#10038;&#10038; <a href="https://imgfil.com/2uxXlE">https://imgfil.com/2uxXlE</a></b></p><br /><br />
2
- <br />
3
- aaccfb2cb3<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Elstat Ems 55 Advanced User Manual.md DELETED
@@ -1,7 +0,0 @@
1
-
2
- <p>this product page has more information about elstat ems 55 advanced user manual. elstat ems 55 advanced user manual. one of the clients we have here is the distributor of quilmes and pepsi. elstat ems 55 advanced user manual, beetel m71 owners manual, parkin microeconomics 10th edition pdf. cold symptoms such as stuffy nose, sneezing, sore throat. see photos below.</p>
3
- <p> <figure> <figcaption>employment and unemployment in greece sources eurostat elstat download scientific diagram from www.researchgate.net</figcaption> </figure> two young women wearing face mask to protect from the spread of coronavirus, walk with their bicycles on ermou street, athens main shopping area, tuesday, april 6, 2021. this is not a complete list of side effects and others may occur. one of the clients we have here is the distributor of quilmes and pepsi. elstat ems 55 advanced user manual. posted on august 13, 2019 by admin. elstat ems 55 advanced manual start (or 'free ems 55 advanced manual start downloads') is a software selection of 65 downloads, that can be described as: esee disputes elstat retail turnover data. see photos below.</p>
4
- <h2>Elstat Ems 55 Advanced User Manual</h2><br /><p><b><b>Download Zip</b> &#10004;&#10004;&#10004; <a href="https://imgfil.com/2uy0wN">https://imgfil.com/2uy0wN</a></b></p><br /><br />
5
- <p><strong>elstat : controlador nexo ems 100 elstat : one of the clients we have here is the distributor of quilmes and pepsi.</strong>. elstat ems 55 advanced user manual, beetel m71 owners manual, parkin microeconomics 10th edition pdf. this is not a complete list of side effects and others may occur. many of the exhibitors bring control elstat ems 55 advanced, we. posted on august 13, 2019 by admin. elstat ems 55 advanced user manual. see photos below. cold symptoms such as stuffy nose, sneezing, sore throat. the part functionality is the same, but the appearance is differentsee photos below. temperature elstat ems v ems55v11 temperature controller.</p> 899543212b<br />
6
- <br />
7
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 2018 Malayalam Movie Songs MP3 Songs Online for Free.md DELETED
@@ -1,117 +0,0 @@
1
- <br />
2
- <h1>How to Download 2018 Malayalam Movie Songs</h1>
3
- <p>If you are a fan of Malayalam cinema, you might be interested in downloading some of the best Malayalam movie songs from 2018. Malayalam is the official language of Kerala, a state in India, and it has a rich and diverse musical culture. Malayalam movie songs are known for their catchy tunes, meaningful lyrics, and expressive vocals. In this article, we will show you how to download 2018 Malayalam movie songs from some of the best sites on the internet. We will also explain what are Malayalam movie songs and why they are popular among music lovers.</p>
4
- <h2>download 2018 malayalam movie songs</h2><br /><p><b><b>DOWNLOAD</b> &mdash; <a href="https://urlin.us/2uSYmN">https://urlin.us/2uSYmN</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <h3>What are Malayalam movie songs and why are they popular?</h3>
7
- <p>Malayalam movie songs are songs that are featured in Malayalam movies, also known as Mollywood. Malayalam movies are a part of Indian cinema, and they have a distinct style and identity. Malayalam movies often deal with social issues, realistic themes, and complex characters. They also showcase the beauty and diversity of Kerala's culture, landscape, and language.</p>
8
- <p>Malayalam movie songs are an integral part of Malayalam movies, as they enhance the mood, emotion, and message of the story. Malayalam movie songs are composed by talented musicians who blend classical, folk, and modern elements. They are sung by versatile singers who can convey different emotions and nuances. They are written by poetic lyricists who use rich and creative language. Malayalam movie songs are popular because they appeal to a wide range of audiences, both in India and abroad. They have a universal appeal that transcends linguistic and cultural barriers.</p>
9
- <h3>What are the best sites to download Malayalam movie songs from 2018?</h3>
10
- <p>There are many sites that offer Malayalam movie songs for free download or online streaming. However, not all of them are safe, legal, or reliable. Some of them may contain viruses, malware, or spam. Some of them may have low-quality audio or broken links. Some of them may violate the copyright laws or infringe on the rights of the artists.</p>
11
- <p>download best of malayalam 2018 songs<br />
12
- download malayalam film songs 2018 hits<br />
13
- download oru adaar love movie songs<br />
14
- download koode movie songs 2018<br />
15
- download aravindante athidhikal movie songs<br />
16
- download hey jude movie songs 2018<br />
17
- download poomaram movie songs 2018<br />
18
- download kuttanpillayude sivarathri movie songs<br />
19
- download shikkari shambhu movie songs<br />
20
- download kalyanam movie songs 2018<br />
21
- download kala viplavam pranayam movie songs<br />
22
- download kinar movie songs 2018<br />
23
- download kidu movie songs 2018<br />
24
- download jallianwala bagh movie songs<br />
25
- download udalaazham movie songs 2018<br />
26
- download ottakkoru kaamukan movie songs<br />
27
- download orayiram kinakkalal movie songs<br />
28
- download kallai fm movie songs 2018<br />
29
- download velakkariyayirunnalum neeyen mohavalli movie songs<br />
30
- download old is gold movie songs 2018<br />
31
- download ennaalum sarath movie songs<br />
32
- download manikya malaraya poovi song mp3<br />
33
- download aararo song from koode movie<br />
34
- download vaanaville song from koode movie<br />
35
- download minnaminni song from koode movie<br />
36
- download paranne song from koode movie<br />
37
- download rasathi song from aravindante athidhikal movie<br />
38
- download yela la la song from hey jude movie<br />
39
- download ini oru kaalathe song from poomaram movie<br />
40
- download neramayi song from poomaram movie<br />
41
- download mruthu mandahasam song from poomaram movie<br />
42
- download kanne thaai malare song from aravindante athidhikal movie<br />
43
- download endhe kanna song from aravindante athidhikal movie<br />
44
- download kripaakari devi song from aravindante athidhikal movie<br />
45
- download ente shivane song from kuttanpillayude sivarathri movie<br />
46
- download chakka paattu song from kuttanpillayude sivarathri movie<br />
47
- download naadottukku song from kuttanpillayude sivarathri movie<br />
48
- download mazha song from shikkari shambhu movie<br />
49
- download thararaathara moolana kaattinu song from shikkari shambhu movie<br />
50
- download dhrithangapulakithan song from kalyanam movie<br />
51
- download nisa shalabhame song from hey jude movie<br />
52
- download hey don't worry jude song from hey jude movie<br />
53
- download pande nee ennil unde song from kalyanam movie<br />
54
- download medakkattu song from kala viplavam pranayam movie<br />
55
- download ayya sami song from kinar movie<br />
56
- download imayil song from kidu movie<br />
57
- download tha na na song from jallianwala bagh movie</p>
58
- <p>To avoid these problems, you should choose a site that is trustworthy, reputable, and user-friendly. A good site should have a large collection of Malayalam movie songs from different genres, artists, and years. It should also have high-quality audio, fast download speed, and easy navigation. Here are some of the best sites to download Malayalam movie songs from 2018:</p>
59
- <h4>SongsPK3</h4>
60
- <p>SongsPK3 is one of the best sites to download Malayalam movie songs from 2018 and other regional languages. It has various music sections to choose from, such as Malayalam, Tamil, Telugu, Bollywood, Punjabi, and Indian pop. It also has categories for latest releases, top 50, top 20, etc. You can download single tracks or entire albums in one go. The site has minimal ads and popups, so you can find and download your favorite songs with ease.</p>
61
- <h4>JioSaavn</h4>
62
- <p>JioSaavn is another best site to download Malayalam movie songs from 2018, which offers more than 40 million songs in 15 languages. Whether you’re looking for new hits or old classics, JioSaavn has it all. You can browse by genre, mood, artist, album, or playlist. You can also create your own custom playlists or listen to curated ones by experts. You can download songs for offline listening with a premium subscription or a Jio SIM card. The site has a sleek and simple interface, and you can also access it through the app or the web player.</p>
63
- <h4>Spotify</h4>
64
- <p>Spotify is one of the most popular sites to download Malayalam movie songs from 2018, as well as millions of other songs from around the world. Spotify lets you discover new music based on your taste, mood, and activity. You can explore by genre, artist, album, or podcast. You can also follow your favorite artists and playlists, or create your own. You can download songs for offline listening with a premium subscription, which also gives you ad-free and high-quality audio. The site has a modern and elegant design, and you can also use it on your mobile, tablet, or desktop.</p>
65
- <h4>Amazon Music</h4>
66
- <p>Amazon Music is another best site to download Malayalam movie songs from 2018, which offers over 60 million songs in various languages and genres. You can find Malayalam movie songs from 2018 in the regional section, or search by keyword, artist, or album. You can also listen to radio stations, podcasts, and live concerts. You can download songs for offline listening with a prime membership or an unlimited plan, which also gives you access to exclusive content and features. The site has a user-friendly and intuitive interface, and you can also enjoy it on your Alexa-enabled devices.</p>
67
- <h4>Gaana</h4>
68
- <p>Gaana is another best site to download Malayalam movie songs from 2018, which offers over 45 million songs in 21 languages. You can find Malayalam movie songs from 2018 in the regional section, or browse by new releases, top charts, editors' picks, or genres. You can also listen to radio mirchi, podcasts, and stories. You can download songs for offline listening with a plus subscription, which also gives you ad-free and HD audio. The site has a colorful and vibrant interface, and you can also access it through the app or the web player.</p>
69
- <h4>Raaga</h4>
70
- <p>Raaga is another best site to download Malayalam movie songs from 2018, which offers over 10 million songs in 24 languages. You can find Malayalam movie songs from 2018 in the regional section, or search by song name, movie name, artist name, or album name. You can also listen to devotional, classical, instrumental, and fusion music. You can download songs for offline listening with a premium subscription, which also gives you unlimited downloads and skips. The site has a simple and elegant interface, and you can also use it on your iOS or Android devices.</p>
71
- <h2>How to download Malayalam movie songs from these sites?</h2>
72
- <h3>Step 1: Choose a site and search for the song or album you want to download</h3>
73
- <p>The first step to download Malayalam movie songs from 2018 is to choose a site that suits your preferences and needs. You can use any of the sites mentioned above, or any other site that you trust and like. Once you have chosen a site, you need to search for the song or album that you want to download. You can use the search bar or the filters to find your desired song or album.</p>
74
- <h3>Step 2: Click on the download button or link and select the quality and format you prefer</h3>
75
- <p>The next step to download Malayalam movie songs from 2018 is to click on the download button or link that is available on the site. Depending on the site, you may need to sign up or log in before downloading. You may also need to choose the quality and format of the audio file that you want to download. Some sites may offer different options such as MP3, M4A, WAV, FLAC, etc. Some sites may also offer different bitrates such as 128 kbps, 320 kbps, etc.</p>
76
- <h3>Step 3: Save the file to your device or cloud storage and enjoy your music offline</h3>
77
- <p>The final step to download Malayalam movie songs from 2018 is to save the file to your device or cloud storage. You may need to choose a location or folder where you want to save the file. You may also need to rename the file if you want to change its default name. Once the file is saved, you can enjoy your music offline anytime and anywhere.</p>
78
- <h2>Conclusion</h2>
79
- <h3>Summary of the main points and benefits of downloading Malayalam movie songs from 2018</h3>
80
- <p>In conclusion, downloading Malayalam movie songs from 201 8 is a great way to enjoy the best of Malayalam cinema and music. Malayalam movie songs are songs that are featured in Malayalam movies, and they are known for their catchy tunes, meaningful lyrics, and expressive vocals. They have a universal appeal that transcends linguistic and cultural barriers. There are many sites that offer Malayalam movie songs for free download or online streaming, but you should choose a site that is trustworthy, reputable, and user-friendly. Some of the best sites to download Malayalam movie songs from 2018 are SongsPK3, JioSaavn, Spotify, Amazon Music, Gaana, and Raaga. To download Malayalam movie songs from these sites, you need to follow three simple steps: choose a site and search for the song or album you want to download, click on the download button or link and select the quality and format you prefer, and save the file to your device or cloud storage and enjoy your music offline. By downloading Malayalam movie songs from 2018, you can experience the beauty and diversity of Kerala's culture, landscape, and language.</p>
81
- <h3>FAQs</h3>
82
- <p>Here are some of the frequently asked questions about downloading Malayalam movie songs from 2018:</p>
83
- <ul>
84
- <li><b>Q: Is it legal to download Malayalam movie songs from 2018?</b></li>
85
- <li>A: It depends on the site and the song that you are downloading. Some sites may have the permission or license to offer Malayalam movie songs for free download or online streaming, while others may not. Some songs may be in the public domain or under creative commons licenses, while others may be protected by copyright laws. You should always check the terms and conditions of the site and the song before downloading.</li>
86
- <li><b>Q: Is it safe to download Malayalam movie songs from 2018?</b></li>
87
- <li>A: It depends on the site and the file that you are downloading. Some sites may have viruses, malware, or spam that can harm your device or data, while others may not. Some files may have low-quality audio or broken links that can ruin your listening experience, while others may not. You should always scan the site and the file with a reliable antivirus software before downloading.</li>
88
- <li><b>Q: What are some of the best Malayalam movie songs from 2018?</b></li>
89
- <li>A: There are many amazing Malayalam movie songs from 2018 that you can download and enjoy. Some of them are:</li>
90
- <ul>
91
- <li>Maanam Thudukkanu from Odiyan</li>
92
- <li>Poomuthole from Joseph</li>
93
- <li>Uyirin Nadhiye from Mayaanadhi</li>
94
- <li>Karutha Penne from Sanah Moidutty</li>
95
- <li>Jeevamshamayi from Theevandi</li>
96
- </ul>
97
- <li><b>Q: How can I listen to Malayalam movie songs from 2018 online?</b></li>
98
- <li>A: If you don't want to download Malayalam movie songs from 2018, you can also listen to them online on various sites or apps. Some of them are:</li>
99
- <ul>
100
- <li>YouTube</li>
101
- <li>Wynk Music</li>
102
- <li>Hungama Music</li>
103
- <li>Musixmatch</li>
104
- <li>SoundCloud</li>
105
- </ul>
106
- <li><b>Q: How can I share Malayalam movie songs from 2018 with my friends?</b></li>
107
- <li>A: If you want to share Malayalam movie songs from 2018 with your friends, you can use various methods such as:</li>
108
- <ul>
109
- <li>Email</li>
110
- <li>Social media</li>
111
- <li>Messaging apps</li>
112
- <li>Bluetooth</li>
113
- <li>QR code</li>
114
- </ul>
115
- <p>I hope this article has helped you learn how to download 2018 Malayalam movie songs from some of the best sites on the internet. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy listening!</p> 197e85843d<br />
116
- <br />
117
- <br />
spaces/1phancelerku/anime-remove-background/Benefits of Using Facebook Lite APK 4 on Your Android Device - Faster Smaller and Cheaper.md DELETED
@@ -1,153 +0,0 @@
1
- <br />
2
- <h1>Facebook Lite APK 4: A Faster and Lighter Way to Connect with Friends</h1>
3
- <p>Do you love using Facebook but hate how slow and heavy it is on your Android device? Do you wish you could enjoy all the features of Facebook without draining your data and battery life? If you answered yes to any of these questions, then you might want to try Facebook Lite APK 4, the latest version of the official Facebook client that is designed for low-power Android devices or ones with limited Internet connections. In this article, we will tell you everything you need to know about Facebook Lite APK 4, including what it is, how to download and install it, and how to use it effectively.</p>
4
- <h2>facebook lite apk 4</h2><br /><p><b><b>Download</b> &#9734; <a href="https://jinyurl.com/2uNK4l">https://jinyurl.com/2uNK4l</a></b></p><br /><br />
5
- <h2>What is Facebook Lite APK 4?</h2>
6
- <p>Facebook Lite APK 4 is a lighter and faster version of Facebook that uses less data and works in all network conditions. It is compatible with Android devices running on version 2.3 or higher, and it has a file size of only 2.33 MB, which means it can be downloaded and installed quickly and easily. Facebook Lite APK 4 has all the essential features of Facebook, such as:</p>
7
- <ul>
8
- <li>Posting status updates, photos, videos, and stories</li>
9
- <li>Liking, commenting, and sharing posts from your friends and pages you follow</li>
10
- <li>Chatting with your friends and groups using Messenger Lite</li>
11
- <li>Watching live videos and stories from your friends and pages you follow</li>
12
- <li>Finding local events, businesses, and services near you</li>
13
- <li>Playing games and using your favorite apps</li>
14
- </ul>
15
- <p>Facebook Lite APK 4 also has some additional features that make it more convenient and user-friendly, such as:</p>
16
- <ul>
17
- <li>A dark mode option that reduces eye strain and saves battery life</li>
18
- <li>A data saver option that lets you control how much data you use for images and videos</li>
19
- <li>A quick access bar that lets you switch between tabs easily</li>
20
- <li>A notification center that lets you see all your notifications in one place</li>
21
- <li>A search bar that lets you find anything on Facebook quickly</li>
22
- </ul>
23
- <h2>The benefits of using Facebook Lite APK 4</h2>
24
- <p>There are many benefits of using Facebook Lite APK 4 instead of the regular Facebook app, such as:</p>
25
- <ul>
26
- <li>It saves your data - by using less data for images and videos, you can save money by using less data. You can also choose when to download or upload photos and videos, or turn off auto-play for videos.</li>
27
- <li>It saves your battery life - by using less resources and running faster, it consumes less battery power. You can also enable dark mode to reduce battery usage further.</li>
28
- <li>It works on all networks - by being designed for 2G networks and areas with slow or unstable Internet connections, it ensures that you can always stay connected with your friends and family. You can also use it offline by caching some content.</li>
29
- <li>It works on old Android phones - by being compatible with Android devices running on version 2.3 or higher, it allows you to use it on older Android phones that are not supported by the regular Facebook app.</li>
30
- <li>It loads quickly - by being smaller and faster, it is the fastest app to upload photos and see updates from your friends.</li>
31
- </ul>
32
- <h2>How to download and install Facebook Lite APK 4 on your Android device</h2>
33
- <p>Downloading and installing Facebook Lite APK 4 on your Android device is very easy and simple. Just follow these steps:</p>
34
- <p>facebook lite apk 4 download free<br />
35
- facebook lite apk 4 latest version<br />
36
- facebook lite apk 4 for android 2.3<br />
37
- facebook lite apk 4 mod<br />
38
- facebook lite apk 4 offline installer<br />
39
- facebook lite apk 4 update<br />
40
- facebook lite apk 4 old version<br />
41
- facebook lite apk 4 for pc<br />
42
- facebook lite apk 4 for ios<br />
43
- facebook lite apk 4 for windows phone<br />
44
- facebook lite apk 4 hack<br />
45
- facebook lite apk 4 no ads<br />
46
- facebook lite apk 4 dark mode<br />
47
- facebook lite apk 4 beta<br />
48
- facebook lite apk 4 pro<br />
49
- facebook lite apk 4 premium<br />
50
- facebook lite apk 4 cracked<br />
51
- facebook lite apk 4 unlocked<br />
52
- facebook lite apk 4 review<br />
53
- facebook lite apk 4 features<br />
54
- facebook lite apk 4 size<br />
55
- facebook lite apk 4 speed<br />
56
- facebook lite apk 4 data usage<br />
57
- facebook lite apk 4 security<br />
58
- facebook lite apk 4 privacy<br />
59
- facebook lite apk 4 comparison<br />
60
- facebook lite apk 4 alternatives<br />
61
- facebook lite apk 4 benefits<br />
62
- facebook lite apk 4 disadvantages<br />
63
- facebook lite apk 4 problems<br />
64
- facebook lite apk 4 issues<br />
65
- facebook lite apk 4 bugs<br />
66
- facebook lite apk 4 fixes<br />
67
- facebook lite apk 4 tips<br />
68
- facebook lite apk 4 tricks<br />
69
- facebook lite apk 4 guide<br />
70
- facebook lite apk 4 tutorial<br />
71
- facebook lite apk 4 how to use<br />
72
- facebook lite apk 4 how to install<br />
73
- facebook lite apk 4 how to update<br />
74
- facebook lite apk 4 how to uninstall<br />
75
- facebook lite apk 4 how to download videos<br />
76
- facebook lite apk 4 how to change language<br />
77
- facebook lite apk 4 how to enable dark mode<br />
78
- facebook lite apk 4 how to block ads<br />
79
- facebook lite apk 4 how to save data<br />
80
- facebook lite apk 4 how to improve speed<br />
81
- facebook lite apk 4 how to increase security</p>
82
- <h3>Step 1: Go to the official website or Uptodown</h3>
83
- <p>You can download Facebook Lite APK 4 from the official website or from Uptodown, a trusted and safe platform for downloading apps. To go to the official website, click here. To go to Uptodown, click here. You can also scan the QR code below to go directly to the download page.</p>
84
- <p><img src="https://chart.googleapis.com/chart?chs=150x150&cht=qr&chl=https://facebook-lite.en.uptodown.com/android/download&choe=UTF-8" alt="QR code for Uptodown"></p>
85
- <h3>Step 2: Tap on the download button and wait for the file to be downloaded</h3>
86
- <p>Once you are on the download page, tap on the green download button and wait for the file to be downloaded. The file name should be something like facebook-lite-4-0-0-6-119.apk and the file size should be around 2.33 MB.</p>
87
- <h3>Step 3: Open the file and tap on install</h3>
88
- <p>After the file is downloaded, open it and tap on install. You might need to enable unknown sources in your settings if you haven't done so before. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on.</p>
89
- <h3>Step 4: Launch the app and log in with your Facebook account</h3>
90
- <p>Once the app is installed, launch it and log in with your Facebook account. You can also create a new account if you don't have one. You will see a simple and minimalist interface that lets you access all the features of Facebook Lite APK 4.</p>
91
- <h2>How to use Facebook Lite APK 4 effectively</h2>
92
- <p>Now that you have downloaded and installed Facebook Lite APK 4 on your Android device, you might want to know how to use it effectively. Here are some tips and tricks that will help you get the most out of this app:</p>
93
- <h3>Tips for saving data and battery life</h3>
94
- <p>One of the main advantages of Facebook Lite APK 4 is that it saves your data and battery life by using less resources and data. However, you can still optimize your usage by following these tips:</p>
95
- <ul>
96
- <li>Enable data saver mode - this will reduce the amount of data used for images and videos by compressing them or showing them in low quality. To enable data saver mode, go to Settings > Data Saver and toggle it on.</li>
97
- <li>Disable auto-play for videos - this will prevent videos from playing automatically when you scroll through your news feed. This will save your data and battery life by avoiding unnecessary video streaming. To disable auto-play for videos, go to Settings > Video Auto-play and select Never Play Videos Automatically.</li>
98
- <li>Download photos and videos only when you need them - this will save your data by letting you choose when to download or upload photos and videos. You can also delete them after viewing them if you don't need them anymore. To download or upload photos and videos, tap on them and select Download or Upload.</li>
99
- </ul>
100
- <h3>Tips for managing notifications and privacy settings</h3>
101
- <p>Another benefit of Facebook Lite APK 4 is that it lets you manage your notifications and privacy settings easily and conveniently. Here are some tips for doing that:</p>
102
- <ul>
103
- <li>Customize your notification settings - this will let you choose which notifications you want to receive and how you want to receive them. You can also mute or block notifications from specific people or pages. To customize your notification settings, go to Settings > Notifications and select the options you prefer.</li>
104
- <li>Adjust your privacy settings - this will let you control who can see your posts, who can contact you, who can tag you, and who can find you on Facebook. You can also review and edit your activity log, profile information, and security settings. To adjust your privacy settings, go to Settings > Privacy and select the options you prefer.</li>
105
- <li>Use incognito mode - this will let you browse Facebook without leaving any traces on your device or online. This is useful if you want to keep your activity private or if you are using a shared device. To use incognito mode, tap on the three-dot icon at the top right corner of the app and select Incognito Mode.</li>
106
- </ul>
107
- <h3>Tips for accessing other Facebook features and services</h3>
108
- <p>Facebook Lite APK 4 has all the essential features of Facebook , but it also lets you access other Facebook features and services that are not available on the regular Facebook app, such as:</p>
109
- <ul>
110
- <li>Facebook Watch - this is a video platform that lets you watch original shows, live sports, news, and entertainment from your favorite creators and publishers. You can also create your own watch parties and chat with your friends while watching. To access Facebook Watch, tap on the video icon at the bottom of the app.</li>
111
- <li>Facebook Marketplace - this is a place where you can buy and sell items locally or globally. You can browse through thousands of listings, make offers, chat with sellers, and arrange transactions. You can also list your own items for sale or give them away for free. To access Facebook Marketplace, tap on the store icon at the bottom of the app.</li>
112
- <li>Facebook Gaming - this is a gaming hub where you can play instant games, watch live game streams, join gaming groups, and discover new games. You can also create your own gaming videos and streams and share them with your friends. To access Facebook Gaming, tap on the joystick icon at the bottom of the app.</li>
113
- </ul>
114
- <h2>Conclusion</h2>
115
- <p>Facebook Lite APK 4 is a great alternative to the regular Facebook app for Android users who want to save data, battery life, and storage space while enjoying all the features of Facebook. It is fast, light, and easy to use, and it works on all network conditions and Android devices. It also has some exclusive features that are not available on the regular Facebook app, such as dark mode, data saver mode, incognito mode, Facebook Watch, Facebook Marketplace, and Facebook Gaming. If you want to try Facebook Lite APK 4 for yourself, you can download it from the official website or Uptodown by following the steps we have outlined above. We hope you found this article helpful and informative. If you have any questions or feedback, please leave them in the comments section below.</p>
116
- <h3>FAQs</h3>
117
- <p>Here are some frequently asked questions about Facebook Lite APK 4:</p>
118
- <ol>
119
- <li>Is Facebook Lite APK 4 safe to download and use?</li>
120
- <p>Yes, Facebook Lite APK 4 is safe to download and use. It is developed by Facebook Inc., the same company that develops the regular Facebook app. It is also verified by Uptodown, a trusted and safe platform for downloading apps. However, you should always be careful when downloading apps from unknown sources and check the permissions they require before installing them.</p>
121
- <li>What are the differences between Facebook Lite APK 4 and the regular Facebook app?</li>
122
- <p>The main differences between Facebook Lite APK 4 and the regular Facebook app are:</p>
123
- <ul>
124
- <li>Facebook Lite APK 4 is smaller and faster than the regular Facebook app. It has a file size of only 2.33 MB, while the regular Facebook app has a file size of around 60 MB.</li>
125
- <li>Facebook Lite APK 4 uses less data and battery life than the regular Facebook app. It compresses images and videos and lets you control how much data you use for them. It also runs faster and consumes less battery power.</li>
126
- <li>Facebook Lite APK 4 works on all network conditions and Android devices than the regular Facebook app. It is designed for 2G networks and areas with slow or unstable Internet connections. It is also compatible with Android devices running on version 2.3 or higher, while the regular Facebook app requires version 4.1 or higher.</li>
127
- <li>Facebook Lite APK 4 has some additional features that are not available on the regular Facebook app, such as dark mode, data saver mode, incognito mode, Facebook Watch, Facebook Marketplace, and Facebook Gaming.</li>
128
- </ul>
129
- <li>How can I update Facebook Lite APK 4 to the latest version?</li>
130
- <p>You can update Facebook Lite APK 4 to the latest version by following these steps:</p>
131
- <ul>
132
- <li>Go to the official website or Uptodown and check if there is a new version available.</li>
133
- <li>If there is a new version available, tap on the download button and wait for the file to be downloaded.</li>
134
- <li>Open the file and tap on install. The new version will overwrite the old one.</li>
135
- <li>Launch the app and enjoy the new features and improvements.</li>
136
- </ul>
137
- <li>How can I delete Facebook Lite APK 4 from my Android device?</li>
138
- <p>You can delete Facebook Lite APK 4 from your Android device by following these steps:</p>
139
- <ul>
140
- <li>Go to Settings > Apps > Facebook Lite.</li>
141
- <li>Tap on Uninstall and confirm your choice.</li>
142
- <li>The app will be removed from your device.</li>
143
- </ul>
144
- <li>How can I contact Facebook support if I have any issues with Facebook Lite APK 4?</li>
145
- <p>You can contact Facebook support if you have any issues with Facebook Lite APK 4 by following these steps:</p>
146
- <ul>
147
- <li>Go to Settings > Help & Support.</li>
148
- <li>Tap on Report a Problem and select the type of problem you are facing.</li>
149
- <li>Describe your problem in detail and attach any screenshots or logs if possible.</li>
150
- <li>Tap on Send and wait for a response from Facebook support.</li>
151
- </ul></p> 197e85843d<br />
152
- <br />
153
- <br />
spaces/1phancelerku/anime-remove-background/Download MIR M and Master Your Professions and Skills.md DELETED
@@ -1,103 +0,0 @@
1
-
2
- <h1>Download MIR M: A Guide to the New MMORPG Game</h1>
3
- <p>If you are a fan of classic MMORPG games, you might have heard of the legendary Mir series that has been popular in Asia for over two decades. Now, you can experience the latest installment of this franchise, MIR M, on your mobile or PC devices. In this article, we will tell you everything you need to know about this game, including its features, how to download it, and why you should play it.</p>
4
- <h2>What is MIR M?</h2>
5
- <p>MIR M is a role-playing game developed by Wemade Co., Ltd, based on the world and characters of the Mir IP. It is a faithful successor of the classic MMORPG style, featuring isometric viewpoint, 8-directional grid, and artworks that reflect the oriental fantasy theme. However, it also incorporates new elements and systems that make it more appealing and enjoyable for modern gamers.</p>
6
- <h2>download mir m</h2><br /><p><b><b>Download Zip</b> &#9733; <a href="https://jinyurl.com/2uNKgb">https://jinyurl.com/2uNKgb</a></b></p><br /><br />
7
- <h3>The features of MIR M</h3>
8
- <p>Some of the features that make MIR M stand out from other MMORPG games are:</p>
9
- <h4>- Mandala: customize your character's growth</h4>
10
- <p>Mandala is a unique growth specialization system that allows you to tailor your character's stats and abilities according to your preference. It is divided into two categories: Combat and Profession. Each category has many Spot Points that provide various bonuses. By connecting different Spot Points and activating different effects, you can create your own build that suits your playstyle.</p>
11
- <h4>- Rumble Battle and Clan Battle: compete with other players across servers</h4>
12
- <p>If you are looking for some PvP action, you can join the Rumble Battle or the Clan Battle events that test your skills and teamwork against other players from different servers. In the Rumble Battle, you can fight solo or in a party against other individuals in a free-for-all or team-based mode. In the Clan Battle, you can join a clan and cooperate with other members to capture territories and castles from rival clans.</p>
13
- <h4>- Profession and Street Stall: master your skills and trade with others</h4>
14
- <p>Profession is another growth system that focuses on your talents and hobbies. You can choose from various professions such as gathering, mining, fishing, crafting, cooking, and more. By completing missions and learning skills, you can hone your proficiency and earn rewards. You can also set up a Street Stall to sell your items or buy from other players.</p>
15
- <h3>How to download MIR M?</h3>
16
- <p>MIR M is available for both Android and PC/Mac devices. Here are the steps to download it:</p>
17
- <h4>- For Android devices</h4>
18
- <p>You can download MIR M from the Google Play Store by following these steps:</p>
19
- <p>download mir m apk<br />
20
- download mir m for pc<br />
21
- download mir m game<br />
22
- download mir m android<br />
23
- download mir m mod apk<br />
24
- download mir m ios<br />
25
- download mir m latest version<br />
26
- download mir m on mac<br />
27
- download mir m bluestacks<br />
28
- download mir m emulator<br />
29
- download mir m offline<br />
30
- download mir m obb<br />
31
- download mir m update<br />
32
- download mir m hack<br />
33
- download mir m cheats<br />
34
- download mir m free<br />
35
- download mir m online<br />
36
- download mir m beta<br />
37
- download mir m english version<br />
38
- download mir m global<br />
39
- download mir m wemade co ltd<br />
40
- download mir m play store<br />
41
- download mir m apkcombo<br />
42
- download mir m role playing game<br />
43
- download mir m mmorpg<br />
44
- download mir m mandala system<br />
45
- download mir m rumble battle<br />
46
- download mir m clan battle<br />
47
- download mir m profession system<br />
48
- download mir m street stall system<br />
49
- download mir m wayfarer travel system<br />
50
- download mir m sun's fast growth event<br />
51
- download mir m skill tome system<br />
52
- download mir m hidden valley capture system<br />
53
- download mir m castle siege system<br />
54
- download mir m companion system<br />
55
- download mir m mount system<br />
56
- download mir m avatar system<br />
57
- download mir m trailer video<br />
58
- download mir m gameplay video</p>
59
- <ol>
60
- <li>Open the Google Play Store app on your device.</li>
61
- <li>Search for "MIR M" in the search bar.</li>
62
- <li>Select the game from the results and tap on "Install".</li>
63
- <li>Wait for the game to download and install on your device.</li>
64
- <li>Launch the game and enjoy!</li>
65
- </ol>
66
- <h4>- For PC and Mac devices</h4>
67
- <p>You can download MIR M from the BlueStacks app player by following these steps:</p>
68
- <ol>
69
- <li>Download and install BlueStacks on your PC or Mac from <a href="(^2^)">this link</a>.</li>
70
- <li>Launch BlueStacks and sign in with your Google account or create a new one.</li>
71
- <li>Go to the Google Play Store app on BlueStacks and search for "MIR M".</li>
72
- <li>Select the game from the results and click on "Install".</li>
73
- <li>Wait for the game to download and install on BlueStacks.</li>
74
- <li>Launch the game and enjoy!</li>
75
- </ol>
76
- <h2>Why should you play MIR M?</h2>
77
- <p>MIR M is not just another MMORPG game. It is a game that offers you a rich and immersive experience that will keep you hooked for hours. Here are some of the benefits of playing MIR M:</p>
78
- <h3>The benefits of playing MIR M</h3>
79
- <p>Some of the benefits that you can get from playing MIR M are:</p>
80
- <h4>- Experience the classic MMORPG style with modern graphics and gameplay</h4>
81
- <p>MIR M is a game that pays homage to the legacy of the Mir series, but also adds new features and improvements that make it more fun and accessible. You can enjoy the nostalgic feeling of playing a classic MMORPG game, but also appreciate the stunning graphics, smooth animations, and user-friendly interface that enhance your gaming experience.</p>
82
- <h4>- Choose your own path of adventure and combat in the vast Mir continent</h4>
83
- <p>MIR M is a game that gives you the freedom to explore and interact with the world as you wish. You can choose from four classes: Warrior, Wizard, Taoist, or Assassin, each with their own skills and abilities. You can also customize your character's appearance, equipment, and growth using the Mandala system. You can embark on various quests, dungeons, raids, and events that will challenge your skills and reward you with items and resources. You can also join forces with other players or fight against them in different PvP modes.</p>
84
- <h4>- Join a clan and participate in epic wars for glory and rewards</h4>
85
- <p>MIR M is a game that encourages you to socialize and cooperate with other players. You can join a clan or create your own one, and communicate with other members using the chat and voice functions. You can also participate in clan battles that pit your clan against other clans in a massive war for territory and resources. You can earn clan points, reputation, and rewards by contributing to your clan's success.</p>
86
- <h2>Conclusion</h2>
87
- <p>MIR M is a game that will appeal to both fans of the Mir series and new players who are looking for a quality MMORPG game. It has many features and benefits that make it worth playing, such as the Mandala system, the Rumble Battle, the Profession system, the Street Stall system, the Clan Battle, and more. It is also easy to download and play on your Android or PC/Mac devices using the Google Play Store or BlueStacks app player. If you are ready to enter the world of Mir and embark on an epic adventure, download MIR M today!</p>
88
- <h3>FAQs</h3>
89
- <p>Here are some frequently asked questions about MIR M:</p>
90
- <ol>
91
- <li><b>Is MIR M free to play?</b></li>
92
- <p>Yes, MIR M is free to play. However, it also offers optional in-app purchases that can enhance your gaming experience.</p>
93
- <li><b>Can I play MIR M offline?</b></li>
94
- <p>No, MIR M requires an internet connection to play. You need to connect to a server to access the game's content and features.</p>
95
- <li><b>Can I play MIR M with my friends?</b></li>
96
- <p>Yes, you can play MIR M with your friends. You can add them as friends in the game and chat with them using text or voice messages. You can also invite them to join your party or clan, or challenge them to a duel or rumble battle.</p>
97
- <li><b>How can I get more items and resources in MIR M?</b></li>
98
- <p>You can get more items and resources in MIR M by completing quests, dungeons, raids, events, rumble battles, clan battles, etc. You can also buy them from other players using the Street Stall system or from the in-game shop using real money.</p>
99
- <li><b>How can I contact the customer service of MIR M?</b></li>
100
- <p>You can contact the customer service of MIR M by sending an email to <a href="">[email protected]</a>. You can also visit their official website <a href="">https://mirm.wemade.com/</a> or their Facebook page <a href="">https://www.facebook.com/MIRM.WEMADE/</a> for more information and updates.</p>
101
- </ol></p> 197e85843d<br />
102
- <br />
103
- <br />
spaces/1phancelerku/anime-remove-background/Engineering Drawing Practice for Schools and Colleges - SP 46 (2003) PDF.md DELETED
@@ -1,61 +0,0 @@
1
-
2
- <h1>SP 46 2003 PDF Free Download: A Guide for Engineering Drawing Practice</h1>
3
- <p>Engineering drawing is a graphical language that communicates technical information and design ideas. It is essential for engineers, architects, designers, and technicians to master the skills and standards of engineering drawing. One of the most widely used and recognized standards for engineering drawing in India is SP 46 2003.</p>
4
- <p>In this article, we will explain what is SP 46 2003, how to download it for free, and how to use it for engineering drawing practice. We will also provide some examples and exercises to help you improve your engineering drawing skills.</p>
5
- <h2>sp 46 2003 pdf free download</h2><br /><p><b><b>Download File</b> &#10042; <a href="https://jinyurl.com/2uNLP1">https://jinyurl.com/2uNLP1</a></b></p><br /><br />
6
- <h2>What is SP 46 2003?</h2>
7
- <h3>Definition and scope of SP 46 2003</h3>
8
- <p>SP 46 2003 is a standard published by the Bureau of Indian Standards (BIS) that provides guidelines and recommendations for engineering drawing practice for schools and colleges. It covers various aspects of engineering drawing, such as formats, scales, symbols, dimensions, tolerances, projections, sections, views, lettering, etc. It also includes some appendices that provide additional information on topics such as geometrical constructions, orthographic projections, isometric projections, perspective projections, etc.</p>
9
- <p>SP 46 2003 is based on the international standards ISO 128 and ISO 129, which are widely accepted and followed in many countries. It also incorporates some modifications and additions to suit the Indian conditions and practices. SP 46 2003 supersedes the previous standard SP 46:1988.</p>
10
- <h3>Benefits and features of SP 46 2003</h3>
11
- <p>SP 46 2003 has many benefits and features that make it a useful and reliable standard for engineering drawing practice. Some of them are:</p>
12
- <ul>
13
- <li>It provides a uniform and consistent method of representing technical information and design ideas in a clear and concise manner.</li>
14
- <li>It helps to avoid ambiguity and confusion in communication and interpretation of engineering drawings.</li>
15
- <li>It facilitates the exchange and transfer of technical data and drawings among different parties involved in engineering projects.</li>
16
- <li>It enhances the quality and accuracy of engineering drawings and reduces the chances of errors and mistakes.</li>
17
- <li>It promotes the development and improvement of engineering drawing skills among students and professionals.</li>
18
- </ul>
19
- <h2>How to download SP 46 2003 PDF for free?</h2>
20
- <h3>Sources and links for SP 46 2003 PDF free download</h3>
21
- <p>SP 46 2003 PDF is available online for free download from various sources. Some of them are:</p>
22
- <ul>
23
- <li>The official website of BIS (https://bis.gov.in/). You can search for SP 46 in the online catalogue or browse through the list of standards under the Production and General Engineering division. You can also download other related standards from BIS.</li>
24
- <li>The website of Public.Resource.Org (https://law.resource.org/pub/in/bis/S01/is.sp.46.2003.pdf). This is a non-profit organization that provides access to public domain documents, including legal documents, standards, codes, etc. You can download SP 46 as well as other BIS standards from this website.</li>
25
- <li>The website of Internet Archive (https://archive.org/details/gov.in.is.sp.46.2003). This is a digital library that preserves and provides access to historical collections of books, music, videos, websites, etc. You can download SP 46 as well as other BIS standards from this website.</li>
26
- <li>The website of Scribd (https://www.scribd.com/document/245568876/is-sp-46-200 2003?</h3>
27
- <p>You can get feedback on your engineering drawings using SP 46 2003 by submitting them to your teachers, mentors, peers, or experts who can review and evaluate your drawings. You can also use online platforms or forums that allow you to share and get feedback on your engineering drawings from other users.</p>
28
- <p>sp 46 2003 engineering drawing practice pdf<br />
29
- sp 46 2003 pdf download for free<br />
30
- sp 46 2003 bureau of indian standards pdf<br />
31
- sp 46 2003 engineering drawing standards pdf<br />
32
- sp 46 2003 pdf free download archive.org<br />
33
- sp 46 2003 engineering drawing book pdf<br />
34
- sp 46 2003 pdf free download scribd.com<br />
35
- sp 46 2003 engineering drawing practice for schools and colleges pdf<br />
36
- sp 46 2003 pdf free download law.resource.org<br />
37
- sp 46 2003 engineering drawing syllabus pdf<br />
38
- sp 46 2003 pdf free download production and general engineering<br />
39
- sp 46 2003 engineering drawing examples pdf<br />
40
- sp 46 2003 pdf free download drawings pgd 24<br />
41
- sp 46 2003 engineering drawing symbols pdf<br />
42
- sp 46 2003 pdf free download legally binding document<br />
43
- sp 46 2003 engineering drawing dimensions and tolerances pdf<br />
44
- sp 46 2003 pdf free download right to information act<br />
45
- sp 46 2003 engineering drawing projection methods pdf<br />
46
- sp 46 2003 pdf free download equivalence superceding superceded by<br />
47
- sp 46 2003 engineering drawing section views pdf<br />
48
- sp 46 2003 pdf free download step out from the old to the new<br />
49
- sp 46 2003 engineering drawing isometric views pdf<br />
50
- sp 46 2003 pdf free download invent a new india using knowledge<br />
51
- sp 46 2003 engineering drawing orthographic views pdf<br />
52
- sp 46 2003 pdf free download public education and public safety<br />
53
- sp 46 2003 engineering drawing auxiliary views pdf<br />
54
- sp 46 2003 pdf free download equal justice for all<br />
55
- sp 46 2003 engineering drawing hidden lines and surfaces pdf<br />
56
- sp 46 2003 pdf free download better informed citizenry<br />
57
- sp 46 2003 engineering drawing center lines and extension lines pdf<br />
58
- sp 46 2003 pdf free download rule of law world trade and world peace<br />
59
- sp</p> 197e85843d<br />
60
- <br />
61
- <br />
spaces/4Taps/SadTalker/src/facerender/sync_batchnorm/__init__.py DELETED
@@ -1,12 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # File : __init__.py
3
- # Author : Jiayuan Mao
4
- # Email : [email protected]
5
- # Date : 27/01/2018
6
- #
7
- # This file is part of Synchronized-BatchNorm-PyTorch.
8
- # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9
- # Distributed under MIT License.
10
-
11
- from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
12
- from .replicate import DataParallelWithCallback, patch_replication_callback
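The removed `__init__.py` above only re-exports the synchronized BatchNorm layers and the `DataParallelWithCallback` wrapper. As a rough usage sketch (based on the upstream Synchronized-BatchNorm-PyTorch project this package vendors, not on code shown in this diff; the import path and the two-GPU setup are assumptions):

    import torch
    from torch import nn
    # assumed import path; in this Space the package sits under src/facerender/sync_batchnorm
    from sync_batchnorm import SynchronizedBatchNorm2d, DataParallelWithCallback

    net = nn.Sequential(
        nn.Conv2d(3, 16, 3, padding=1),
        SynchronizedBatchNorm2d(16),  # drop-in replacement for nn.BatchNorm2d
        nn.ReLU(),
    )
    # the callback-aware DataParallel lets replicas share BatchNorm statistics
    net = DataParallelWithCallback(net, device_ids=[0, 1]).cuda()
    out = net(torch.randn(8, 3, 64, 64).cuda())
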
spaces/801artistry/RVC801/diffq/diffq.py DELETED
@@ -1,286 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- """
8
- Differentiable quantizer based on scaled noise injection.
9
- """
10
- from dataclasses import dataclass
11
- import math
12
- import typing as tp
13
-
14
- import torch
15
-
16
- from .base import BaseQuantizer
17
- from .uniform import uniform_quantize, uniform_unquantize
18
- from .utils import simple_repr
19
-
20
-
21
- class DiffQuantizer(BaseQuantizer):
22
- @dataclass
23
- class _QuantizedParam(BaseQuantizer._QuantizedParam):
24
- logit: torch.nn.Parameter
25
-
26
- def __init__(self, model: torch.nn.Module, min_size: float = 0.01, float16: bool = False,
27
- group_size: int = 1, min_bits: float = 2, max_bits: float = 15,
28
- param="bits", noise="gaussian",
29
- init_bits: float = 8, extra_bits: float = 0, suffix: str = "_diffq",
30
- exclude: tp.List[str] = [], detect_bound: bool = True):
31
- """
32
- Differentiable quantizer based on scaled noise injection.
33
- For every parameter `p` in the model, this introduces a number of bits parameter
34
- `b` with the same dimensions (when group_size = 1).
35
- Before each forward, `p` is replaced by `p + U`
36
- with U uniform iid noise with range [-d/2, d/2], with `d` the uniform quantization
37
- step for `b` bits.
38
- This noise approximates the quantization noise in a differentiable manner, both
39
- with respect to the unquantized parameter `p` and the number of bits `b`.
40
-
41
- At evaluation (as detected with `model.eval()`), the model is replaced
42
- by its true quantized version, and restored when going back to training.
43
-
44
- When doing actual quantization (for serialization, or evaluation),
45
- the number of bits is rounded to the nearest integer, and needs to be stored along.
46
- This will cost a few bits per dimension. To reduce this cost, one can use `group_size`,
47
- which will use a single noise level for multiple weight entries.
48
-
49
- You can use the `DiffQuantizer.model_size` method to get a differentiable estimate of the
50
- model size in MB. You can then use this estimate as a penalty in your training loss.
51
-
52
- Args:
53
- model (torch.nn.Module): model to quantize
54
- min_size (float): minimum size in MB of a parameter to be quantized.
55
- float16 (bool): if a layer is smaller than min_size, should we still do float16?
56
- group_size (int): weight entries are grouped together to reduce the number
57
- of noise scales to store. This should divide the size of all parameters
58
- bigger than min_size.
59
- min_bits (float): minimal number of bits.
60
- max_bits (float): maximal number of bits.
61
- init_bits (float): initial number of bits.
62
- extra_bits (float): extra bits to add for actual quantization (before roundoff).
63
- suffix (str): suffix used for the name of the extra noise scale parameters.
64
- exclude (list[str]): list of patterns used to match parameters to exclude.
65
- For instance `['bias']` to exclude all bias terms.
66
- detect_bound (bool): if True, will detect bound parameters and reuse
67
- the same quantized tensor for both, as well as the same number of bits.
68
-
69
- ..Warning::
70
- You must call `model.train()` and `model.eval()` for `DiffQuantizer` to work properly.
71
-
72
- """
73
- self.group_size = group_size
74
- self.min_bits = min_bits
75
- self.max_bits = max_bits
76
- self.init_bits = init_bits
77
- self.extra_bits = extra_bits
78
- self.suffix = suffix
79
- self.param = param
80
- self.noise = noise
81
- assert noise in ["gaussian", "uniform"]
82
- self._optimizer_setup = False
83
-
84
- self._min_noise = 1 / (2 ** self.max_bits - 1)
85
- self._max_noise = 1 / (2 ** self.min_bits - 1)
86
-
87
- assert group_size >= 0
88
- assert min_bits < init_bits < max_bits, \
89
- "init_bits must be between min_bits and max_bits excluded3"
90
-
91
- for name, _ in model.named_parameters():
92
- if name.endswith(suffix):
93
- raise RuntimeError("The model already has some noise scales parameters, "
94
- "maybe you used twice a DiffQuantizer on the same model?.")
95
-
96
- super().__init__(model, min_size, float16, exclude, detect_bound)
97
-
98
- def _get_bits(self, logit: torch.Tensor):
99
- if self.param == "noise":
100
- return torch.log2(1 + 1 / self._get_noise_scale(logit))
101
- else:
102
- t = torch.sigmoid(logit)
103
- return self.max_bits * t + (1 - t) * self.min_bits
104
-
105
- def _get_noise_scale(self, logit: torch.Tensor):
106
- if self.param == "noise":
107
- t = torch.sigmoid(logit)
108
- return torch.exp(t * math.log(self._min_noise) + (1 - t) * math.log(self._max_noise))
109
- else:
110
- return 1 / (2 ** self._get_bits(logit) - 1)
111
-
112
- def _register_param(self, name, param, module, other):
113
- if other is not None:
114
- return self.__class__._QuantizedParam(
115
- name=name, param=param, module=module, logit=other.logit, other=other)
116
- assert self.group_size == 0 or param.numel() % self.group_size == 0
117
- # we want the initial number of bits to be init_bits.
118
- if self.param == "noise":
119
- noise_scale = 1 / (2 ** self.init_bits - 1)
120
- t = (math.log(noise_scale) - math.log(self._max_noise)) / (
121
- math.log(self._min_noise) - math.log(self._max_noise))
122
- else:
123
- t = (self.init_bits - self.min_bits) / (self.max_bits - self.min_bits)
124
- assert 0 < t < 1
125
- logit = torch.logit(torch.tensor(float(t)))
126
- assert abs(self._get_bits(logit) - self.init_bits) < 1e-5
127
- if self.group_size > 0:
128
- nparam = param.numel() // self.group_size
129
- else:
130
- nparam = 1
131
- logit = torch.nn.Parameter(
132
- torch.full(
133
- (nparam,),
134
- logit,
135
- device=param.device))
136
- module.register_parameter(name + self.suffix, logit)
137
- return self.__class__._QuantizedParam(
138
- name=name, param=param, module=module, logit=logit, other=None)
139
-
140
- def clear_optimizer(self, optimizer: torch.optim.Optimizer):
141
- params = [qp.logit for qp in self._qparams]
142
-
143
- for group in optimizer.param_groups:
144
- new_params = []
145
- for q in list(group["params"]):
146
- matched = False
147
- for p in params:
148
- if p is q:
149
- matched = True
150
- if not matched:
151
- new_params.append(q)
152
- group["params"][:] = new_params
153
-
154
- def setup_optimizer(self, optimizer: torch.optim.Optimizer,
155
- lr: float = 1e-3, **kwargs):
156
- """
157
- Setup the optimizer to tune the number of bits. In particular, this will deactivate
158
- weight decay for the bits parameters.
159
-
160
- Args:
161
- optimizer (torch.Optimizer): optimizer to use.
162
- lr (float): specific learning rate for the bits parameters. 1e-3
163
- is perfect for Adam.
164
- kwargs (dict): overrides for other optimization parameters for the bits.
165
- """
166
- assert not self._optimizer_setup
167
- self._optimizer_setup = True
168
-
169
- params = [qp.logit for qp in self._qparams]
170
-
171
- for group in optimizer.param_groups:
172
- for q in list(group["params"]):
173
- for p in params:
174
- if p is q:
175
- raise RuntimeError("You should create the optimizer "
176
- "before the quantizer!")
177
-
178
- group = {"params": params, "lr": lr, "weight_decay": 0}
179
- group.update(kwargs)
180
- optimizer.add_param_group(group)
181
-
182
- def no_optimizer(self):
183
- """
184
- Call this if you do not want to use an optimizer.
185
- """
186
- self._optimizer_setup = True
187
-
188
- def check_unused(self):
189
- for qparam in self._qparams:
190
- if qparam.other is not None:
191
- continue
192
- grad = qparam.param.grad
193
- if grad is None or (grad == 0).all():
194
- if qparam.logit.grad is not None:
195
- qparam.logit.grad.data.zero_()
196
-
197
- def model_size(self, exact=False):
198
- """
199
- Differentiable estimate of the model size.
200
- The size is returned in MB.
201
-
202
- If `exact` is True, then the output is no longer differentiable but
203
- reflects exactly an achievable size, even without compression,
204
- i.e. same as returned by `naive_model_size()`.
205
- """
206
- total = super().model_size()
207
- subtotal = 0
208
- for qparam in self._qparams:
209
- # only count the first appearance of a Parameter
210
- if qparam.other is not None:
211
- continue
212
- bits = self.extra_bits + self._get_bits(qparam.logit)
213
- if exact:
214
- bits = bits.round().clamp(1, 15)
215
- if self.group_size == 0:
216
- group_size = qparam.param.numel()
217
- else:
218
- group_size = self.group_size
219
- subtotal += group_size * bits.sum()
220
- subtotal += 2 * 32 # param scale
221
-
222
- # Number of bits to represent each number of bits
223
- bits_bits = math.ceil(math.log2(1 + (bits.max().round().item() - self.min_bits)))
224
- subtotal += 8 # 8 bits for bits_bits
225
- subtotal += bits_bits * bits.numel()
226
-
227
- subtotal /= 2 ** 20 * 8 # bits -> MegaBytes
228
- return total + subtotal
229
-
230
- def true_model_size(self):
231
- """
232
- Naive model size without zlib compression.
233
- """
234
- return self.model_size(exact=True).item()
235
-
236
- def _pre_forward_train(self):
237
- if not self._optimizer_setup:
238
- raise RuntimeError("You must call `setup_optimizer()` on your optimizer "
239
- "before starting training.")
240
- for qparam in self._qparams:
241
- if qparam.other is not None:
242
- noisy = qparam.other.module._parameters[qparam.other.name]
243
- else:
244
- bits = self._get_bits(qparam.logit)[:, None]
245
- if self.group_size == 0:
246
- p_flat = qparam.param.view(-1)
247
- else:
248
- p_flat = qparam.param.view(-1, self.group_size)
249
- scale = p_flat.max() - p_flat.min()
250
- unit = 1 / (2**bits - 1)
251
- if self.noise == "uniform":
252
- noise_source = (torch.rand_like(p_flat) - 0.5)
253
- elif self.noise == "gaussian":
254
- noise_source = torch.randn_like(p_flat) / 2
255
- noise = scale * unit * noise_source
256
- noisy = p_flat + noise
257
- # We bypass the checks by PyTorch on parameters being leafs
258
- qparam.module._parameters[qparam.name] = noisy.view_as(qparam.param)
259
- return True
260
-
261
- def _post_forward_train(self):
262
- for qparam in self._qparams:
263
- qparam.module._parameters[qparam.name] = qparam.param
264
- return True
265
-
266
- def _quantize_param(self, qparam: _QuantizedParam) -> tp.Any:
267
- bits = self.extra_bits + self._get_bits(qparam.logit)
268
- bits = bits.round().clamp(1, 15)[:, None].byte()
269
- if self.group_size == 0:
270
- p = qparam.param.data.view(-1)
271
- else:
272
- p = qparam.param.data.view(-1, self.group_size)
273
- levels, scales = uniform_quantize(p, bits)
274
- return levels, scales, bits
275
-
276
- def _unquantize_param(self, qparam: _QuantizedParam, quantized: tp.Any) -> torch.Tensor:
277
- levels, param_scale, bits = quantized
278
- return uniform_unquantize(levels, param_scale, bits).view_as(qparam.param.data)
279
-
280
- def detach(self):
281
- super().detach()
282
- for qparam in self._qparams:
283
- delattr(qparam.module, qparam.name + self.suffix)
284
-
285
- def __repr__(self):
286
- return simple_repr(self)
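For reference, the docstrings in the removed `diffq.py` imply a specific call order: create the task optimizer first, then the quantizer, call `setup_optimizer` so the bits parameters are trained without weight decay, and add `model_size()` to the loss as a differentiable penalty. A minimal sketch of that loop (the model, data and penalty weight are placeholders, the top-level import path is assumed, and the noisy training-time forward is assumed to be wired up by the `BaseQuantizer` parent class as the docstring describes):

    import torch
    from diffq import DiffQuantizer  # assumed top-level import of the vendored package

    model = torch.nn.Linear(256, 256)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)  # must exist before the quantizer
    quantizer = DiffQuantizer(model, group_size=8)
    quantizer.setup_optimizer(optimizer, lr=1e-3)              # adds bits params, weight decay disabled

    penalty = 1e-3  # hypothetical weight on the size penalty (model_size() is in MB)
    model.train()   # required so the noisy forward path is active
    x, y = torch.randn(4, 256), torch.randn(4, 256)            # stand-in data
    loss = torch.nn.functional.mse_loss(model(x), y) + penalty * quantizer.model_size()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    model.eval()    # switches the module to its truly quantized weights
    print(f"{quantizer.true_model_size():.2f} MB")
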
spaces/AIFILMS/generate_human_motion/VQ-Trans/checkpoints/train_vq.py DELETED
@@ -1,171 +0,0 @@
1
- import os
2
- import json
3
-
4
- import torch
5
- import torch.optim as optim
6
- from torch.utils.tensorboard import SummaryWriter
7
-
8
- import models.vqvae as vqvae
9
- import utils.losses as losses
10
- import options.option_vq as option_vq
11
- import utils.utils_model as utils_model
12
- from dataset import dataset_VQ, dataset_TM_eval
13
- import utils.eval_trans as eval_trans
14
- from options.get_eval_option import get_opt
15
- from models.evaluator_wrapper import EvaluatorModelWrapper
16
- import warnings
17
- warnings.filterwarnings('ignore')
18
- from utils.word_vectorizer import WordVectorizer
19
-
20
- def update_lr_warm_up(optimizer, nb_iter, warm_up_iter, lr):
21
-
22
- current_lr = lr * (nb_iter + 1) / (warm_up_iter + 1)
23
- for param_group in optimizer.param_groups:
24
- param_group["lr"] = current_lr
25
-
26
- return optimizer, current_lr
27
-
28
- ##### ---- Exp dirs ---- #####
29
- args = option_vq.get_args_parser()
30
- torch.manual_seed(args.seed)
31
-
32
- args.out_dir = os.path.join(args.out_dir, f'{args.exp_name}')
33
- os.makedirs(args.out_dir, exist_ok = True)
34
-
35
- ##### ---- Logger ---- #####
36
- logger = utils_model.get_logger(args.out_dir)
37
- writer = SummaryWriter(args.out_dir)
38
- logger.info(json.dumps(vars(args), indent=4, sort_keys=True))
39
-
40
-
41
-
42
- w_vectorizer = WordVectorizer('./glove', 'our_vab')
43
-
44
- if args.dataname == 'kit' :
45
- dataset_opt_path = 'checkpoints/kit/Comp_v6_KLD005/opt.txt'
46
- args.nb_joints = 21
47
-
48
- else :
49
- dataset_opt_path = 'checkpoints/t2m/Comp_v6_KLD005/opt.txt'
50
- args.nb_joints = 22
51
-
52
- logger.info(f'Training on {args.dataname}, motions are with {args.nb_joints} joints')
53
-
54
- wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda'))
55
- eval_wrapper = EvaluatorModelWrapper(wrapper_opt)
56
-
57
-
58
- ##### ---- Dataloader ---- #####
59
- train_loader = dataset_VQ.DATALoader(args.dataname,
60
- args.batch_size,
61
- window_size=args.window_size,
62
- unit_length=2**args.down_t)
63
-
64
- train_loader_iter = dataset_VQ.cycle(train_loader)
65
-
66
- val_loader = dataset_TM_eval.DATALoader(args.dataname, False,
67
- 32,
68
- w_vectorizer,
69
- unit_length=2**args.down_t)
70
-
71
- ##### ---- Network ---- #####
72
- net = vqvae.HumanVQVAE(args, ## use args to define different parameters in different quantizers
73
- args.nb_code,
74
- args.code_dim,
75
- args.output_emb_width,
76
- args.down_t,
77
- args.stride_t,
78
- args.width,
79
- args.depth,
80
- args.dilation_growth_rate,
81
- args.vq_act,
82
- args.vq_norm)
83
-
84
-
85
- if args.resume_pth :
86
- logger.info('loading checkpoint from {}'.format(args.resume_pth))
87
- ckpt = torch.load(args.resume_pth, map_location='cpu')
88
- net.load_state_dict(ckpt['net'], strict=True)
89
- net.train()
90
- net.cuda()
91
-
92
- ##### ---- Optimizer & Scheduler ---- #####
93
- optimizer = optim.AdamW(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=args.weight_decay)
94
- scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_scheduler, gamma=args.gamma)
95
-
96
-
97
- Loss = losses.ReConsLoss(args.recons_loss, args.nb_joints)
98
-
99
- ##### ------ warm-up ------- #####
100
- avg_recons, avg_perplexity, avg_commit = 0., 0., 0.
101
-
102
- for nb_iter in range(1, args.warm_up_iter):
103
-
104
- optimizer, current_lr = update_lr_warm_up(optimizer, nb_iter, args.warm_up_iter, args.lr)
105
-
106
- gt_motion = next(train_loader_iter)
107
- gt_motion = gt_motion.cuda().float() # (bs, 64, dim)
108
-
109
- pred_motion, loss_commit, perplexity = net(gt_motion)
110
- loss_motion = Loss(pred_motion, gt_motion)
111
- loss_vel = Loss.forward_vel(pred_motion, gt_motion)
112
-
113
- loss = loss_motion + args.commit * loss_commit + args.loss_vel * loss_vel
114
-
115
- optimizer.zero_grad()
116
- loss.backward()
117
- optimizer.step()
118
-
119
- avg_recons += loss_motion.item()
120
- avg_perplexity += perplexity.item()
121
- avg_commit += loss_commit.item()
122
-
123
- if nb_iter % args.print_iter == 0 :
124
- avg_recons /= args.print_iter
125
- avg_perplexity /= args.print_iter
126
- avg_commit /= args.print_iter
127
-
128
- logger.info(f"Warmup. Iter {nb_iter} : lr {current_lr:.5f} \t Commit. {avg_commit:.5f} \t PPL. {avg_perplexity:.2f} \t Recons. {avg_recons:.5f}")
129
-
130
- avg_recons, avg_perplexity, avg_commit = 0., 0., 0.
131
-
132
- ##### ---- Training ---- #####
133
- avg_recons, avg_perplexity, avg_commit = 0., 0., 0.
134
- best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, writer, logger = eval_trans.evaluation_vqvae(args.out_dir, val_loader, net, logger, writer, 0, best_fid=1000, best_iter=0, best_div=100, best_top1=0, best_top2=0, best_top3=0, best_matching=100, eval_wrapper=eval_wrapper)
135
-
136
- for nb_iter in range(1, args.total_iter + 1):
137
-
138
- gt_motion = next(train_loader_iter)
139
- gt_motion = gt_motion.cuda().float() # bs, nb_joints, joints_dim, seq_len
140
-
141
- pred_motion, loss_commit, perplexity = net(gt_motion)
142
- loss_motion = Loss(pred_motion, gt_motion)
143
- loss_vel = Loss.forward_vel(pred_motion, gt_motion)
144
-
145
- loss = loss_motion + args.commit * loss_commit + args.loss_vel * loss_vel
146
-
147
- optimizer.zero_grad()
148
- loss.backward()
149
- optimizer.step()
150
- scheduler.step()
151
-
152
- avg_recons += loss_motion.item()
153
- avg_perplexity += perplexity.item()
154
- avg_commit += loss_commit.item()
155
-
156
- if nb_iter % args.print_iter == 0 :
157
- avg_recons /= args.print_iter
158
- avg_perplexity /= args.print_iter
159
- avg_commit /= args.print_iter
160
-
161
- writer.add_scalar('./Train/L1', avg_recons, nb_iter)
162
- writer.add_scalar('./Train/PPL', avg_perplexity, nb_iter)
163
- writer.add_scalar('./Train/Commit', avg_commit, nb_iter)
164
-
165
- logger.info(f"Train. Iter {nb_iter} : \t Commit. {avg_commit:.5f} \t PPL. {avg_perplexity:.2f} \t Recons. {avg_recons:.5f}")
166
-
167
- avg_recons, avg_perplexity, avg_commit = 0., 0., 0.,
168
-
169
- if nb_iter % args.eval_iter==0 :
170
- best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, writer, logger = eval_trans.evaluation_vqvae(args.out_dir, val_loader, net, logger, writer, nb_iter, best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, eval_wrapper=eval_wrapper)
171
-
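The `update_lr_warm_up` helper in the removed training script is a plain linear warm-up: the learning rate climbs from roughly `lr / warm_up_iter` up to `lr` over the warm-up window, after which the `MultiStepLR` schedule takes over. A tiny standalone check of that formula (the base learning rate and iteration counts below are illustrative, not the script's defaults):

    def warmup_lr(base_lr, nb_iter, warm_up_iter):
        # same expression as update_lr_warm_up in train_vq.py
        return base_lr * (nb_iter + 1) / (warm_up_iter + 1)

    base_lr, warm_up_iter = 2e-4, 1000
    for it in (0, 250, 500, 999):
        print(it, warmup_lr(base_lr, it, warm_up_iter))
    # 0 -> ~2.0e-07, 250 -> ~5.0e-05, 500 -> ~1.0e-04, 999 -> ~2.0e-04
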
spaces/AIGText/GlyphControl/ldm/modules/distributions/__init__.py DELETED
File without changes
spaces/Abhishek92kumar/layoutlmv3-finetuned-cord_100/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Layoutlmv3-finetuned-cord 100
3
- emoji: 🌍
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: streamlit
7
- sdk_version: 1.21.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/login/callback/+page.server.ts DELETED
@@ -1,39 +0,0 @@
1
- import { redirect, error } from "@sveltejs/kit";
2
- import { getOIDCUserData, validateAndParseCsrfToken } from "$lib/server/auth";
3
- import { z } from "zod";
4
- import { base } from "$app/paths";
5
- import { updateUser } from "./updateUser";
6
-
7
- export async function load({ url, locals, cookies }) {
8
- const { error: errorName, error_description: errorDescription } = z
9
- .object({
10
- error: z.string().optional(),
11
- error_description: z.string().optional(),
12
- })
13
- .parse(Object.fromEntries(url.searchParams.entries()));
14
-
15
- if (errorName) {
16
- throw error(400, errorName + (errorDescription ? ": " + errorDescription : ""));
17
- }
18
-
19
- const { code, state } = z
20
- .object({
21
- code: z.string(),
22
- state: z.string(),
23
- })
24
- .parse(Object.fromEntries(url.searchParams.entries()));
25
-
26
- const csrfToken = Buffer.from(state, "base64").toString("utf-8");
27
-
28
- const validatedToken = await validateAndParseCsrfToken(csrfToken, locals.sessionId);
29
-
30
- if (!validatedToken) {
31
- throw error(403, "Invalid or expired CSRF token");
32
- }
33
-
34
- const { userData } = await getOIDCUserData({ redirectURI: validatedToken.redirectUrl }, code);
35
-
36
- await updateUser({ userData, locals, cookies });
37
-
38
- throw redirect(302, `${base}/`);
39
- }
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/utils.py DELETED
@@ -1,189 +0,0 @@
1
- """Utils for monoDepth."""
2
- import sys
3
- import re
4
- import numpy as np
5
- import cv2
6
- import torch
7
-
8
-
9
- def read_pfm(path):
10
- """Read pfm file.
11
-
12
- Args:
13
- path (str): path to file
14
-
15
- Returns:
16
- tuple: (data, scale)
17
- """
18
- with open(path, "rb") as file:
19
-
20
- color = None
21
- width = None
22
- height = None
23
- scale = None
24
- endian = None
25
-
26
- header = file.readline().rstrip()
27
- if header.decode("ascii") == "PF":
28
- color = True
29
- elif header.decode("ascii") == "Pf":
30
- color = False
31
- else:
32
- raise Exception("Not a PFM file: " + path)
33
-
34
- dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
35
- if dim_match:
36
- width, height = list(map(int, dim_match.groups()))
37
- else:
38
- raise Exception("Malformed PFM header.")
39
-
40
- scale = float(file.readline().decode("ascii").rstrip())
41
- if scale < 0:
42
- # little-endian
43
- endian = "<"
44
- scale = -scale
45
- else:
46
- # big-endian
47
- endian = ">"
48
-
49
- data = np.fromfile(file, endian + "f")
50
- shape = (height, width, 3) if color else (height, width)
51
-
52
- data = np.reshape(data, shape)
53
- data = np.flipud(data)
54
-
55
- return data, scale
56
-
57
-
58
- def write_pfm(path, image, scale=1):
59
- """Write pfm file.
60
-
61
- Args:
62
- path (str): path to file
63
- image (array): data
64
- scale (int, optional): Scale. Defaults to 1.
65
- """
66
-
67
- with open(path, "wb") as file:
68
- color = None
69
-
70
- if image.dtype.name != "float32":
71
- raise Exception("Image dtype must be float32.")
72
-
73
- image = np.flipud(image)
74
-
75
- if len(image.shape) == 3 and image.shape[2] == 3: # color image
76
- color = True
77
- elif (
78
- len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
79
- ): # greyscale
80
- color = False
81
- else:
82
- raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
83
-
84
- file.write("PF\n" if color else "Pf\n".encode())
85
- file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
86
-
87
- endian = image.dtype.byteorder
88
-
89
- if endian == "<" or endian == "=" and sys.byteorder == "little":
90
- scale = -scale
91
-
92
- file.write("%f\n".encode() % scale)
93
-
94
- image.tofile(file)
95
-
96
-
97
- def read_image(path):
98
- """Read image and output RGB image (0-1).
99
-
100
- Args:
101
- path (str): path to file
102
-
103
- Returns:
104
- array: RGB image (0-1)
105
- """
106
- img = cv2.imread(path)
107
-
108
- if img.ndim == 2:
109
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
110
-
111
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
112
-
113
- return img
114
-
115
-
116
- def resize_image(img):
117
- """Resize image and make it fit for network.
118
-
119
- Args:
120
- img (array): image
121
-
122
- Returns:
123
- tensor: data ready for network
124
- """
125
- height_orig = img.shape[0]
126
- width_orig = img.shape[1]
127
-
128
- if width_orig > height_orig:
129
- scale = width_orig / 384
130
- else:
131
- scale = height_orig / 384
132
-
133
- height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
134
- width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
135
-
136
- img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
137
-
138
- img_resized = (
139
- torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
140
- )
141
- img_resized = img_resized.unsqueeze(0)
142
-
143
- return img_resized
144
-
145
-
146
- def resize_depth(depth, width, height):
147
- """Resize depth map and bring to CPU (numpy).
148
-
149
- Args:
150
- depth (tensor): depth
151
- width (int): image width
152
- height (int): image height
153
-
154
- Returns:
155
- array: processed depth
156
- """
157
- depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
158
-
159
- depth_resized = cv2.resize(
160
- depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
161
- )
162
-
163
- return depth_resized
164
-
165
- def write_depth(path, depth, bits=1):
166
- """Write depth map to pfm and png file.
167
-
168
- Args:
169
- path (str): filepath without extension
170
- depth (array): depth
171
- """
172
- write_pfm(path + ".pfm", depth.astype(np.float32))
173
-
174
- depth_min = depth.min()
175
- depth_max = depth.max()
176
-
177
- max_val = (2**(8*bits))-1
178
-
179
- if depth_max - depth_min > np.finfo("float").eps:
180
- out = max_val * (depth - depth_min) / (depth_max - depth_min)
181
- else:
182
- out = np.zeros(depth.shape, dtype=depth.dtype)
183
-
184
- if bits == 1:
185
- cv2.imwrite(path + ".png", out.astype("uint8"))
186
- elif bits == 2:
187
- cv2.imwrite(path + ".png", out.astype("uint16"))
188
-
189
- return
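`resize_image` in the removed utils fits arbitrary inputs to the MiDaS encoder: the longer side is scaled to about 384 pixels and both dimensions are rounded up to multiples of 32. The sizing rule in isolation, with a couple of illustrative input shapes (the example numbers are not taken from the file):

    import numpy as np

    def network_hw(height_orig, width_orig):
        # same rule as resize_image: longer side ~384, both sides rounded up to a multiple of 32
        scale = width_orig / 384 if width_orig > height_orig else height_orig / 384
        height = int(np.ceil(height_orig / scale / 32) * 32)
        width = int(np.ceil(width_orig / scale / 32) * 32)
        return height, width

    print(network_hw(1080, 1920))  # (224, 384)
    print(network_hw(480, 640))    # (288, 384)
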
spaces/AdithyaSNair/Dog_breed_predictor/app.py DELETED
@@ -1,40 +0,0 @@
1
- #Library imports
2
- import numpy as np
3
- import streamlit as st
4
- import cv2
5
- from keras.models import load_model
6
-
7
-
8
- #Loading the Model
9
- model = load_model('dog_breed.h5')
10
-
11
- #Name of Classes
12
- CLASS_NAMES = ["scottish_deerhound","maltese_dog","afghan_hound","entlebucher","bernese_mountain_dog"]
13
-
14
- #Setting Title of App
15
- st.title("Dog Breed Prediction")
16
- st.markdown("Upload an image of the dog")
17
-
18
- #Uploading the dog image
19
- dog_image = st.file_uploader("Choose an image...", type="png")
20
- submit = st.button('Predict')
21
- #On predict button click
22
- if submit:
23
-
24
-
25
- if dog_image is not None:
26
-
27
- # Convert the file to an opencv image.
28
- file_bytes = np.asarray(bytearray(dog_image.read()), dtype=np.uint8)
29
- opencv_image = cv2.imdecode(file_bytes, 1)
30
-
31
- # Displaying the image
32
- st.image(opencv_image, channels="BGR")
33
- #Resizing the image
34
- opencv_image = cv2.resize(opencv_image, (224,224))
35
- #Convert image to 4 Dimension
36
- opencv_image.shape = (1,224,224,3)
37
- #Make Prediction
38
- Y_pred = model.predict(opencv_image)
39
-
40
- st.title(str("The Dog Breed is "+CLASS_NAMES[np.argmax(Y_pred)]))
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/methods/InputMethods.js DELETED
@@ -1,26 +0,0 @@
1
- export default {
2
- getSelectedChess1() {
3
- return this.mainState.selectedChess1;
4
- },
5
-
6
- getSelectedChess2() {
7
- return this.mainState.selectedChess2;
8
- },
9
-
10
- selectChess1(chess) {
11
- this.mainState.selectChess1(chess);
12
- return this;
13
- },
14
-
15
- selectChess2(chess) {
16
- this.mainState.selectChess2(chess);
17
- return this;
18
- },
19
-
20
- setInputEnable(enable) {
21
- if (this.input) {
22
- this.input.setEnable(enable);
23
- }
24
- return this;
25
- },
26
- }
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/GetEaseConfig.js DELETED
@@ -1,10 +0,0 @@
1
- var GetEaseConfig = function (easeConfig, menu) {
2
- if (easeConfig.sameOrientation) {
3
- easeConfig.orientation = menu.orientation;
4
- } else {
5
- easeConfig.orientation = (menu.orientation === 0) ? 1 : 0;
6
- }
7
- return easeConfig;
8
- }
9
-
10
- export default GetEaseConfig;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
1
- import Slider from './Slider';
2
-
3
- export default function (
4
- config?: Slider.IConfig
5
- ): Slider;
spaces/AhmedKhairullah/dmo/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Dmo
3
- emoji: 😻
4
- colorFrom: purple
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.20.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Alcedo/yunmedia/server.js DELETED
@@ -1,287 +0,0 @@
1
- import fastify from 'fastify'
2
- import cors from '@fastify/cors'
3
- import multipart from '@fastify/multipart'
4
- import fstatic from '@fastify/static'
5
- import websocket from '@fastify/websocket'
6
- import path from 'path'
7
- import fs from 'fs'
8
-
9
- import { getPttBuffer, launchBrowser, screenshot, checkWebsite, getPublicIP } from './common.js'
10
-
11
- const serverPort = 3000
12
- const __dirname = path.resolve()
13
-
14
- const server = fastify({
15
- bodyLimit: 30 * 1024 * 1024, // 30 MB upload limit
16
- logger: true
17
- })
18
- await server.register(multipart)
19
- await server.register(cors, {
20
- origin: '*'
21
- })
22
- await server.register(websocket, {
23
- cors: true,
24
- options: {
25
- maxPayload: 1048576
26
- }
27
- })
28
- server.get('/', (request, reply) => {
29
- fs.readFile("./index.html", (err, data) => {
30
- if (err) {
31
- // on error, send the error back
32
- reply.send(err)
33
- } else {
34
- // on success, set the response content type to text/html and send the page
35
- reply.type("text/html").send(data)
36
- }
37
- })
38
- })
39
- server.post('*', (request, reply) => {
40
- reply.send({
41
- state: 'error',
42
- code: '404',
43
- url: request.url.trim(),
44
- error: `无效的访问接口`
45
- })
46
- })
47
- // cloud voice transcoding
48
- server.post('/audio', async (request, reply) => {
49
- let result
50
- if (request.headers['content-type'].includes('multipart/form-data')) {
51
- const files = await request.saveRequestFiles()
52
- if (files.length > 0) {
53
- result = await getPttBuffer(files[0].filepath)
54
- if (result) {
55
- reply.send(result.buffer)
56
- return
57
- }
58
- }
59
- else {
60
- reply.send({ error: '无文件' })
61
- return
62
- }
63
- } else {
64
- const body = request.body || {}
65
- if (body.recordBuffer && body.recordBuffer.type === 'Buffer') {
66
- const buffer = Buffer.from(body.recordBuffer.data)
67
- result = await getPttBuffer(buffer)
68
- } else if (body.recordUrl) {
69
- result = await getPttBuffer(body.recordUrl)
70
- }
71
- }
72
- if (!result?.buffer) {
73
- reply.send({ error: '转码失败' })
74
- } else {
75
- reply.send(result)
76
- }
77
- })
78
- // cloud web page screenshot
79
- server.post('/screenshot', async (request, reply) => {
80
- const body = request.body || {}
81
- if (body.url) {
82
- const url = body.url.trim()
83
- if (/^https?:\/\/.+/.test(url)) {
84
- if (!await checkWebsite(url)) {
85
- reply.send({ error: '错误:无法访问指定页面' })
86
- return
87
- }
88
- let base64 = await screenshot(url, body.option || {})
89
- if (base64) {
90
- if (body.type === 'image') {
91
- const image = Buffer.from(base64, "base64")
92
- reply.type("image/png")
93
- reply.send(image)
94
- } else {
95
- reply.send({ url: url, base64: base64 })
96
- }
97
- } else {
98
- reply.send({ error: '错误:浏览器崩溃' })
99
- }
100
- } else {
101
- reply.send({ error: '错误:请输入一个合法的网址' })
102
- }
103
- } else {
104
- reply.send({ error: '错误:无效参数' })
105
- }
106
- })
107
- // URL reachability check
108
- server.post('/check', async (request, reply) => {
109
- const body = request.body || {}
110
- if (!body.url) {
111
- reply.send({ state: 'error', error: '参数错误' })
112
- return
113
- }
114
- if (await checkWebsite(body.url)) {
115
- reply.send({ state: 'ok' })
116
- } else {
117
- reply.send({ state: 'error', error: '内容服务器无法正常访问,请检查外网端口是否开放' })
118
- }
119
- })
120
-
121
- // chatgpt plugin
122
- await server.register(fstatic, {
123
- root: path.join(__dirname, 'resources/chatgpt-plugin/')
124
- })
125
- await server.get('/page/*', (request, reply) => {
126
- const stream = fs.createReadStream('resources/chatgpt-plugin/index.html')
127
- reply.type('text/html').send(stream)
128
- })
129
- server.post('/page', async (request, reply) => {
130
- const body = request.body || {}
131
- if (body.code) {
132
- const dir = 'cache/ChatGPTCache'
133
- const filename = body.code + '.json'
134
- const filepath = path.join(dir, filename)
135
- let data = fs.readFileSync(filepath, 'utf8')
136
- reply.send(data)
137
- }
138
- })
139
- server.post('/cache', async (request, reply) => {
140
- const body = request.body || {}
141
- if (body.content) {
142
- const dir = 'cache/ChatGPTCache'
143
- const filename = body.entry + '.json'
144
- const filepath = path.join(dir, filename)
145
- const ip = await getPublicIP()
146
- let botName = ''
147
- switch (body.model) {
148
- case 'bing':
149
- botName = 'Bing'
150
- break
151
- case 'api':
152
- botName = 'ChatGPT'
153
- break
154
- case 'api3':
155
- botName = 'ChatGPT'
156
- break
157
- case 'browser':
158
- botName = 'ChatGPT'
159
- break
160
- case 'chatglm':
161
- botName = 'ChatGLM'
162
- break
163
- case 'claude':
164
- botName = 'Claude'
165
- break
166
- default:
167
- botName = body.model
168
- break
169
- }
170
- try {
171
- fs.mkdirSync(dir, { recursive: true })
172
- const data = {
173
- user: body.content.senderName,
174
- bot: body.chatViewBotName || botName,
175
- userImg: body.userImg || '',
176
- botImg: body.botImg || '',
177
- question: body.content.prompt,
178
- message: body.content.content,
179
- group: body.content.group,
180
- herf: `http://${ip + ':' + serverPort}/page/${body.entry}`,
181
- quote: body.content.quote,
182
- images: body.content.images || [],
183
- suggest: body.content.suggest || [],
184
- model: body.model,
185
- mood: body.content.mood || 'blandness',
186
- live2d: false,
187
- time: new Date()
188
- }
189
- fs.writeFileSync(filepath, JSON.stringify(data))
190
- reply.send({ file: body.entry, cacheUrl: `http://${ip + ':' + serverPort}/page/${body.entry}` })
191
- } catch (err) {
192
- server.log.error(`用户生成缓存${body.entry}时发生错误: ${err}`)
193
- reply.send({ file: body.entry, cacheUrl: `http://${ip + ':' + serverPort}/page/${body.entry}`, error: body.entry + '生成失败' })
194
- }
195
- }
196
- })
197
-
198
- // websocket relay
199
- let clients = []
200
- let servers = []
201
- const wsFn = async (connection, request) => {
202
- connection.socket.on('open', message => {
203
- // connection opened
204
- console.log(`Received message: ${message}`)
205
- const response = { data: 'hello, client' }
206
- connection.socket.send(JSON.stringify(response))
207
- })
208
- connection.socket.on('message', async (message) => {
209
- try {
210
- const data = JSON.parse(message)
211
- if (!data.type) {
212
- await connection.socket.send(JSON.stringify({ data: data, error: '当前为media数据中转服务,请明确链接类型后再次发送!' }))
213
- return
214
- }
215
- // register a connection
216
- if (data.command === 'register') {
217
- if (data.region) {
218
- if (data.type === 'server') {
219
- const serverToken = Math.random().toString(36).slice(2, 18)
220
- servers[data.region] = {
221
- region: data.region,
222
- client: connection.socket,
223
- token: serverToken,
224
- }
225
- await connection.socket.send(JSON.stringify({ command: data.command, state: true, token: serverToken }))
226
- } else if (data.type === 'client' && data.serverToken) {
227
- if (servers[data.region] && servers[data.region].token === data.serverToken) {
228
- clients[data.region] = {
229
- region: data.region,
230
- user: data.user,
231
- client: connection.socket,
232
- }
233
- await connection.socket.send(JSON.stringify({ command: data.command, state: true }))
234
- } else {
235
- await connection.socket.send(JSON.stringify({ command: data.command, state: false }))
236
- }
237
- } else {
238
- await connection.socket.send(JSON.stringify({ command: data.command, state: false }))
239
- }
240
- } else {
241
- await connection.socket.send(JSON.stringify({ command: data.command, state: false }))
242
- }
243
- return
244
- }
245
- // forward client data
246
- if (data.type === 'client' && data.serverToken) {
247
- if (servers[data.region] && servers[data.region].token === data.serverToken) {
248
- await servers[data.region].client.send(JSON.stringify(data))
249
- } else {
250
- await connection.socket.send(JSON.stringify({ state: false, error: '服务区未注册或验证错误' }))
251
- }
252
- } else if (data.type === 'server') {
253
- if (clients[data.region]) {
254
- await clients[data.region].client.send(JSON.stringify(data))
255
- } else {
256
- await connection.socket.send(JSON.stringify({ state: false, error: '客户端未注册' }))
257
- }
258
- }
259
-
260
- } catch (error) {
261
- await connection.socket.send(JSON.stringify({ "error": error.message }))
262
- }
263
- })
264
- connection.socket.on('close', () => {
265
- // listen for the connection close event
266
- const response = { code: 403, data: 'Client disconnected', message: 'Client disconnected' }
267
- connection.socket.send(JSON.stringify(response))
268
- })
269
- return request
270
- }
271
- server.get('/ws', {
272
- websocket: true
273
- }, wsFn)
274
-
275
- server.listen({
276
- port: serverPort,
277
- host: '::'
278
- }, async (error) => {
279
- if (error) {
280
- server.log.error(`server start fail: ${error}`)
281
- } else {
282
- server.log.info(`server listening on ${server.server.address().port}`)
283
- await launchBrowser()
284
- // set a timer to restart the browser periodically (every 10 minutes) to reclaim resources
285
- setInterval(launchBrowser, 10 * 60 * 1000)
286
- }
287
- })
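The `/screenshot` route of the removed media server takes a JSON body with `url`, an optional `option` object and a `type` field: with `type: 'image'` it returns the PNG bytes directly, otherwise it answers with `{ url, base64 }`. A rough client-side sketch against that contract (the host and target URL are placeholders, `requests` is assumed to be available on the client, and the server must be reachable on port 3000 as configured above):

    import base64
    import requests  # assumed client-side dependency

    endpoint = "http://localhost:3000/screenshot"   # placeholder host
    payload = {"url": "https://example.com", "option": {}}

    # default: JSON response {"url": ..., "base64": ...}
    resp = requests.post(endpoint, json=payload).json()
    with open("shot.png", "wb") as f:
        f.write(base64.b64decode(resp["base64"]))

    # type == "image": the PNG bytes are sent back directly
    png_bytes = requests.post(endpoint, json={**payload, "type": "image"}).content
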
spaces/Amrrs/DragGan-Inversion/PTI/criteria/__init__.py DELETED
File without changes
spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/stylegan2/model.py DELETED
@@ -1,674 +0,0 @@
1
- import math
2
- import random
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
-
7
- from .op.fused_act import FusedLeakyReLU, fused_leaky_relu
8
- from .op.upfirdn2d import upfirdn2d
9
-
10
-
11
- class PixelNorm(nn.Module):
12
- def __init__(self):
13
- super().__init__()
14
-
15
- def forward(self, input):
16
- return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
17
-
18
-
19
- def make_kernel(k):
20
- k = torch.tensor(k, dtype=torch.float32)
21
-
22
- if k.ndim == 1:
23
- k = k[None, :] * k[:, None]
24
-
25
- k /= k.sum()
26
-
27
- return k
28
-
29
-
30
- class Upsample(nn.Module):
31
- def __init__(self, kernel, factor=2):
32
- super().__init__()
33
-
34
- self.factor = factor
35
- kernel = make_kernel(kernel) * (factor ** 2)
36
- self.register_buffer('kernel', kernel)
37
-
38
- p = kernel.shape[0] - factor
39
-
40
- pad0 = (p + 1) // 2 + factor - 1
41
- pad1 = p // 2
42
-
43
- self.pad = (pad0, pad1)
44
-
45
- def forward(self, input):
46
- out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
47
-
48
- return out
49
-
50
-
51
- class Downsample(nn.Module):
52
- def __init__(self, kernel, factor=2):
53
- super().__init__()
54
-
55
- self.factor = factor
56
- kernel = make_kernel(kernel)
57
- self.register_buffer('kernel', kernel)
58
-
59
- p = kernel.shape[0] - factor
60
-
61
- pad0 = (p + 1) // 2
62
- pad1 = p // 2
63
-
64
- self.pad = (pad0, pad1)
65
-
66
- def forward(self, input):
67
- out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
68
-
69
- return out
70
-
71
-
72
- class Blur(nn.Module):
73
- def __init__(self, kernel, pad, upsample_factor=1):
74
- super().__init__()
75
-
76
- kernel = make_kernel(kernel)
77
-
78
- if upsample_factor > 1:
79
- kernel = kernel * (upsample_factor ** 2)
80
-
81
- self.register_buffer('kernel', kernel)
82
-
83
- self.pad = pad
84
-
85
- def forward(self, input):
86
- out = upfirdn2d(input, self.kernel, pad=self.pad)
87
-
88
- return out
89
-
90
-
91
- class EqualConv2d(nn.Module):
92
- def __init__(
93
- self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
94
- ):
95
- super().__init__()
96
-
97
- self.weight = nn.Parameter(
98
- torch.randn(out_channel, in_channel, kernel_size, kernel_size)
99
- )
100
- self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
101
-
102
- self.stride = stride
103
- self.padding = padding
104
-
105
- if bias:
106
- self.bias = nn.Parameter(torch.zeros(out_channel))
107
-
108
- else:
109
- self.bias = None
110
-
111
- def forward(self, input):
112
- out = F.conv2d(
113
- input,
114
- self.weight * self.scale,
115
- bias=self.bias,
116
- stride=self.stride,
117
- padding=self.padding,
118
- )
119
-
120
- return out
121
-
122
- def __repr__(self):
123
- return (
124
- f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
125
- f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
126
- )
127
-
128
-
129
- class EqualLinear(nn.Module):
130
- def __init__(
131
- self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
132
- ):
133
- super().__init__()
134
-
135
- self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
136
-
137
- if bias:
138
- self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
139
-
140
- else:
141
- self.bias = None
142
-
143
- self.activation = activation
144
-
145
- self.scale = (1 / math.sqrt(in_dim)) * lr_mul
146
- self.lr_mul = lr_mul
147
-
148
- def forward(self, input):
149
- if self.activation:
150
- out = F.linear(input, self.weight * self.scale)
151
- out = fused_leaky_relu(out, self.bias * self.lr_mul)
152
-
153
- else:
154
- out = F.linear(
155
- input, self.weight * self.scale, bias=self.bias * self.lr_mul
156
- )
157
-
158
- return out
159
-
160
- def __repr__(self):
161
- return (
162
- f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
163
- )
164
-
165
-
166
- class ScaledLeakyReLU(nn.Module):
167
- def __init__(self, negative_slope=0.2):
168
- super().__init__()
169
-
170
- self.negative_slope = negative_slope
171
-
172
- def forward(self, input):
173
- out = F.leaky_relu(input, negative_slope=self.negative_slope)
174
-
175
- return out * math.sqrt(2)
176
-
177
-
178
- class ModulatedConv2d(nn.Module):
179
- def __init__(
180
- self,
181
- in_channel,
182
- out_channel,
183
- kernel_size,
184
- style_dim,
185
- demodulate=True,
186
- upsample=False,
187
- downsample=False,
188
- blur_kernel=[1, 3, 3, 1],
189
- ):
190
- super().__init__()
191
-
192
- self.eps = 1e-8
193
- self.kernel_size = kernel_size
194
- self.in_channel = in_channel
195
- self.out_channel = out_channel
196
- self.upsample = upsample
197
- self.downsample = downsample
198
-
199
- if upsample:
200
- factor = 2
201
- p = (len(blur_kernel) - factor) - (kernel_size - 1)
202
- pad0 = (p + 1) // 2 + factor - 1
203
- pad1 = p // 2 + 1
204
-
205
- self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
206
-
207
- if downsample:
208
- factor = 2
209
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
210
- pad0 = (p + 1) // 2
211
- pad1 = p // 2
212
-
213
- self.blur = Blur(blur_kernel, pad=(pad0, pad1))
214
-
215
- fan_in = in_channel * kernel_size ** 2
216
- self.scale = 1 / math.sqrt(fan_in)
217
- self.padding = kernel_size // 2
218
-
219
- self.weight = nn.Parameter(
220
- torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
221
- )
222
-
223
- self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
224
-
225
- self.demodulate = demodulate
226
-
227
- def __repr__(self):
228
- return (
229
- f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
230
- f'upsample={self.upsample}, downsample={self.downsample})'
231
- )
232
-
233
- def forward(self, input, style):
234
- batch, in_channel, height, width = input.shape
235
-
236
- style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
237
- weight = self.scale * self.weight * style
238
-
239
- if self.demodulate:
240
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
241
- weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
242
-
243
- weight = weight.view(
244
- batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
245
- )
246
-
247
- if self.upsample:
248
- input = input.view(1, batch * in_channel, height, width)
249
- weight = weight.view(
250
- batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
251
- )
252
- weight = weight.transpose(1, 2).reshape(
253
- batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
254
- )
255
- out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
256
- _, _, height, width = out.shape
257
- out = out.view(batch, self.out_channel, height, width)
258
- out = self.blur(out)
259
-
260
- elif self.downsample:
261
- input = self.blur(input)
262
- _, _, height, width = input.shape
263
- input = input.view(1, batch * in_channel, height, width)
264
- out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
265
- _, _, height, width = out.shape
266
- out = out.view(batch, self.out_channel, height, width)
267
-
268
- else:
269
- input = input.view(1, batch * in_channel, height, width)
270
- out = F.conv2d(input, weight, padding=self.padding, groups=batch)
271
- _, _, height, width = out.shape
272
- out = out.view(batch, self.out_channel, height, width)
273
-
274
- return out
275
-
276
-
277
- class NoiseInjection(nn.Module):
278
- def __init__(self):
279
- super().__init__()
280
-
281
- self.weight = nn.Parameter(torch.zeros(1))
282
-
283
- def forward(self, image, noise=None):
284
- if noise is None:
285
- batch, _, height, width = image.shape
286
- noise = image.new_empty(batch, 1, height, width).normal_()
287
-
288
- return image + self.weight * noise
289
-
290
-
291
- class ConstantInput(nn.Module):
292
- def __init__(self, channel, size=4):
293
- super().__init__()
294
-
295
- self.input = nn.Parameter(torch.randn(1, channel, size, size))
296
-
297
- def forward(self, input):
298
- batch = input.shape[0]
299
- out = self.input.repeat(batch, 1, 1, 1)
300
-
301
- return out
302
-
303
-
304
- class StyledConv(nn.Module):
305
- def __init__(
306
- self,
307
- in_channel,
308
- out_channel,
309
- kernel_size,
310
- style_dim,
311
- upsample=False,
312
- blur_kernel=[1, 3, 3, 1],
313
- demodulate=True,
314
- ):
315
- super().__init__()
316
-
317
- self.conv = ModulatedConv2d(
318
- in_channel,
319
- out_channel,
320
- kernel_size,
321
- style_dim,
322
- upsample=upsample,
323
- blur_kernel=blur_kernel,
324
- demodulate=demodulate,
325
- )
326
-
327
- self.noise = NoiseInjection()
328
- # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
329
- # self.activate = ScaledLeakyReLU(0.2)
330
- self.activate = FusedLeakyReLU(out_channel)
331
-
332
- def forward(self, input, style, noise=None):
333
- out = self.conv(input, style)
334
- out = self.noise(out, noise=noise)
335
- # out = out + self.bias
336
- out = self.activate(out)
337
-
338
- return out
339
-
340
-
341
- class ToRGB(nn.Module):
342
- def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
343
- super().__init__()
344
-
345
- if upsample:
346
- self.upsample = Upsample(blur_kernel)
347
-
348
- self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
349
- self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
350
-
351
- def forward(self, input, style, skip=None):
352
- out = self.conv(input, style)
353
- out = out + self.bias
354
-
355
- if skip is not None:
356
- skip = self.upsample(skip)
357
-
358
- out = out + skip
359
-
360
- return out
361
-
362
-
363
- class Generator(nn.Module):
364
- def __init__(
365
- self,
366
- size,
367
- style_dim,
368
- n_mlp,
369
- channel_multiplier=2,
370
- blur_kernel=[1, 3, 3, 1],
371
- lr_mlp=0.01,
372
- ):
373
- super().__init__()
374
-
375
- self.size = size
376
-
377
- self.style_dim = style_dim
378
-
379
- layers = [PixelNorm()]
380
-
381
- for i in range(n_mlp):
382
- layers.append(
383
- EqualLinear(
384
- style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
385
- )
386
- )
387
-
388
- self.style = nn.Sequential(*layers)
389
-
390
- self.channels = {
391
- 4: 512,
392
- 8: 512,
393
- 16: 512,
394
- 32: 512,
395
- 64: 256 * channel_multiplier,
396
- 128: 128 * channel_multiplier,
397
- 256: 64 * channel_multiplier,
398
- 512: 32 * channel_multiplier,
399
- 1024: 16 * channel_multiplier,
400
- }
401
-
402
- self.input = ConstantInput(self.channels[4])
403
- self.conv1 = StyledConv(
404
- self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
405
- )
406
- self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
407
-
408
- self.log_size = int(math.log(size, 2))
409
- self.num_layers = (self.log_size - 2) * 2 + 1
410
-
411
- self.convs = nn.ModuleList()
412
- self.upsamples = nn.ModuleList()
413
- self.to_rgbs = nn.ModuleList()
414
- self.noises = nn.Module()
415
-
416
- in_channel = self.channels[4]
417
-
418
- for layer_idx in range(self.num_layers):
419
- res = (layer_idx + 5) // 2
420
- shape = [1, 1, 2 ** res, 2 ** res]
421
- self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
422
-
423
- for i in range(3, self.log_size + 1):
424
- out_channel = self.channels[2 ** i]
425
-
426
- self.convs.append(
427
- StyledConv(
428
- in_channel,
429
- out_channel,
430
- 3,
431
- style_dim,
432
- upsample=True,
433
- blur_kernel=blur_kernel,
434
- )
435
- )
436
-
437
- self.convs.append(
438
- StyledConv(
439
- out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
440
- )
441
- )
442
-
443
- self.to_rgbs.append(ToRGB(out_channel, style_dim))
444
-
445
- in_channel = out_channel
446
-
447
- self.n_latent = self.log_size * 2 - 2
448
-
449
- def make_noise(self):
450
- device = self.input.input.device
451
-
452
- noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
453
-
454
- for i in range(3, self.log_size + 1):
455
- for _ in range(2):
456
- noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
457
-
458
- return noises
459
-
460
- def mean_latent(self, n_latent):
461
- latent_in = torch.randn(
462
- n_latent, self.style_dim, device=self.input.input.device
463
- )
464
- latent = self.style(latent_in).mean(0, keepdim=True)
465
-
466
- return latent
467
-
468
- def get_latent(self, input):
469
- return self.style(input)
470
-
471
- def forward(
472
- self,
473
- styles,
474
- return_latents=False,
475
- return_features=False,
476
- inject_index=None,
477
- truncation=1,
478
- truncation_latent=None,
479
- input_is_latent=False,
480
- noise=None,
481
- randomize_noise=True,
482
- ):
483
- if not input_is_latent:
484
- styles = [self.style(s) for s in styles]
485
-
486
- if noise is None:
487
- if randomize_noise:
488
- noise = [None] * self.num_layers
489
- else:
490
- noise = [
491
- getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
492
- ]
493
-
494
- if truncation < 1:
495
- style_t = []
496
-
497
- for style in styles:
498
- style_t.append(
499
- truncation_latent + truncation * (style - truncation_latent)
500
- )
501
-
502
- styles = style_t
503
-
504
- if len(styles) < 2:
505
- inject_index = self.n_latent
506
-
507
- if styles[0].ndim < 3:
508
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
509
- else:
510
- latent = styles[0]
511
-
512
- else:
513
- if inject_index is None:
514
- inject_index = random.randint(1, self.n_latent - 1)
515
-
516
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
517
- latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
518
-
519
- latent = torch.cat([latent, latent2], 1)
520
-
521
- out = self.input(latent)
522
- out = self.conv1(out, latent[:, 0], noise=noise[0])
523
-
524
- skip = self.to_rgb1(out, latent[:, 1])
525
-
526
- i = 1
527
- for conv1, conv2, noise1, noise2, to_rgb in zip(
528
- self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
529
- ):
530
- out = conv1(out, latent[:, i], noise=noise1)
531
- out = conv2(out, latent[:, i + 1], noise=noise2)
532
- skip = to_rgb(out, latent[:, i + 2], skip)
533
-
534
- i += 2
535
-
536
- image = skip
537
-
538
- if return_latents:
539
- return image, latent
540
- elif return_features:
541
- return image, out
542
- else:
543
- return image, None
544
-
545
-
546
- class ConvLayer(nn.Sequential):
547
- def __init__(
548
- self,
549
- in_channel,
550
- out_channel,
551
- kernel_size,
552
- downsample=False,
553
- blur_kernel=[1, 3, 3, 1],
554
- bias=True,
555
- activate=True,
556
- ):
557
- layers = []
558
-
559
- if downsample:
560
- factor = 2
561
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
562
- pad0 = (p + 1) // 2
563
- pad1 = p // 2
564
-
565
- layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
566
-
567
- stride = 2
568
- self.padding = 0
569
-
570
- else:
571
- stride = 1
572
- self.padding = kernel_size // 2
573
-
574
- layers.append(
575
- EqualConv2d(
576
- in_channel,
577
- out_channel,
578
- kernel_size,
579
- padding=self.padding,
580
- stride=stride,
581
- bias=bias and not activate,
582
- )
583
- )
584
-
585
- if activate:
586
- if bias:
587
- layers.append(FusedLeakyReLU(out_channel))
588
-
589
- else:
590
- layers.append(ScaledLeakyReLU(0.2))
591
-
592
- super().__init__(*layers)
593
-
594
-
595
- class ResBlock(nn.Module):
596
- def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
597
- super().__init__()
598
-
599
- self.conv1 = ConvLayer(in_channel, in_channel, 3)
600
- self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
601
-
602
- self.skip = ConvLayer(
603
- in_channel, out_channel, 1, downsample=True, activate=False, bias=False
604
- )
605
-
606
- def forward(self, input):
607
- out = self.conv1(input)
608
- out = self.conv2(out)
609
-
610
- skip = self.skip(input)
611
- out = (out + skip) / math.sqrt(2)
612
-
613
- return out
614
-
615
-
616
- class Discriminator(nn.Module):
617
- def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
618
- super().__init__()
619
-
620
- channels = {
621
- 4: 512,
622
- 8: 512,
623
- 16: 512,
624
- 32: 512,
625
- 64: 256 * channel_multiplier,
626
- 128: 128 * channel_multiplier,
627
- 256: 64 * channel_multiplier,
628
- 512: 32 * channel_multiplier,
629
- 1024: 16 * channel_multiplier,
630
- }
631
-
632
- convs = [ConvLayer(3, channels[size], 1)]
633
-
634
- log_size = int(math.log(size, 2))
635
-
636
- in_channel = channels[size]
637
-
638
- for i in range(log_size, 2, -1):
639
- out_channel = channels[2 ** (i - 1)]
640
-
641
- convs.append(ResBlock(in_channel, out_channel, blur_kernel))
642
-
643
- in_channel = out_channel
644
-
645
- self.convs = nn.Sequential(*convs)
646
-
647
- self.stddev_group = 4
648
- self.stddev_feat = 1
649
-
650
- self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
651
- self.final_linear = nn.Sequential(
652
- EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
653
- EqualLinear(channels[4], 1),
654
- )
655
-
656
- def forward(self, input):
657
- out = self.convs(input)
658
-
659
- batch, channel, height, width = out.shape
660
- group = min(batch, self.stddev_group)
661
- stddev = out.view(
662
- group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
663
- )
664
- stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
665
- stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
666
- stddev = stddev.repeat(group, 1, height, width)
667
- out = torch.cat([out, stddev], 1)
668
-
669
- out = self.final_conv(out)
670
-
671
- out = out.view(batch, -1)
672
- out = self.final_linear(out)
673
-
674
- return out
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/tutorials/basic_training.md DELETED
@@ -1,416 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- [[open-in-colab]]
14
-
15
- # Train a diffusion model
16
-
17
- Unconditional image generation is a popular application of diffusion models that generates images that look like those in the dataset used for training. Typically, the best results are obtained from finetuning a pretrained model on a specific dataset. You can find many of these checkpoints on the [Hub](https://huggingface.co/search/full-text?q=unconditional-image-generation&type=model), but if you can't find one you like, you can always train your own!
18
-
19
- This tutorial will teach you how to train a [`UNet2DModel`] from scratch on a subset of the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset to generate your own 🦋 butterflies 🦋.
20
-
21
- <Tip>
22
-
23
- 💡 This training tutorial is based on the [Training with 🧨 Diffusers](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) notebook. For additional details and context about diffusion models like how they work, check out the notebook!
24
-
25
- </Tip>
26
-
27
- Before you begin, make sure you have 🤗 Datasets installed to load and preprocess image datasets, and 🤗 Accelerate, to simplify training on any number of GPUs. The following command will also install [TensorBoard](https://www.tensorflow.org/tensorboard) to visualize training metrics (you can also use [Weights & Biases](https://docs.wandb.ai/) to track your training).
28
-
29
- ```py
30
- # uncomment to install the necessary libraries in Colab
31
- #!pip install diffusers[training]
32
- ```
33
-
34
- We encourage you to share your model with the community, and in order to do that, you'll need to login to your Hugging Face account (create one [here](https://hf.co/join) if you don't already have one!). You can login from a notebook and enter your token when prompted:
35
-
36
- ```py
37
- >>> from huggingface_hub import notebook_login
38
-
39
- >>> notebook_login()
40
- ```
41
-
42
- Or login in from the terminal:
43
-
44
- ```bash
45
- huggingface-cli login
46
- ```
47
-
48
- Since the model checkpoints are quite large, install [Git-LFS](https://git-lfs.com/) to version these large files:
49
-
50
- ```bash
51
- !sudo apt -qq install git-lfs
52
- !git config --global credential.helper store
53
- ```
54
-
55
- ## Training configuration
56
-
57
- For convenience, create a `TrainingConfig` class containing the training hyperparameters (feel free to adjust them):
58
-
59
- ```py
60
- >>> from dataclasses import dataclass
61
-
62
-
63
- >>> @dataclass
64
- ... class TrainingConfig:
65
- ... image_size = 128 # the generated image resolution
66
- ... train_batch_size = 16
67
- ... eval_batch_size = 16 # how many images to sample during evaluation
68
- ... num_epochs = 50
69
- ... gradient_accumulation_steps = 1
70
- ... learning_rate = 1e-4
71
- ... lr_warmup_steps = 500
72
- ... save_image_epochs = 10
73
- ... save_model_epochs = 30
74
- ... mixed_precision = "fp16" # `no` for float32, `fp16` for automatic mixed precision
75
- ... output_dir = "ddpm-butterflies-128" # the model name locally and on the HF Hub
76
-
77
- ... push_to_hub = True # whether to upload the saved model to the HF Hub
78
- ... hub_private_repo = False
79
- ... overwrite_output_dir = True # overwrite the old model when re-running the notebook
80
- ... seed = 0
81
-
82
-
83
- >>> config = TrainingConfig()
84
- ```
85
-
86
- ## Load the dataset
87
-
88
- You can easily load the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset with the 🤗 Datasets library:
89
-
90
- ```py
91
- >>> from datasets import load_dataset
92
-
93
- >>> config.dataset_name = "huggan/smithsonian_butterflies_subset"
94
- >>> dataset = load_dataset(config.dataset_name, split="train")
95
- ```
96
-
97
- <Tip>
98
-
99
- 💡 You can find additional datasets from the [HugGan Community Event](https://huggingface.co/huggan) or you can use your own dataset by creating a local [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder). Set `config.dataset_name` to the repository id of the dataset if it is from the HugGan Community Event, or `imagefolder` if you're using your own images.
100
-
101
- </Tip>
102
-
103
- 🤗 Datasets uses the [`~datasets.Image`] feature to automatically decode the image data and load it as a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html) which we can visualize:
104
-
105
- ```py
106
- >>> import matplotlib.pyplot as plt
107
-
108
- >>> fig, axs = plt.subplots(1, 4, figsize=(16, 4))
109
- >>> for i, image in enumerate(dataset[:4]["image"]):
110
- ... axs[i].imshow(image)
111
- ... axs[i].set_axis_off()
112
- >>> fig.show()
113
- ```
114
-
115
- <div class="flex justify-center">
116
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_ds.png"/>
117
- </div>
118
-
119
- The images are all different sizes though, so you'll need to preprocess them first:
120
-
121
- * `Resize` changes the image size to the one defined in `config.image_size`.
122
- * `RandomHorizontalFlip` augments the dataset by randomly mirroring the images.
123
- * `Normalize` is important to rescale the pixel values into a [-1, 1] range, which is what the model expects.
124
-
125
- ```py
126
- >>> from torchvision import transforms
127
-
128
- >>> preprocess = transforms.Compose(
129
- ... [
130
- ... transforms.Resize((config.image_size, config.image_size)),
131
- ... transforms.RandomHorizontalFlip(),
132
- ... transforms.ToTensor(),
133
- ... transforms.Normalize([0.5], [0.5]),
134
- ... ]
135
- ... )
136
- ```
137
-
138
- Use 🤗 Datasets' [`~datasets.Dataset.set_transform`] method to apply the `preprocess` function on the fly during training:
139
-
140
- ```py
141
- >>> def transform(examples):
142
- ... images = [preprocess(image.convert("RGB")) for image in examples["image"]]
143
- ... return {"images": images}
144
-
145
-
146
- >>> dataset.set_transform(transform)
147
- ```
148
-
149
- Feel free to visualize the images again to confirm that they've been resized. Now you're ready to wrap the dataset in a [DataLoader](https://pytorch.org/docs/stable/data#torch.utils.data.DataLoader) for training!
150
-
151
- ```py
152
- >>> import torch
153
-
154
- >>> train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True)
155
- ```
156
-
157
- ## Create a UNet2DModel
158
-
159
- Pretrained models in 🧨 Diffusers are easily created from their model class with the parameters you want. For example, to create a [`UNet2DModel`]:
160
-
161
- ```py
162
- >>> from diffusers import UNet2DModel
163
-
164
- >>> model = UNet2DModel(
165
- ... sample_size=config.image_size, # the target image resolution
166
- ... in_channels=3, # the number of input channels, 3 for RGB images
167
- ... out_channels=3, # the number of output channels
168
- ... layers_per_block=2, # how many ResNet layers to use per UNet block
169
- ... block_out_channels=(128, 128, 256, 256, 512, 512), # the number of output channels for each UNet block
170
- ... down_block_types=(
171
- ... "DownBlock2D", # a regular ResNet downsampling block
172
- ... "DownBlock2D",
173
- ... "DownBlock2D",
174
- ... "DownBlock2D",
175
- ... "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention
176
- ... "DownBlock2D",
177
- ... ),
178
- ... up_block_types=(
179
- ... "UpBlock2D", # a regular ResNet upsampling block
180
- ... "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention
181
- ... "UpBlock2D",
182
- ... "UpBlock2D",
183
- ... "UpBlock2D",
184
- ... "UpBlock2D",
185
- ... ),
186
- ... )
187
- ```
188
-
189
- It is often a good idea to quickly check the sample image shape matches the model output shape:
190
-
191
- ```py
192
- >>> sample_image = dataset[0]["images"].unsqueeze(0)
193
- >>> print("Input shape:", sample_image.shape)
194
- Input shape: torch.Size([1, 3, 128, 128])
195
-
196
- >>> print("Output shape:", model(sample_image, timestep=0).sample.shape)
197
- Output shape: torch.Size([1, 3, 128, 128])
198
- ```
199
-
200
- Great! Next, you'll need a scheduler to add some noise to the image.
201
-
202
- ## Create a scheduler
203
-
204
- The scheduler behaves differently depending on whether you're using the model for training or inference. During inference, the scheduler generates image from the noise. During training, the scheduler takes a model output - or a sample - from a specific point in the diffusion process and applies noise to the image according to a *noise schedule* and an *update rule*.
205
-
206
- Let's take a look at the [`DDPMScheduler`] and use the `add_noise` method to add some random noise to the `sample_image` from before:
207
-
208
- ```py
209
- >>> import torch
210
- >>> from PIL import Image
211
- >>> from diffusers import DDPMScheduler
212
-
213
- >>> noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
214
- >>> noise = torch.randn(sample_image.shape)
215
- >>> timesteps = torch.LongTensor([50])
216
- >>> noisy_image = noise_scheduler.add_noise(sample_image, noise, timesteps)
217
-
218
- >>> Image.fromarray(((noisy_image.permute(0, 2, 3, 1) + 1.0) * 127.5).type(torch.uint8).numpy()[0])
219
- ```
220
-
221
- <div class="flex justify-center">
222
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/noisy_butterfly.png"/>
223
- </div>
224
-
225
- The training objective of the model is to predict the noise added to the image. The loss at this step can be calculated by:
226
-
227
- ```py
228
- >>> import torch.nn.functional as F
229
-
230
- >>> noise_pred = model(noisy_image, timesteps).sample
231
- >>> loss = F.mse_loss(noise_pred, noise)
232
- ```
233
-
234
- ## Train the model
235
-
236
- By now, you have most of the pieces to start training the model and all that's left is putting everything together.
237
-
238
- First, you'll need an optimizer and a learning rate scheduler:
239
-
240
- ```py
241
- >>> from diffusers.optimization import get_cosine_schedule_with_warmup
242
-
243
- >>> optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)
244
- >>> lr_scheduler = get_cosine_schedule_with_warmup(
245
- ... optimizer=optimizer,
246
- ... num_warmup_steps=config.lr_warmup_steps,
247
- ... num_training_steps=(len(train_dataloader) * config.num_epochs),
248
- ... )
249
- ```
250
-
251
- Then, you'll need a way to evaluate the model. For evaluation, you can use the [`DDPMPipeline`] to generate a batch of sample images and save it as a grid:
252
-
253
- ```py
254
- >>> from diffusers import DDPMPipeline
255
- >>> import math
256
- >>> import os
257
-
258
-
259
- >>> def make_grid(images, rows, cols):
260
- ... w, h = images[0].size
261
- ... grid = Image.new("RGB", size=(cols * w, rows * h))
262
- ... for i, image in enumerate(images):
263
- ... grid.paste(image, box=(i % cols * w, i // cols * h))
264
- ... return grid
265
-
266
-
267
- >>> def evaluate(config, epoch, pipeline):
268
- ... # Sample some images from random noise (this is the backward diffusion process).
269
- ... # The default pipeline output type is `List[PIL.Image]`
270
- ... images = pipeline(
271
- ... batch_size=config.eval_batch_size,
272
- ... generator=torch.manual_seed(config.seed),
273
- ... ).images
274
-
275
- ... # Make a grid out of the images
276
- ... image_grid = make_grid(images, rows=4, cols=4)
277
-
278
- ... # Save the images
279
- ... test_dir = os.path.join(config.output_dir, "samples")
280
- ... os.makedirs(test_dir, exist_ok=True)
281
- ... image_grid.save(f"{test_dir}/{epoch:04d}.png")
282
- ```
283
-
284
- Now you can wrap all these components together in a training loop with 🤗 Accelerate for easy TensorBoard logging, gradient accumulation, and mixed precision training. To upload the model to the Hub, write a function to get your repository name and information and then push it to the Hub.
285
-
286
- <Tip>
287
-
288
- 💡 The training loop below may look intimidating and long, but it'll be worth it later when you launch your training in just one line of code! If you can't wait and want to start generating images, feel free to copy and run the code below. You can always come back and examine the training loop more closely later, like when you're waiting for your model to finish training. 🤗
289
-
290
- </Tip>
291
-
292
- ```py
293
- >>> from accelerate import Accelerator
294
- >>> from huggingface_hub import HfFolder, Repository, whoami
295
- >>> from tqdm.auto import tqdm
296
- >>> from pathlib import Path
297
- >>> import os
298
-
299
-
300
- >>> def get_full_repo_name(model_id: str, organization: str = None, token: str = None):
301
- ... if token is None:
302
- ... token = HfFolder.get_token()
303
- ... if organization is None:
304
- ... username = whoami(token)["name"]
305
- ... return f"{username}/{model_id}"
306
- ... else:
307
- ... return f"{organization}/{model_id}"
308
-
309
-
310
- >>> def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler):
311
- ... # Initialize accelerator and tensorboard logging
312
- ... accelerator = Accelerator(
313
- ... mixed_precision=config.mixed_precision,
314
- ... gradient_accumulation_steps=config.gradient_accumulation_steps,
315
- ... log_with="tensorboard",
316
- ... project_dir=os.path.join(config.output_dir, "logs"),
317
- ... )
318
- ... if accelerator.is_main_process:
319
- ... if config.push_to_hub:
320
- ... repo_name = get_full_repo_name(Path(config.output_dir).name)
321
- ... repo = Repository(config.output_dir, clone_from=repo_name)
322
- ... elif config.output_dir is not None:
323
- ... os.makedirs(config.output_dir, exist_ok=True)
324
- ... accelerator.init_trackers("train_example")
325
-
326
- ... # Prepare everything
327
- ... # There is no specific order to remember, you just need to unpack the
328
- ... # objects in the same order you gave them to the prepare method.
329
- ... model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
330
- ... model, optimizer, train_dataloader, lr_scheduler
331
- ... )
332
-
333
- ... global_step = 0
334
-
335
- ... # Now you train the model
336
- ... for epoch in range(config.num_epochs):
337
- ... progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process)
338
- ... progress_bar.set_description(f"Epoch {epoch}")
339
-
340
- ... for step, batch in enumerate(train_dataloader):
341
- ... clean_images = batch["images"]
342
- ... # Sample noise to add to the images
343
- ... noise = torch.randn(clean_images.shape).to(clean_images.device)
344
- ... bs = clean_images.shape[0]
345
-
346
- ... # Sample a random timestep for each image
347
- ... timesteps = torch.randint(
348
- ... 0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device
349
- ... ).long()
350
-
351
- ... # Add noise to the clean images according to the noise magnitude at each timestep
352
- ... # (this is the forward diffusion process)
353
- ... noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps)
354
-
355
- ... with accelerator.accumulate(model):
356
- ... # Predict the noise residual
357
- ... noise_pred = model(noisy_images, timesteps, return_dict=False)[0]
358
- ... loss = F.mse_loss(noise_pred, noise)
359
- ... accelerator.backward(loss)
360
-
361
- ... accelerator.clip_grad_norm_(model.parameters(), 1.0)
362
- ... optimizer.step()
363
- ... lr_scheduler.step()
364
- ... optimizer.zero_grad()
365
-
366
- ... progress_bar.update(1)
367
- ... logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
368
- ... progress_bar.set_postfix(**logs)
369
- ... accelerator.log(logs, step=global_step)
370
- ... global_step += 1
371
-
372
- ... # After each epoch you optionally sample some demo images with evaluate() and save the model
373
- ... if accelerator.is_main_process:
374
- ... pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler)
375
-
376
- ... if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1:
377
- ... evaluate(config, epoch, pipeline)
378
-
379
- ... if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1:
380
- ... if config.push_to_hub:
381
- ... repo.push_to_hub(commit_message=f"Epoch {epoch}", blocking=True)
382
- ... else:
383
- ... pipeline.save_pretrained(config.output_dir)
384
- ```
385
-
386
- Phew, that was quite a bit of code! But you're finally ready to launch the training with 🤗 Accelerate's [`~accelerate.notebook_launcher`] function. Pass the function the training loop, all the training arguments, and the number of processes (you can change this value to the number of GPUs available to you) to use for training:
387
-
388
- ```py
389
- >>> from accelerate import notebook_launcher
390
-
391
- >>> args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler)
392
-
393
- >>> notebook_launcher(train_loop, args, num_processes=1)
394
- ```
395
-
396
- Once training is complete, take a look at the final 🦋 images 🦋 generated by your diffusion model!
397
-
398
- ```py
399
- >>> import glob
400
-
401
- >>> sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png"))
402
- >>> Image.open(sample_images[-1])
403
- ```
404
-
405
- <div class="flex justify-center">
406
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_final.png"/>
407
- </div>
408
-
409
- ## Next steps
410
-
411
- Unconditional image generation is one example of a task that can be trained. You can explore other tasks and training techniques by visiting the [🧨 Diffusers Training Examples](../training/overview) page. Here are some examples of what you can learn:
412
-
413
- * [Textual Inversion](../training/text_inversion), an algorithm that teaches a model a specific visual concept and integrates it into the generated image.
414
- * [DreamBooth](../training/dreambooth), a technique for generating personalized images of a subject given several input images of the subject.
415
- * [Guide](../training/text2image) to finetuning a Stable Diffusion model on your own dataset.
416
- * [Guide](../training/lora) to using LoRA, a memory-efficient technique for finetuning really large models faster.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/random_sampler.py DELETED
@@ -1,78 +0,0 @@
1
- import torch
2
-
3
- from ..builder import BBOX_SAMPLERS
4
- from .base_sampler import BaseSampler
5
-
6
-
7
- @BBOX_SAMPLERS.register_module()
8
- class RandomSampler(BaseSampler):
9
- """Random sampler.
10
-
11
- Args:
12
- num (int): Number of samples
13
- pos_fraction (float): Fraction of positive samples
14
- neg_pos_up (int, optional): Upper bound number of negative and
15
- positive samples. Defaults to -1.
16
- add_gt_as_proposals (bool, optional): Whether to add ground truth
17
- boxes as proposals. Defaults to True.
18
- """
19
-
20
- def __init__(self,
21
- num,
22
- pos_fraction,
23
- neg_pos_ub=-1,
24
- add_gt_as_proposals=True,
25
- **kwargs):
26
- from mmdet.core.bbox import demodata
27
- super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
28
- add_gt_as_proposals)
29
- self.rng = demodata.ensure_rng(kwargs.get('rng', None))
30
-
31
- def random_choice(self, gallery, num):
32
- """Random select some elements from the gallery.
33
-
34
- If `gallery` is a Tensor, the returned indices will be a Tensor;
35
- If `gallery` is a ndarray or list, the returned indices will be a
36
- ndarray.
37
-
38
- Args:
39
- gallery (Tensor | ndarray | list): indices pool.
40
- num (int): expected sample num.
41
-
42
- Returns:
43
- Tensor or ndarray: sampled indices.
44
- """
45
- assert len(gallery) >= num
46
-
47
- is_tensor = isinstance(gallery, torch.Tensor)
48
- if not is_tensor:
49
- if torch.cuda.is_available():
50
- device = torch.cuda.current_device()
51
- else:
52
- device = 'cpu'
53
- gallery = torch.tensor(gallery, dtype=torch.long, device=device)
54
- perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
55
- rand_inds = gallery[perm]
56
- if not is_tensor:
57
- rand_inds = rand_inds.cpu().numpy()
58
- return rand_inds
59
-
60
- def _sample_pos(self, assign_result, num_expected, **kwargs):
61
- """Randomly sample some positive samples."""
62
- pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
63
- if pos_inds.numel() != 0:
64
- pos_inds = pos_inds.squeeze(1)
65
- if pos_inds.numel() <= num_expected:
66
- return pos_inds
67
- else:
68
- return self.random_choice(pos_inds, num_expected)
69
-
70
- def _sample_neg(self, assign_result, num_expected, **kwargs):
71
- """Randomly sample some negative samples."""
72
- neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
73
- if neg_inds.numel() != 0:
74
- neg_inds = neg_inds.squeeze(1)
75
- if len(neg_inds) <= num_expected:
76
- return neg_inds
77
- else:
78
- return self.random_choice(neg_inds, num_expected)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/__init__.py DELETED
@@ -1,16 +0,0 @@
1
- from .backbones import * # noqa: F401,F403
2
- from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
3
- ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
4
- build_detector, build_head, build_loss, build_neck,
5
- build_roi_extractor, build_shared_head)
6
- from .dense_heads import * # noqa: F401,F403
7
- from .detectors import * # noqa: F401,F403
8
- from .losses import * # noqa: F401,F403
9
- from .necks import * # noqa: F401,F403
10
- from .roi_heads import * # noqa: F401,F403
11
-
12
- __all__ = [
13
- 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
14
- 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
15
- 'build_shared_head', 'build_head', 'build_loss', 'build_detector'
16
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/test_mixins.py DELETED
@@ -1,348 +0,0 @@
1
- import logging
2
- import sys
3
-
4
- import torch
5
-
6
- from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,
7
- merge_aug_masks, multiclass_nms)
8
-
9
- logger = logging.getLogger(__name__)
10
-
11
- if sys.version_info >= (3, 7):
12
- from mmdet.utils.contextmanagers import completed
13
-
14
-
15
- class BBoxTestMixin(object):
16
-
17
- if sys.version_info >= (3, 7):
18
-
19
- async def async_test_bboxes(self,
20
- x,
21
- img_metas,
22
- proposals,
23
- rcnn_test_cfg,
24
- rescale=False,
25
- bbox_semaphore=None,
26
- global_lock=None):
27
- """Asynchronized test for box head without augmentation."""
28
- rois = bbox2roi(proposals)
29
- roi_feats = self.bbox_roi_extractor(
30
- x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
31
- if self.with_shared_head:
32
- roi_feats = self.shared_head(roi_feats)
33
- sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)
34
-
35
- async with completed(
36
- __name__, 'bbox_head_forward',
37
- sleep_interval=sleep_interval):
38
- cls_score, bbox_pred = self.bbox_head(roi_feats)
39
-
40
- img_shape = img_metas[0]['img_shape']
41
- scale_factor = img_metas[0]['scale_factor']
42
- det_bboxes, det_labels = self.bbox_head.get_bboxes(
43
- rois,
44
- cls_score,
45
- bbox_pred,
46
- img_shape,
47
- scale_factor,
48
- rescale=rescale,
49
- cfg=rcnn_test_cfg)
50
- return det_bboxes, det_labels
51
-
52
- def simple_test_bboxes(self,
53
- x,
54
- img_metas,
55
- proposals,
56
- rcnn_test_cfg,
57
- rescale=False):
58
- """Test only det bboxes without augmentation.
59
-
60
- Args:
61
- x (tuple[Tensor]): Feature maps of all scale level.
62
- img_metas (list[dict]): Image meta info.
63
- proposals (Tensor or List[Tensor]): Region proposals.
64
- rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
65
- rescale (bool): If True, return boxes in original image space.
66
- Default: False.
67
-
68
- Returns:
69
- tuple[list[Tensor], list[Tensor]]: The first list contains
70
- the boxes of the corresponding image in a batch, each
71
- tensor has the shape (num_boxes, 5) and last dimension
72
- 5 represent (tl_x, tl_y, br_x, br_y, score). Each Tensor
73
- in the second list is the labels with shape (num_boxes, ).
74
- The length of both lists should be equal to batch_size.
75
- """
76
- # get origin input shape to support onnx dynamic input shape
77
- if torch.onnx.is_in_onnx_export():
78
- assert len(
79
- img_metas
80
- ) == 1, 'Only support one input image while in exporting to ONNX'
81
- img_shapes = img_metas[0]['img_shape_for_onnx']
82
- else:
83
- img_shapes = tuple(meta['img_shape'] for meta in img_metas)
84
- scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
85
-
86
- # The length of proposals of different batches may be different.
87
- # In order to form a batch, a padding operation is required.
88
- if isinstance(proposals, list):
89
- # padding to form a batch
90
- max_size = max([proposal.size(0) for proposal in proposals])
91
- for i, proposal in enumerate(proposals):
92
- supplement = proposal.new_full(
93
- (max_size - proposal.size(0), proposal.size(1)), 0)
94
- proposals[i] = torch.cat((supplement, proposal), dim=0)
95
- rois = torch.stack(proposals, dim=0)
96
- else:
97
- rois = proposals
98
-
99
- batch_index = torch.arange(
100
- rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(
101
- rois.size(0), rois.size(1), 1)
102
- rois = torch.cat([batch_index, rois[..., :4]], dim=-1)
103
- batch_size = rois.shape[0]
104
- num_proposals_per_img = rois.shape[1]
105
-
106
- # Eliminate the batch dimension
107
- rois = rois.view(-1, 5)
108
- bbox_results = self._bbox_forward(x, rois)
109
- cls_score = bbox_results['cls_score']
110
- bbox_pred = bbox_results['bbox_pred']
111
-
112
- # Recover the batch dimension
113
- rois = rois.reshape(batch_size, num_proposals_per_img, -1)
114
- cls_score = cls_score.reshape(batch_size, num_proposals_per_img, -1)
115
-
116
- if not torch.onnx.is_in_onnx_export():
117
- # remove padding
118
- supplement_mask = rois[..., -1] == 0
119
- cls_score[supplement_mask, :] = 0
120
-
121
- # bbox_pred would be None in some detector when with_reg is False,
122
- # e.g. Grid R-CNN.
123
- if bbox_pred is not None:
124
- # the bbox prediction of some detectors like SABL is not Tensor
125
- if isinstance(bbox_pred, torch.Tensor):
126
- bbox_pred = bbox_pred.reshape(batch_size,
127
- num_proposals_per_img, -1)
128
- if not torch.onnx.is_in_onnx_export():
129
- bbox_pred[supplement_mask, :] = 0
130
- else:
131
- # TODO: Looking forward to a better way
132
- # For SABL
133
- bbox_preds = self.bbox_head.bbox_pred_split(
134
- bbox_pred, num_proposals_per_img)
135
- # apply bbox post-processing to each image individually
136
- det_bboxes = []
137
- det_labels = []
138
- for i in range(len(proposals)):
139
- # remove padding
140
- supplement_mask = proposals[i][..., -1] == 0
141
- for bbox in bbox_preds[i]:
142
- bbox[supplement_mask] = 0
143
- det_bbox, det_label = self.bbox_head.get_bboxes(
144
- rois[i],
145
- cls_score[i],
146
- bbox_preds[i],
147
- img_shapes[i],
148
- scale_factors[i],
149
- rescale=rescale,
150
- cfg=rcnn_test_cfg)
151
- det_bboxes.append(det_bbox)
152
- det_labels.append(det_label)
153
- return det_bboxes, det_labels
154
- else:
155
- bbox_pred = None
156
-
157
- return self.bbox_head.get_bboxes(
158
- rois,
159
- cls_score,
160
- bbox_pred,
161
- img_shapes,
162
- scale_factors,
163
- rescale=rescale,
164
- cfg=rcnn_test_cfg)
165
-
166
- def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
167
- """Test det bboxes with test time augmentation."""
168
- aug_bboxes = []
169
- aug_scores = []
170
- for x, img_meta in zip(feats, img_metas):
171
- # only one image in the batch
172
- img_shape = img_meta[0]['img_shape']
173
- scale_factor = img_meta[0]['scale_factor']
174
- flip = img_meta[0]['flip']
175
- flip_direction = img_meta[0]['flip_direction']
176
- # TODO more flexible
177
- proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
178
- scale_factor, flip, flip_direction)
179
- rois = bbox2roi([proposals])
180
- bbox_results = self._bbox_forward(x, rois)
181
- bboxes, scores = self.bbox_head.get_bboxes(
182
- rois,
183
- bbox_results['cls_score'],
184
- bbox_results['bbox_pred'],
185
- img_shape,
186
- scale_factor,
187
- rescale=False,
188
- cfg=None)
189
- aug_bboxes.append(bboxes)
190
- aug_scores.append(scores)
191
- # after merging, bboxes will be rescaled to the original image size
192
- merged_bboxes, merged_scores = merge_aug_bboxes(
193
- aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
194
- det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
195
- rcnn_test_cfg.score_thr,
196
- rcnn_test_cfg.nms,
197
- rcnn_test_cfg.max_per_img)
198
- return det_bboxes, det_labels
199
-
200
-
201
- class MaskTestMixin(object):
202
-
203
- if sys.version_info >= (3, 7):
204
-
205
- async def async_test_mask(self,
206
- x,
207
- img_metas,
208
- det_bboxes,
209
- det_labels,
210
- rescale=False,
211
- mask_test_cfg=None):
212
- """Asynchronized test for mask head without augmentation."""
213
- # image shape of the first image in the batch (only one)
214
- ori_shape = img_metas[0]['ori_shape']
215
- scale_factor = img_metas[0]['scale_factor']
216
- if det_bboxes.shape[0] == 0:
217
- segm_result = [[] for _ in range(self.mask_head.num_classes)]
218
- else:
219
- if rescale and not isinstance(scale_factor,
220
- (float, torch.Tensor)):
221
- scale_factor = det_bboxes.new_tensor(scale_factor)
222
- _bboxes = (
223
- det_bboxes[:, :4] *
224
- scale_factor if rescale else det_bboxes)
225
- mask_rois = bbox2roi([_bboxes])
226
- mask_feats = self.mask_roi_extractor(
227
- x[:len(self.mask_roi_extractor.featmap_strides)],
228
- mask_rois)
229
-
230
- if self.with_shared_head:
231
- mask_feats = self.shared_head(mask_feats)
232
- if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):
233
- sleep_interval = mask_test_cfg['async_sleep_interval']
234
- else:
235
- sleep_interval = 0.035
236
- async with completed(
237
- __name__,
238
- 'mask_head_forward',
239
- sleep_interval=sleep_interval):
240
- mask_pred = self.mask_head(mask_feats)
241
- segm_result = self.mask_head.get_seg_masks(
242
- mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,
243
- scale_factor, rescale)
244
- return segm_result
245
-
246
- def simple_test_mask(self,
247
- x,
248
- img_metas,
249
- det_bboxes,
250
- det_labels,
251
- rescale=False):
252
- """Simple test for mask head without augmentation."""
253
- # image shapes of images in the batch
254
- ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
255
- scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
256
-
257
- # The length of proposals of different batches may be different.
258
- # In order to form a batch, a padding operation is required.
259
- if isinstance(det_bboxes, list):
260
- # padding to form a batch
261
- max_size = max([bboxes.size(0) for bboxes in det_bboxes])
262
- for i, (bbox, label) in enumerate(zip(det_bboxes, det_labels)):
263
- supplement_bbox = bbox.new_full(
264
- (max_size - bbox.size(0), bbox.size(1)), 0)
265
- supplement_label = label.new_full((max_size - label.size(0), ),
266
- 0)
267
- det_bboxes[i] = torch.cat((supplement_bbox, bbox), dim=0)
268
- det_labels[i] = torch.cat((supplement_label, label), dim=0)
269
- det_bboxes = torch.stack(det_bboxes, dim=0)
270
- det_labels = torch.stack(det_labels, dim=0)
271
-
272
- batch_size = det_bboxes.size(0)
273
- num_proposals_per_img = det_bboxes.shape[1]
274
-
275
- # if det_bboxes is rescaled to the original image size, we need to
276
- # rescale it back to the testing scale to obtain RoIs.
277
- det_bboxes = det_bboxes[..., :4]
278
- if rescale:
279
- if not isinstance(scale_factors[0], float):
280
- scale_factors = det_bboxes.new_tensor(scale_factors)
281
- det_bboxes = det_bboxes * scale_factors.unsqueeze(1)
282
-
283
- batch_index = torch.arange(
284
- det_bboxes.size(0), device=det_bboxes.device).float().view(
285
- -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)
286
- mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
287
- mask_rois = mask_rois.view(-1, 5)
288
- mask_results = self._mask_forward(x, mask_rois)
289
- mask_pred = mask_results['mask_pred']
290
-
291
- # Recover the batch dimension
292
- mask_preds = mask_pred.reshape(batch_size, num_proposals_per_img,
293
- *mask_pred.shape[1:])
294
-
295
- # apply mask post-processing to each image individually
296
- segm_results = []
297
- for i in range(batch_size):
298
- mask_pred = mask_preds[i]
299
- det_bbox = det_bboxes[i]
300
- det_label = det_labels[i]
301
-
302
- # remove padding
303
- supplement_mask = det_bbox[..., -1] != 0
304
- mask_pred = mask_pred[supplement_mask]
305
- det_bbox = det_bbox[supplement_mask]
306
- det_label = det_label[supplement_mask]
307
-
308
- if det_label.shape[0] == 0:
309
- segm_results.append([[]
310
- for _ in range(self.mask_head.num_classes)
311
- ])
312
- else:
313
- segm_result = self.mask_head.get_seg_masks(
314
- mask_pred, det_bbox, det_label, self.test_cfg,
315
- ori_shapes[i], scale_factors[i], rescale)
316
- segm_results.append(segm_result)
317
- return segm_results
318
-
319
- def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
320
- """Test for mask head with test time augmentation."""
321
- if det_bboxes.shape[0] == 0:
322
- segm_result = [[] for _ in range(self.mask_head.num_classes)]
323
- else:
324
- aug_masks = []
325
- for x, img_meta in zip(feats, img_metas):
326
- img_shape = img_meta[0]['img_shape']
327
- scale_factor = img_meta[0]['scale_factor']
328
- flip = img_meta[0]['flip']
329
- flip_direction = img_meta[0]['flip_direction']
330
- _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
331
- scale_factor, flip, flip_direction)
332
- mask_rois = bbox2roi([_bboxes])
333
- mask_results = self._mask_forward(x, mask_rois)
334
- # convert to numpy array to save memory
335
- aug_masks.append(
336
- mask_results['mask_pred'].sigmoid().cpu().numpy())
337
- merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
338
-
339
- ori_shape = img_metas[0][0]['ori_shape']
340
- segm_result = self.mask_head.get_seg_masks(
341
- merged_masks,
342
- det_bboxes,
343
- det_labels,
344
- self.test_cfg,
345
- ori_shape,
346
- scale_factor=1.0,
347
- rescale=False)
348
- return segm_result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/utils/__init__.py DELETED
@@ -1,16 +0,0 @@
1
- from .builder import build_positional_encoding, build_transformer
2
- from .gaussian_target import gaussian_radius, gen_gaussian_target
3
- from .positional_encoding import (LearnedPositionalEncoding,
4
- SinePositionalEncoding)
5
- from .res_layer import ResLayer, SimplifiedBasicBlock
6
- from .transformer import (FFN, DynamicConv, MultiheadAttention, Transformer,
7
- TransformerDecoder, TransformerDecoderLayer,
8
- TransformerEncoder, TransformerEncoderLayer)
9
-
10
- __all__ = [
11
- 'ResLayer', 'gaussian_radius', 'gen_gaussian_target', 'MultiheadAttention',
12
- 'FFN', 'TransformerEncoderLayer', 'TransformerEncoder',
13
- 'TransformerDecoderLayer', 'TransformerDecoder', 'Transformer',
14
- 'build_transformer', 'build_positional_encoding', 'SinePositionalEncoding',
15
- 'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock'
16
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Arthur678/vits-uma-genshin-honkai/mel_processing.py DELETED
@@ -1,101 +0,0 @@
1
- import torch
2
- import torch.utils.data
3
- from librosa.filters import mel as librosa_mel_fn
4
-
5
- MAX_WAV_VALUE = 32768.0
6
-
7
-
8
- def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
9
- """
10
- PARAMS
11
- ------
12
- C: compression factor
13
- """
14
- return torch.log(torch.clamp(x, min=clip_val) * C)
15
-
16
-
17
- def dynamic_range_decompression_torch(x, C=1):
18
- """
19
- PARAMS
20
- ------
21
- C: compression factor used to compress
22
- """
23
- return torch.exp(x) / C
24
-
25
-
26
- def spectral_normalize_torch(magnitudes):
27
- output = dynamic_range_compression_torch(magnitudes)
28
- return output
29
-
30
-
31
- def spectral_de_normalize_torch(magnitudes):
32
- output = dynamic_range_decompression_torch(magnitudes)
33
- return output
34
-
35
-
36
- mel_basis = {}
37
- hann_window = {}
38
-
39
-
40
- def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
41
- if torch.min(y) < -1.:
42
- print('min value is ', torch.min(y))
43
- if torch.max(y) > 1.:
44
- print('max value is ', torch.max(y))
45
-
46
- global hann_window
47
- dtype_device = str(y.dtype) + '_' + str(y.device)
48
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
49
- if wnsize_dtype_device not in hann_window:
50
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
51
-
52
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
53
- y = y.squeeze(1)
54
-
55
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
56
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
57
-
58
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
59
- return spec
60
-
61
-
62
- def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
63
- global mel_basis
64
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
65
- fmax_dtype_device = str(fmax) + '_' + dtype_device
66
- if fmax_dtype_device not in mel_basis:
67
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
68
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
69
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
70
- spec = spectral_normalize_torch(spec)
71
- return spec
72
-
73
-
74
- def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
75
- if torch.min(y) < -1.:
76
- print('min value is ', torch.min(y))
77
- if torch.max(y) > 1.:
78
- print('max value is ', torch.max(y))
79
-
80
- global mel_basis, hann_window
81
- dtype_device = str(y.dtype) + '_' + str(y.device)
82
- fmax_dtype_device = str(fmax) + '_' + dtype_device
83
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
84
- if fmax_dtype_device not in mel_basis:
85
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
86
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
87
- if wnsize_dtype_device not in hann_window:
88
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
89
-
90
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
91
- y = y.squeeze(1)
92
-
93
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
94
- center=center, pad_mode='reflect', normalized=False, onesided=True)
95
-
96
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
97
-
98
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
99
- spec = spectral_normalize_torch(spec)
100
-
101
- return spec
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Banbri/zcvzcv/tailwind.config.js DELETED
@@ -1,72 +0,0 @@
1
- /** @type {import('tailwindcss').Config} */
2
- module.exports = {
3
- darkMode: ["class"],
4
- content: [
5
- './pages/**/*.{ts,tsx}',
6
- './components/**/*.{ts,tsx}',
7
- './app/**/*.{ts,tsx}',
8
- './src/**/*.{ts,tsx}',
9
- './src/lib/fonts.ts'
10
- ],
11
- theme: {
12
- container: {
13
- center: true,
14
- padding: "2rem",
15
- screens: {
16
- "2xl": "1400px",
17
- },
18
- },
19
- extend: {
20
- fontFamily: {
21
- indieflower: ['var(--font-indieflower)'],
22
- thegirlnextdoor: ['var(--font-the-girl-next-door)'],
23
- komika: ['var(--font-komika)'],
24
- actionman: ['var(--font-action-man)'],
25
- karantula: ['var(--font-karantula)'],
26
- manoskope: ['var(--font-manoskope)'],
27
- paeteround: ['var(--font-paete-round)'],
28
- qarmic: ['var(--font-qarmic-sans)'],
29
- archrival: ['var(--font-sf-arch-rival)'],
30
- cartoonist: ['var(--font-sf-cartoonist-hand)'],
31
- toontime: ['var(--font-sf-toontime)'],
32
- vtc: ['var(--font-vtc-letterer-pro)'],
33
- digitalstrip: ['var(--font-digital-strip-bb)'],
34
- },
35
- fontSize: {
36
- "7xs": "5px",
37
- "7xs": "6px",
38
- "6xs": "7px",
39
- "5xs": "8px",
40
- "4xs": "9px",
41
- "3xs": "10px",
42
- "2xs": "11px"
43
- },
44
- keyframes: {
45
- "accordion-down": {
46
- from: { height: 0 },
47
- to: { height: "var(--radix-accordion-content-height)" },
48
- },
49
- "accordion-up": {
50
- from: { height: "var(--radix-accordion-content-height)" },
51
- to: { height: 0 },
52
- },
53
- },
54
- animation: {
55
- "accordion-down": "accordion-down 0.2s ease-out",
56
- "accordion-up": "accordion-up 0.2s ease-out",
57
- },
58
- screens: {
59
- 'print': { 'raw': 'print' },
60
- },
61
- gridTemplateColumns: {
62
- '12': 'repeat(12, minmax(0, 1fr))',
63
- '16': 'repeat(16, minmax(0, 1fr))',
64
- },
65
- gridTemplateRows: {
66
- '12': 'repeat(12, minmax(0, 1fr))',
67
- '16': 'repeat(16, minmax(0, 1fr))',
68
- }
69
- },
70
- },
71
- plugins: [require("tailwindcss-animate")],
72
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BartPoint/VoiceChange/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: VoiceChange
3
- emoji: 👀
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.28.3
8
- app_file: app_multi.py
9
- pinned: false
10
- license: mit
11
- ---
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BetterAPI/BetterChat_new/src/lib/utils/streamToAsyncIterable.ts DELETED
@@ -1,15 +0,0 @@
1
- // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of#iterating_over_async_generators
2
- export async function* streamToAsyncIterable(
3
- stream: ReadableStream<Uint8Array>
4
- ): AsyncIterableIterator<Uint8Array> {
5
- const reader = stream.getReader();
6
- try {
7
- while (true) {
8
- const { done, value } = await reader.read();
9
- if (done) return;
10
- yield value;
11
- }
12
- } finally {
13
- reader.releaseLock();
14
- }
15
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/certifi/core.py DELETED
@@ -1,108 +0,0 @@
1
- """
2
- certifi.py
3
- ~~~~~~~~~~
4
-
5
- This module returns the installation location of cacert.pem or its contents.
6
- """
7
- import sys
8
-
9
-
10
- if sys.version_info >= (3, 11):
11
-
12
- from importlib.resources import as_file, files
13
-
14
- _CACERT_CTX = None
15
- _CACERT_PATH = None
16
-
17
- def where() -> str:
18
- # This is slightly terrible, but we want to delay extracting the file
19
- # in cases where we're inside of a zipimport situation until someone
20
- # actually calls where(), but we don't want to re-extract the file
21
- # on every call of where(), so we'll do it once then store it in a
22
- # global variable.
23
- global _CACERT_CTX
24
- global _CACERT_PATH
25
- if _CACERT_PATH is None:
26
- # This is slightly janky, the importlib.resources API wants you to
27
- # manage the cleanup of this file, so it doesn't actually return a
28
- # path, it returns a context manager that will give you the path
29
- # when you enter it and will do any cleanup when you leave it. In
30
- # the common case of not needing a temporary file, it will just
31
- # return the file system location and the __exit__() is a no-op.
32
- #
33
- # We also have to hold onto the actual context manager, because
34
- # it will do the cleanup whenever it gets garbage collected, so
35
- # we will also store that at the global level as well.
36
- _CACERT_CTX = as_file(files("pip._vendor.certifi").joinpath("cacert.pem"))
37
- _CACERT_PATH = str(_CACERT_CTX.__enter__())
38
-
39
- return _CACERT_PATH
40
-
41
- def contents() -> str:
42
- return files("pip._vendor.certifi").joinpath("cacert.pem").read_text(encoding="ascii")
43
-
44
- elif sys.version_info >= (3, 7):
45
-
46
- from importlib.resources import path as get_path, read_text
47
-
48
- _CACERT_CTX = None
49
- _CACERT_PATH = None
50
-
51
- def where() -> str:
52
- # This is slightly terrible, but we want to delay extracting the
53
- # file in cases where we're inside of a zipimport situation until
54
- # someone actually calls where(), but we don't want to re-extract
55
- # the file on every call of where(), so we'll do it once then store
56
- # it in a global variable.
57
- global _CACERT_CTX
58
- global _CACERT_PATH
59
- if _CACERT_PATH is None:
60
- # This is slightly janky, the importlib.resources API wants you
61
- # to manage the cleanup of this file, so it doesn't actually
62
- # return a path, it returns a context manager that will give
63
- # you the path when you enter it and will do any cleanup when
64
- # you leave it. In the common case of not needing a temporary
65
- # file, it will just return the file system location and the
66
- # __exit__() is a no-op.
67
- #
68
- # We also have to hold onto the actual context manager, because
69
- # it will do the cleanup whenever it gets garbage collected, so
70
- # we will also store that at the global level as well.
71
- _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem")
72
- _CACERT_PATH = str(_CACERT_CTX.__enter__())
73
-
74
- return _CACERT_PATH
75
-
76
- def contents() -> str:
77
- return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")
78
-
79
- else:
80
- import os
81
- import types
82
- from typing import Union
83
-
84
- Package = Union[types.ModuleType, str]
85
- Resource = Union[str, "os.PathLike"]
86
-
87
- # This fallback will work for Python versions prior to 3.7 that lack the
88
- # importlib.resources module but relies on the existing `where` function
89
- # so won't address issues with environments like PyOxidizer that don't set
90
- # __file__ on modules.
91
- def read_text(
92
- package: Package,
93
- resource: Resource,
94
- encoding: str = 'utf-8',
95
- errors: str = 'strict'
96
- ) -> str:
97
- with open(where(), encoding=encoding) as data:
98
- return data.read()
99
-
100
- # If we don't have importlib.resources, then we will just do the old logic
101
- # of assuming we're on the filesystem and munge the path directly.
102
- def where() -> str:
103
- f = os.path.dirname(__file__)
104
-
105
- return os.path.join(f, "cacert.pem")
106
-
107
- def contents() -> str:
108
- return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/text.py DELETED
@@ -1,1307 +0,0 @@
1
- import re
2
- from functools import partial, reduce
3
- from math import gcd
4
- from operator import itemgetter
5
- from typing import (
6
- TYPE_CHECKING,
7
- Any,
8
- Callable,
9
- Dict,
10
- Iterable,
11
- List,
12
- NamedTuple,
13
- Optional,
14
- Tuple,
15
- Union,
16
- )
17
-
18
- from ._loop import loop_last
19
- from ._pick import pick_bool
20
- from ._wrap import divide_line
21
- from .align import AlignMethod
22
- from .cells import cell_len, set_cell_size
23
- from .containers import Lines
24
- from .control import strip_control_codes
25
- from .emoji import EmojiVariant
26
- from .jupyter import JupyterMixin
27
- from .measure import Measurement
28
- from .segment import Segment
29
- from .style import Style, StyleType
30
-
31
- if TYPE_CHECKING: # pragma: no cover
32
- from .console import Console, ConsoleOptions, JustifyMethod, OverflowMethod
33
-
34
- DEFAULT_JUSTIFY: "JustifyMethod" = "default"
35
- DEFAULT_OVERFLOW: "OverflowMethod" = "fold"
36
-
37
-
38
- _re_whitespace = re.compile(r"\s+$")
39
-
40
- TextType = Union[str, "Text"]
41
-
42
- GetStyleCallable = Callable[[str], Optional[StyleType]]
43
-
44
-
45
- class Span(NamedTuple):
46
- """A marked up region in some text."""
47
-
48
- start: int
49
- """Span start index."""
50
- end: int
51
- """Span end index."""
52
- style: Union[str, Style]
53
- """Style associated with the span."""
54
-
55
- def __repr__(self) -> str:
56
- return f"Span({self.start}, {self.end}, {self.style!r})"
57
-
58
- def __bool__(self) -> bool:
59
- return self.end > self.start
60
-
61
- def split(self, offset: int) -> Tuple["Span", Optional["Span"]]:
62
- """Split a span in to 2 from a given offset."""
63
-
64
- if offset < self.start:
65
- return self, None
66
- if offset >= self.end:
67
- return self, None
68
-
69
- start, end, style = self
70
- span1 = Span(start, min(end, offset), style)
71
- span2 = Span(span1.end, end, style)
72
- return span1, span2
73
-
74
- def move(self, offset: int) -> "Span":
75
- """Move start and end by a given offset.
76
-
77
- Args:
78
- offset (int): Number of characters to add to start and end.
79
-
80
- Returns:
81
- TextSpan: A new TextSpan with adjusted position.
82
- """
83
- start, end, style = self
84
- return Span(start + offset, end + offset, style)
85
-
86
- def right_crop(self, offset: int) -> "Span":
87
- """Crop the span at the given offset.
88
-
89
- Args:
90
- offset (int): A value between start and end.
91
-
92
- Returns:
93
- Span: A new (possibly smaller) span.
94
- """
95
- start, end, style = self
96
- if offset >= end:
97
- return self
98
- return Span(start, min(offset, end), style)
99
-
100
-
101
- class Text(JupyterMixin):
102
- """Text with color / style.
103
-
104
- Args:
105
- text (str, optional): Default unstyled text. Defaults to "".
106
- style (Union[str, Style], optional): Base style for text. Defaults to "".
107
- justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
108
- overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
109
- no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
110
- end (str, optional): Character to end text with. Defaults to "\\\\n".
111
- tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
112
- spans (List[Span], optional). A list of predefined style spans. Defaults to None.
113
- """
114
-
115
- __slots__ = [
116
- "_text",
117
- "style",
118
- "justify",
119
- "overflow",
120
- "no_wrap",
121
- "end",
122
- "tab_size",
123
- "_spans",
124
- "_length",
125
- ]
126
-
127
- def __init__(
128
- self,
129
- text: str = "",
130
- style: Union[str, Style] = "",
131
- *,
132
- justify: Optional["JustifyMethod"] = None,
133
- overflow: Optional["OverflowMethod"] = None,
134
- no_wrap: Optional[bool] = None,
135
- end: str = "\n",
136
- tab_size: Optional[int] = 8,
137
- spans: Optional[List[Span]] = None,
138
- ) -> None:
139
- sanitized_text = strip_control_codes(text)
140
- self._text = [sanitized_text]
141
- self.style = style
142
- self.justify: Optional["JustifyMethod"] = justify
143
- self.overflow: Optional["OverflowMethod"] = overflow
144
- self.no_wrap = no_wrap
145
- self.end = end
146
- self.tab_size = tab_size
147
- self._spans: List[Span] = spans or []
148
- self._length: int = len(sanitized_text)
149
-
150
- def __len__(self) -> int:
151
- return self._length
152
-
153
- def __bool__(self) -> bool:
154
- return bool(self._length)
155
-
156
- def __str__(self) -> str:
157
- return self.plain
158
-
159
- def __repr__(self) -> str:
160
- return f"<text {self.plain!r} {self._spans!r}>"
161
-
162
- def __add__(self, other: Any) -> "Text":
163
- if isinstance(other, (str, Text)):
164
- result = self.copy()
165
- result.append(other)
166
- return result
167
- return NotImplemented
168
-
169
- def __eq__(self, other: object) -> bool:
170
- if not isinstance(other, Text):
171
- return NotImplemented
172
- return self.plain == other.plain and self._spans == other._spans
173
-
174
- def __contains__(self, other: object) -> bool:
175
- if isinstance(other, str):
176
- return other in self.plain
177
- elif isinstance(other, Text):
178
- return other.plain in self.plain
179
- return False
180
-
181
- def __getitem__(self, slice: Union[int, slice]) -> "Text":
182
- def get_text_at(offset: int) -> "Text":
183
- _Span = Span
184
- text = Text(
185
- self.plain[offset],
186
- spans=[
187
- _Span(0, 1, style)
188
- for start, end, style in self._spans
189
- if end > offset >= start
190
- ],
191
- end="",
192
- )
193
- return text
194
-
195
- if isinstance(slice, int):
196
- return get_text_at(slice)
197
- else:
198
- start, stop, step = slice.indices(len(self.plain))
199
- if step == 1:
200
- lines = self.divide([start, stop])
201
- return lines[1]
202
- else:
203
- # This would be a bit of work to implement efficiently
204
- # For now, it's not required
205
- raise TypeError("slices with step!=1 are not supported")
206
-
207
- @property
208
- def cell_len(self) -> int:
209
- """Get the number of cells required to render this text."""
210
- return cell_len(self.plain)
211
-
212
- @property
213
- def markup(self) -> str:
214
- """Get console markup to render this Text.
215
-
216
- Returns:
217
- str: A string potentially creating markup tags.
218
- """
219
- from .markup import escape
220
-
221
- output: List[str] = []
222
-
223
- plain = self.plain
224
- markup_spans = [
225
- (0, False, self.style),
226
- *((span.start, False, span.style) for span in self._spans),
227
- *((span.end, True, span.style) for span in self._spans),
228
- (len(plain), True, self.style),
229
- ]
230
- markup_spans.sort(key=itemgetter(0, 1))
231
- position = 0
232
- append = output.append
233
- for offset, closing, style in markup_spans:
234
- if offset > position:
235
- append(escape(plain[position:offset]))
236
- position = offset
237
- if style:
238
- append(f"[/{style}]" if closing else f"[{style}]")
239
- markup = "".join(output)
240
- return markup
241
-
242
- @classmethod
243
- def from_markup(
244
- cls,
245
- text: str,
246
- *,
247
- style: Union[str, Style] = "",
248
- emoji: bool = True,
249
- emoji_variant: Optional[EmojiVariant] = None,
250
- justify: Optional["JustifyMethod"] = None,
251
- overflow: Optional["OverflowMethod"] = None,
252
- end: str = "\n",
253
- ) -> "Text":
254
- """Create Text instance from markup.
255
-
256
- Args:
257
- text (str): A string containing console markup.
258
- emoji (bool, optional): Also render emoji code. Defaults to True.
259
- justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
260
- overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
261
- end (str, optional): Character to end text with. Defaults to "\\\\n".
262
-
263
- Returns:
264
- Text: A Text instance with markup rendered.
265
- """
266
- from .markup import render
267
-
268
- rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant)
269
- rendered_text.justify = justify
270
- rendered_text.overflow = overflow
271
- rendered_text.end = end
272
- return rendered_text
273
-
274
- @classmethod
275
- def from_ansi(
276
- cls,
277
- text: str,
278
- *,
279
- style: Union[str, Style] = "",
280
- justify: Optional["JustifyMethod"] = None,
281
- overflow: Optional["OverflowMethod"] = None,
282
- no_wrap: Optional[bool] = None,
283
- end: str = "\n",
284
- tab_size: Optional[int] = 8,
285
- ) -> "Text":
286
- """Create a Text object from a string containing ANSI escape codes.
287
-
288
- Args:
289
- text (str): A string containing escape codes.
290
- style (Union[str, Style], optional): Base style for text. Defaults to "".
291
- justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
292
- overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
293
- no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
294
- end (str, optional): Character to end text with. Defaults to "\\\\n".
295
- tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
296
- """
297
- from .ansi import AnsiDecoder
298
-
299
- joiner = Text(
300
- "\n",
301
- justify=justify,
302
- overflow=overflow,
303
- no_wrap=no_wrap,
304
- end=end,
305
- tab_size=tab_size,
306
- style=style,
307
- )
308
- decoder = AnsiDecoder()
309
- result = joiner.join(line for line in decoder.decode(text))
310
- return result
311
-
312
- @classmethod
313
- def styled(
314
- cls,
315
- text: str,
316
- style: StyleType = "",
317
- *,
318
- justify: Optional["JustifyMethod"] = None,
319
- overflow: Optional["OverflowMethod"] = None,
320
- ) -> "Text":
321
- """Construct a Text instance with a pre-applied styled. A style applied in this way won't be used
322
- to pad the text when it is justified.
323
-
324
- Args:
325
- text (str): A string containing console markup.
326
- style (Union[str, Style]): Style to apply to the text. Defaults to "".
327
- justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
328
- overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
329
-
330
- Returns:
331
- Text: A text instance with a style applied to the entire string.
332
- """
333
- styled_text = cls(text, justify=justify, overflow=overflow)
334
- styled_text.stylize(style)
335
- return styled_text
336
-
337
- @classmethod
338
- def assemble(
339
- cls,
340
- *parts: Union[str, "Text", Tuple[str, StyleType]],
341
- style: Union[str, Style] = "",
342
- justify: Optional["JustifyMethod"] = None,
343
- overflow: Optional["OverflowMethod"] = None,
344
- no_wrap: Optional[bool] = None,
345
- end: str = "\n",
346
- tab_size: int = 8,
347
- meta: Optional[Dict[str, Any]] = None,
348
- ) -> "Text":
349
- """Construct a text instance by combining a sequence of strings with optional styles.
350
- The positional arguments should be either strings, or a tuple of string + style.
351
-
352
- Args:
353
- style (Union[str, Style], optional): Base style for text. Defaults to "".
354
- justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
355
- overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
356
- end (str, optional): Character to end text with. Defaults to "\\\\n".
357
- tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
358
- meta (Dict[str, Any], optional): Meta data to apply to text, or None for no meta data. Defaults to None.
359
-
360
- Returns:
361
- Text: A new text instance.
362
- """
363
- text = cls(
364
- style=style,
365
- justify=justify,
366
- overflow=overflow,
367
- no_wrap=no_wrap,
368
- end=end,
369
- tab_size=tab_size,
370
- )
371
- append = text.append
372
- _Text = Text
373
- for part in parts:
374
- if isinstance(part, (_Text, str)):
375
- append(part)
376
- else:
377
- append(*part)
378
- if meta:
379
- text.apply_meta(meta)
380
- return text
381
-
382
- @property
383
- def plain(self) -> str:
384
- """Get the text as a single string."""
385
- if len(self._text) != 1:
386
- self._text[:] = ["".join(self._text)]
387
- return self._text[0]
388
-
389
- @plain.setter
390
- def plain(self, new_text: str) -> None:
391
- """Set the text to a new value."""
392
- if new_text != self.plain:
393
- sanitized_text = strip_control_codes(new_text)
394
- self._text[:] = [sanitized_text]
395
- old_length = self._length
396
- self._length = len(sanitized_text)
397
- if old_length > self._length:
398
- self._trim_spans()
399
-
400
- @property
401
- def spans(self) -> List[Span]:
402
- """Get a reference to the internal list of spans."""
403
- return self._spans
404
-
405
- @spans.setter
406
- def spans(self, spans: List[Span]) -> None:
407
- """Set spans."""
408
- self._spans = spans[:]
409
-
410
- def blank_copy(self, plain: str = "") -> "Text":
411
- """Return a new Text instance with copied meta data (but not the string or spans)."""
412
- copy_self = Text(
413
- plain,
414
- style=self.style,
415
- justify=self.justify,
416
- overflow=self.overflow,
417
- no_wrap=self.no_wrap,
418
- end=self.end,
419
- tab_size=self.tab_size,
420
- )
421
- return copy_self
422
-
423
- def copy(self) -> "Text":
424
- """Return a copy of this instance."""
425
- copy_self = Text(
426
- self.plain,
427
- style=self.style,
428
- justify=self.justify,
429
- overflow=self.overflow,
430
- no_wrap=self.no_wrap,
431
- end=self.end,
432
- tab_size=self.tab_size,
433
- )
434
- copy_self._spans[:] = self._spans
435
- return copy_self
436
-
437
- def stylize(
438
- self,
439
- style: Union[str, Style],
440
- start: int = 0,
441
- end: Optional[int] = None,
442
- ) -> None:
443
- """Apply a style to the text, or a portion of the text.
444
-
445
- Args:
446
- style (Union[str, Style]): Style instance or style definition to apply.
447
- start (int): Start offset (negative indexing is supported). Defaults to 0.
448
- end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
449
- """
450
- if style:
451
- length = len(self)
452
- if start < 0:
453
- start = length + start
454
- if end is None:
455
- end = length
456
- if end < 0:
457
- end = length + end
458
- if start >= length or end <= start:
459
- # Span not in text or not valid
460
- return
461
- self._spans.append(Span(start, min(length, end), style))
462
-
463
- def stylize_before(
464
- self,
465
- style: Union[str, Style],
466
- start: int = 0,
467
- end: Optional[int] = None,
468
- ) -> None:
469
- """Apply a style to the text, or a portion of the text. Styles will be applied before other styles already present.
470
-
471
- Args:
472
- style (Union[str, Style]): Style instance or style definition to apply.
473
- start (int): Start offset (negative indexing is supported). Defaults to 0.
474
- end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
475
- """
476
- if style:
477
- length = len(self)
478
- if start < 0:
479
- start = length + start
480
- if end is None:
481
- end = length
482
- if end < 0:
483
- end = length + end
484
- if start >= length or end <= start:
485
- # Span not in text or not valid
486
- return
487
- self._spans.insert(0, Span(start, min(length, end), style))
488
-
489
- def apply_meta(
490
- self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None
491
- ) -> None:
492
- """Apply meta data to the text, or a portion of the text.
493
-
494
- Args:
495
- meta (Dict[str, Any]): A dict of meta information.
496
- start (int): Start offset (negative indexing is supported). Defaults to 0.
497
- end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
498
-
499
- """
500
- style = Style.from_meta(meta)
501
- self.stylize(style, start=start, end=end)
502
-
503
- def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Text":
504
- """Apply event handlers (used by Textual project).
505
-
506
- Example:
507
- >>> from rich.text import Text
508
- >>> text = Text("hello world")
509
- >>> text.on(click="view.toggle('world')")
510
-
511
- Args:
512
- meta (Dict[str, Any]): Mapping of meta information.
513
- **handlers: Keyword args are prefixed with "@" to define handlers.
514
-
515
- Returns:
516
- Text: Self is returned so that methods may be chained.
517
- """
518
- meta = {} if meta is None else meta
519
- meta.update({f"@{key}": value for key, value in handlers.items()})
520
- self.stylize(Style.from_meta(meta))
521
- return self
522
-
523
- def remove_suffix(self, suffix: str) -> None:
524
- """Remove a suffix if it exists.
525
-
526
- Args:
527
- suffix (str): Suffix to remove.
528
- """
529
- if self.plain.endswith(suffix):
530
- self.right_crop(len(suffix))
531
-
532
- def get_style_at_offset(self, console: "Console", offset: int) -> Style:
533
- """Get the style of a character at give offset.
534
-
535
- Args:
536
- console (~Console): Console where text will be rendered.
537
- offset (int): Offset into text (negative indexing supported)
538
-
539
- Returns:
540
- Style: A Style instance.
541
- """
542
- # TODO: This is a little inefficient, it is only used by full justify
543
- if offset < 0:
544
- offset = len(self) + offset
545
- get_style = console.get_style
546
- style = get_style(self.style).copy()
547
- for start, end, span_style in self._spans:
548
- if end > offset >= start:
549
- style += get_style(span_style, default="")
550
- return style
551
-
552
- def highlight_regex(
553
- self,
554
- re_highlight: str,
555
- style: Optional[Union[GetStyleCallable, StyleType]] = None,
556
- *,
557
- style_prefix: str = "",
558
- ) -> int:
559
- """Highlight text with a regular expression, where group names are
560
- translated to styles.
561
-
562
- Args:
563
- re_highlight (str): A regular expression.
564
- style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable
565
- which accepts the matched text and returns a style. Defaults to None.
566
- style_prefix (str, optional): Optional prefix to add to style group names.
567
-
568
- Returns:
569
- int: Number of regex matches
570
- """
571
- count = 0
572
- append_span = self._spans.append
573
- _Span = Span
574
- plain = self.plain
575
- for match in re.finditer(re_highlight, plain):
576
- get_span = match.span
577
- if style:
578
- start, end = get_span()
579
- match_style = style(plain[start:end]) if callable(style) else style
580
- if match_style is not None and end > start:
581
- append_span(_Span(start, end, match_style))
582
-
583
- count += 1
584
- for name in match.groupdict().keys():
585
- start, end = get_span(name)
586
- if start != -1 and end > start:
587
- append_span(_Span(start, end, f"{style_prefix}{name}"))
588
- return count
589
-
590
- def highlight_words(
591
- self,
592
- words: Iterable[str],
593
- style: Union[str, Style],
594
- *,
595
- case_sensitive: bool = True,
596
- ) -> int:
597
- """Highlight words with a style.
598
-
599
- Args:
600
- words (Iterable[str]): Words to highlight.
601
- style (Union[str, Style]): Style to apply.
602
- case_sensitive (bool, optional): Enable case-sensitive matching. Defaults to True.
603
-
604
- Returns:
605
- int: Number of words highlighted.
606
- """
607
- re_words = "|".join(re.escape(word) for word in words)
608
- add_span = self._spans.append
609
- count = 0
610
- _Span = Span
611
- for match in re.finditer(
612
- re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE
613
- ):
614
- start, end = match.span(0)
615
- add_span(_Span(start, end, style))
616
- count += 1
617
- return count
618
-
619
- def rstrip(self) -> None:
620
- """Strip whitespace from end of text."""
621
- self.plain = self.plain.rstrip()
622
-
623
- def rstrip_end(self, size: int) -> None:
624
- """Remove whitespace beyond a certain width at the end of the text.
625
-
626
- Args:
627
- size (int): The desired size of the text.
628
- """
629
- text_length = len(self)
630
- if text_length > size:
631
- excess = text_length - size
632
- whitespace_match = _re_whitespace.search(self.plain)
633
- if whitespace_match is not None:
634
- whitespace_count = len(whitespace_match.group(0))
635
- self.right_crop(min(whitespace_count, excess))
636
-
637
- def set_length(self, new_length: int) -> None:
638
- """Set new length of the text, clipping or padding is required."""
639
- length = len(self)
640
- if length != new_length:
641
- if length < new_length:
642
- self.pad_right(new_length - length)
643
- else:
644
- self.right_crop(length - new_length)
645
-
646
- def __rich_console__(
647
- self, console: "Console", options: "ConsoleOptions"
648
- ) -> Iterable[Segment]:
649
- tab_size: int = console.tab_size or self.tab_size or 8
650
- justify = self.justify or options.justify or DEFAULT_JUSTIFY
651
-
652
- overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW
653
-
654
- lines = self.wrap(
655
- console,
656
- options.max_width,
657
- justify=justify,
658
- overflow=overflow,
659
- tab_size=tab_size or 8,
660
- no_wrap=pick_bool(self.no_wrap, options.no_wrap, False),
661
- )
662
- all_lines = Text("\n").join(lines)
663
- yield from all_lines.render(console, end=self.end)
664
-
665
- def __rich_measure__(
666
- self, console: "Console", options: "ConsoleOptions"
667
- ) -> Measurement:
668
- text = self.plain
669
- lines = text.splitlines()
670
- max_text_width = max(cell_len(line) for line in lines) if lines else 0
671
- words = text.split()
672
- min_text_width = (
673
- max(cell_len(word) for word in words) if words else max_text_width
674
- )
675
- return Measurement(min_text_width, max_text_width)
676
-
677
- def render(self, console: "Console", end: str = "") -> Iterable["Segment"]:
678
- """Render the text as Segments.
679
-
680
- Args:
681
- console (Console): Console instance.
682
- end (Optional[str], optional): Optional end character.
683
-
684
- Returns:
685
- Iterable[Segment]: Result of render that may be written to the console.
686
- """
687
- _Segment = Segment
688
- text = self.plain
689
- if not self._spans:
690
- yield Segment(text)
691
- if end:
692
- yield _Segment(end)
693
- return
694
- get_style = partial(console.get_style, default=Style.null())
695
-
696
- enumerated_spans = list(enumerate(self._spans, 1))
697
- style_map = {index: get_style(span.style) for index, span in enumerated_spans}
698
- style_map[0] = get_style(self.style)
699
-
700
- spans = [
701
- (0, False, 0),
702
- *((span.start, False, index) for index, span in enumerated_spans),
703
- *((span.end, True, index) for index, span in enumerated_spans),
704
- (len(text), True, 0),
705
- ]
706
- spans.sort(key=itemgetter(0, 1))
707
-
708
- stack: List[int] = []
709
- stack_append = stack.append
710
- stack_pop = stack.remove
711
-
712
- style_cache: Dict[Tuple[Style, ...], Style] = {}
713
- style_cache_get = style_cache.get
714
- combine = Style.combine
715
-
716
- def get_current_style() -> Style:
717
- """Construct current style from stack."""
718
- styles = tuple(style_map[_style_id] for _style_id in sorted(stack))
719
- cached_style = style_cache_get(styles)
720
- if cached_style is not None:
721
- return cached_style
722
- current_style = combine(styles)
723
- style_cache[styles] = current_style
724
- return current_style
725
-
726
- for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]):
727
- if leaving:
728
- stack_pop(style_id)
729
- else:
730
- stack_append(style_id)
731
- if next_offset > offset:
732
- yield _Segment(text[offset:next_offset], get_current_style())
733
- if end:
734
- yield _Segment(end)
735
-
736
- def join(self, lines: Iterable["Text"]) -> "Text":
737
- """Join text together with this instance as the separator.
738
-
739
- Args:
740
- lines (Iterable[Text]): An iterable of Text instances to join.
741
-
742
- Returns:
743
- Text: A new text instance containing join text.
744
- """
745
-
746
- new_text = self.blank_copy()
747
-
748
- def iter_text() -> Iterable["Text"]:
749
- if self.plain:
750
- for last, line in loop_last(lines):
751
- yield line
752
- if not last:
753
- yield self
754
- else:
755
- yield from lines
756
-
757
- extend_text = new_text._text.extend
758
- append_span = new_text._spans.append
759
- extend_spans = new_text._spans.extend
760
- offset = 0
761
- _Span = Span
762
-
763
- for text in iter_text():
764
- extend_text(text._text)
765
- if text.style:
766
- append_span(_Span(offset, offset + len(text), text.style))
767
- extend_spans(
768
- _Span(offset + start, offset + end, style)
769
- for start, end, style in text._spans
770
- )
771
- offset += len(text)
772
- new_text._length = offset
773
- return new_text
774
-
775
- def expand_tabs(self, tab_size: Optional[int] = None) -> None:
776
- """Converts tabs to spaces.
777
-
778
- Args:
779
- tab_size (int, optional): Size of tabs. Defaults to 8.
780
-
781
- """
782
- if "\t" not in self.plain:
783
- return
784
- pos = 0
785
- if tab_size is None:
786
- tab_size = self.tab_size
787
- assert tab_size is not None
788
- result = self.blank_copy()
789
- append = result.append
790
-
791
- _style = self.style
792
- for line in self.split("\n", include_separator=True):
793
- parts = line.split("\t", include_separator=True)
794
- for part in parts:
795
- if part.plain.endswith("\t"):
796
- part._text = [part.plain[:-1] + " "]
797
- append(part)
798
- pos += len(part)
799
- spaces = tab_size - ((pos - 1) % tab_size) - 1
800
- if spaces:
801
- append(" " * spaces, _style)
802
- pos += spaces
803
- else:
804
- append(part)
805
- self._text = [result.plain]
806
- self._length = len(self.plain)
807
- self._spans[:] = result._spans
808
-
809
- def truncate(
810
- self,
811
- max_width: int,
812
- *,
813
- overflow: Optional["OverflowMethod"] = None,
814
- pad: bool = False,
815
- ) -> None:
816
- """Truncate text if it is longer that a given width.
817
-
818
- Args:
819
- max_width (int): Maximum number of characters in text.
820
- overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None, to use self.overflow.
821
- pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False.
822
- """
823
- _overflow = overflow or self.overflow or DEFAULT_OVERFLOW
824
- if _overflow != "ignore":
825
- length = cell_len(self.plain)
826
- if length > max_width:
827
- if _overflow == "ellipsis":
828
- self.plain = set_cell_size(self.plain, max_width - 1) + "…"
829
- else:
830
- self.plain = set_cell_size(self.plain, max_width)
831
- if pad and length < max_width:
832
- spaces = max_width - length
833
- self._text = [f"{self.plain}{' ' * spaces}"]
834
- self._length = len(self.plain)
835
-
836
- def _trim_spans(self) -> None:
837
- """Remove or modify any spans that are over the end of the text."""
838
- max_offset = len(self.plain)
839
- _Span = Span
840
- self._spans[:] = [
841
- (
842
- span
843
- if span.end < max_offset
844
- else _Span(span.start, min(max_offset, span.end), span.style)
845
- )
846
- for span in self._spans
847
- if span.start < max_offset
848
- ]
849
-
850
- def pad(self, count: int, character: str = " ") -> None:
851
- """Pad left and right with a given number of characters.
852
-
853
- Args:
854
- count (int): Width of padding.
855
- """
856
- assert len(character) == 1, "Character must be a string of length 1"
857
- if count:
858
- pad_characters = character * count
859
- self.plain = f"{pad_characters}{self.plain}{pad_characters}"
860
- _Span = Span
861
- self._spans[:] = [
862
- _Span(start + count, end + count, style)
863
- for start, end, style in self._spans
864
- ]
865
-
866
- def pad_left(self, count: int, character: str = " ") -> None:
867
- """Pad the left with a given character.
868
-
869
- Args:
870
- count (int): Number of characters to pad.
871
- character (str, optional): Character to pad with. Defaults to " ".
872
- """
873
- assert len(character) == 1, "Character must be a string of length 1"
874
- if count:
875
- self.plain = f"{character * count}{self.plain}"
876
- _Span = Span
877
- self._spans[:] = [
878
- _Span(start + count, end + count, style)
879
- for start, end, style in self._spans
880
- ]
881
-
882
- def pad_right(self, count: int, character: str = " ") -> None:
883
- """Pad the right with a given character.
884
-
885
- Args:
886
- count (int): Number of characters to pad.
887
- character (str, optional): Character to pad with. Defaults to " ".
888
- """
889
- assert len(character) == 1, "Character must be a string of length 1"
890
- if count:
891
- self.plain = f"{self.plain}{character * count}"
892
-
893
- def align(self, align: AlignMethod, width: int, character: str = " ") -> None:
894
- """Align text to a given width.
895
-
896
- Args:
897
- align (AlignMethod): One of "left", "center", or "right".
898
- width (int): Desired width.
899
- character (str, optional): Character to pad with. Defaults to " ".
900
- """
901
- self.truncate(width)
902
- excess_space = width - cell_len(self.plain)
903
- if excess_space:
904
- if align == "left":
905
- self.pad_right(excess_space, character)
906
- elif align == "center":
907
- left = excess_space // 2
908
- self.pad_left(left, character)
909
- self.pad_right(excess_space - left, character)
910
- else:
911
- self.pad_left(excess_space, character)
912
-
913
- def append(
914
- self, text: Union["Text", str], style: Optional[Union[str, "Style"]] = None
915
- ) -> "Text":
916
- """Add text with an optional style.
917
-
918
- Args:
919
- text (Union[Text, str]): A str or Text to append.
920
- style (str, optional): A style name. Defaults to None.
921
-
922
- Returns:
923
- Text: Returns self for chaining.
924
- """
925
-
926
- if not isinstance(text, (str, Text)):
927
- raise TypeError("Only str or Text can be appended to Text")
928
-
929
- if len(text):
930
- if isinstance(text, str):
931
- sanitized_text = strip_control_codes(text)
932
- self._text.append(sanitized_text)
933
- offset = len(self)
934
- text_length = len(sanitized_text)
935
- if style is not None:
936
- self._spans.append(Span(offset, offset + text_length, style))
937
- self._length += text_length
938
- elif isinstance(text, Text):
939
- _Span = Span
940
- if style is not None:
941
- raise ValueError(
942
- "style must not be set when appending Text instance"
943
- )
944
- text_length = self._length
945
- if text.style is not None:
946
- self._spans.append(
947
- _Span(text_length, text_length + len(text), text.style)
948
- )
949
- self._text.append(text.plain)
950
- self._spans.extend(
951
- _Span(start + text_length, end + text_length, style)
952
- for start, end, style in text._spans
953
- )
954
- self._length += len(text)
955
- return self
956
-
957
- def append_text(self, text: "Text") -> "Text":
958
- """Append another Text instance. This method is more performant that Text.append, but
959
- only works for Text.
960
-
961
- Returns:
962
- Text: Returns self for chaining.
963
- """
964
- _Span = Span
965
- text_length = self._length
966
- if text.style is not None:
967
- self._spans.append(_Span(text_length, text_length + len(text), text.style))
968
- self._text.append(text.plain)
969
- self._spans.extend(
970
- _Span(start + text_length, end + text_length, style)
971
- for start, end, style in text._spans
972
- )
973
- self._length += len(text)
974
- return self
975
-
976
- def append_tokens(
977
- self, tokens: Iterable[Tuple[str, Optional[StyleType]]]
978
- ) -> "Text":
979
- """Append iterable of str and style. Style may be a Style instance or a str style definition.
980
-
981
- Args:
982
- pairs (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style.
983
-
984
- Returns:
985
- Text: Returns self for chaining.
986
- """
987
- append_text = self._text.append
988
- append_span = self._spans.append
989
- _Span = Span
990
- offset = len(self)
991
- for content, style in tokens:
992
- append_text(content)
993
- if style is not None:
994
- append_span(_Span(offset, offset + len(content), style))
995
- offset += len(content)
996
- self._length = offset
997
- return self
998
-
999
- def copy_styles(self, text: "Text") -> None:
1000
- """Copy styles from another Text instance.
1001
-
1002
- Args:
1003
- text (Text): A Text instance to copy styles from, must be the same length.
1004
- """
1005
- self._spans.extend(text._spans)
1006
-
1007
- def split(
1008
- self,
1009
- separator: str = "\n",
1010
- *,
1011
- include_separator: bool = False,
1012
- allow_blank: bool = False,
1013
- ) -> Lines:
1014
- """Split rich text in to lines, preserving styles.
1015
-
1016
- Args:
1017
- separator (str, optional): String to split on. Defaults to "\\\\n".
1018
- include_separator (bool, optional): Include the separator in the lines. Defaults to False.
1019
- allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False.
1020
-
1021
- Returns:
1022
- List[RichText]: A list of rich text, one per line of the original.
1023
- """
1024
- assert separator, "separator must not be empty"
1025
-
1026
- text = self.plain
1027
- if separator not in text:
1028
- return Lines([self.copy()])
1029
-
1030
- if include_separator:
1031
- lines = self.divide(
1032
- match.end() for match in re.finditer(re.escape(separator), text)
1033
- )
1034
- else:
1035
-
1036
- def flatten_spans() -> Iterable[int]:
1037
- for match in re.finditer(re.escape(separator), text):
1038
- start, end = match.span()
1039
- yield start
1040
- yield end
1041
-
1042
- lines = Lines(
1043
- line for line in self.divide(flatten_spans()) if line.plain != separator
1044
- )
1045
-
1046
- if not allow_blank and text.endswith(separator):
1047
- lines.pop()
1048
-
1049
- return lines
1050
-
1051
- def divide(self, offsets: Iterable[int]) -> Lines:
1052
- """Divide text in to a number of lines at given offsets.
1053
-
1054
- Args:
1055
- offsets (Iterable[int]): Offsets used to divide text.
1056
-
1057
- Returns:
1058
- Lines: New RichText instances between offsets.
1059
- """
1060
- _offsets = list(offsets)
1061
-
1062
- if not _offsets:
1063
- return Lines([self.copy()])
1064
-
1065
- text = self.plain
1066
- text_length = len(text)
1067
- divide_offsets = [0, *_offsets, text_length]
1068
- line_ranges = list(zip(divide_offsets, divide_offsets[1:]))
1069
-
1070
- style = self.style
1071
- justify = self.justify
1072
- overflow = self.overflow
1073
- _Text = Text
1074
- new_lines = Lines(
1075
- _Text(
1076
- text[start:end],
1077
- style=style,
1078
- justify=justify,
1079
- overflow=overflow,
1080
- )
1081
- for start, end in line_ranges
1082
- )
1083
- if not self._spans:
1084
- return new_lines
1085
-
1086
- _line_appends = [line._spans.append for line in new_lines._lines]
1087
- line_count = len(line_ranges)
1088
- _Span = Span
1089
-
1090
- for span_start, span_end, style in self._spans:
1091
-
1092
- lower_bound = 0
1093
- upper_bound = line_count
1094
- start_line_no = (lower_bound + upper_bound) // 2
1095
-
1096
- while True:
1097
- line_start, line_end = line_ranges[start_line_no]
1098
- if span_start < line_start:
1099
- upper_bound = start_line_no - 1
1100
- elif span_start > line_end:
1101
- lower_bound = start_line_no + 1
1102
- else:
1103
- break
1104
- start_line_no = (lower_bound + upper_bound) // 2
1105
-
1106
- if span_end < line_end:
1107
- end_line_no = start_line_no
1108
- else:
1109
- end_line_no = lower_bound = start_line_no
1110
- upper_bound = line_count
1111
-
1112
- while True:
1113
- line_start, line_end = line_ranges[end_line_no]
1114
- if span_end < line_start:
1115
- upper_bound = end_line_no - 1
1116
- elif span_end > line_end:
1117
- lower_bound = end_line_no + 1
1118
- else:
1119
- break
1120
- end_line_no = (lower_bound + upper_bound) // 2
1121
-
1122
- for line_no in range(start_line_no, end_line_no + 1):
1123
- line_start, line_end = line_ranges[line_no]
1124
- new_start = max(0, span_start - line_start)
1125
- new_end = min(span_end - line_start, line_end - line_start)
1126
- if new_end > new_start:
1127
- _line_appends[line_no](_Span(new_start, new_end, style))
1128
-
1129
- return new_lines
1130
-
1131
- def right_crop(self, amount: int = 1) -> None:
1132
- """Remove a number of characters from the end of the text."""
1133
- max_offset = len(self.plain) - amount
1134
- _Span = Span
1135
- self._spans[:] = [
1136
- (
1137
- span
1138
- if span.end < max_offset
1139
- else _Span(span.start, min(max_offset, span.end), span.style)
1140
- )
1141
- for span in self._spans
1142
- if span.start < max_offset
1143
- ]
1144
- self._text = [self.plain[:-amount]]
1145
- self._length -= amount
1146
-
1147
- def wrap(
1148
- self,
1149
- console: "Console",
1150
- width: int,
1151
- *,
1152
- justify: Optional["JustifyMethod"] = None,
1153
- overflow: Optional["OverflowMethod"] = None,
1154
- tab_size: int = 8,
1155
- no_wrap: Optional[bool] = None,
1156
- ) -> Lines:
1157
- """Word wrap the text.
1158
-
1159
- Args:
1160
- console (Console): Console instance.
1161
- width (int): Number of characters per line.
1162
- emoji (bool, optional): Also render emoji code. Defaults to True.
1163
- justify (str, optional): Justify method: "default", "left", "center", "full", "right". Defaults to "default".
1164
- overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None.
1165
- tab_size (int, optional): Default tab size. Defaults to 8.
1166
- no_wrap (bool, optional): Disable wrapping. Defaults to False.
1167
-
1168
- Returns:
1169
- Lines: The wrapped lines.
1170
- """
1171
- wrap_justify = justify or self.justify or DEFAULT_JUSTIFY
1172
- wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW
1173
-
1174
- no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == "ignore"
1175
-
1176
- lines = Lines()
1177
- for line in self.split(allow_blank=True):
1178
- if "\t" in line:
1179
- line.expand_tabs(tab_size)
1180
- if no_wrap:
1181
- new_lines = Lines([line])
1182
- else:
1183
- offsets = divide_line(str(line), width, fold=wrap_overflow == "fold")
1184
- new_lines = line.divide(offsets)
1185
- for line in new_lines:
1186
- line.rstrip_end(width)
1187
- if wrap_justify:
1188
- new_lines.justify(
1189
- console, width, justify=wrap_justify, overflow=wrap_overflow
1190
- )
1191
- for line in new_lines:
1192
- line.truncate(width, overflow=wrap_overflow)
1193
- lines.extend(new_lines)
1194
- return lines
1195
-
1196
- def fit(self, width: int) -> Lines:
1197
- """Fit the text in to given width by chopping in to lines.
1198
-
1199
- Args:
1200
- width (int): Maximum characters in a line.
1201
-
1202
- Returns:
1203
- Lines: Lines container.
1204
- """
1205
- lines: Lines = Lines()
1206
- append = lines.append
1207
- for line in self.split():
1208
- line.set_length(width)
1209
- append(line)
1210
- return lines
1211
-
1212
- def detect_indentation(self) -> int:
1213
- """Auto-detect indentation of code.
1214
-
1215
- Returns:
1216
- int: Number of spaces used to indent code.
1217
- """
1218
-
1219
- _indentations = {
1220
- len(match.group(1))
1221
- for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE)
1222
- }
1223
-
1224
- try:
1225
- indentation = (
1226
- reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1
1227
- )
1228
- except TypeError:
1229
- indentation = 1
1230
-
1231
- return indentation
1232
-
1233
- def with_indent_guides(
1234
- self,
1235
- indent_size: Optional[int] = None,
1236
- *,
1237
- character: str = "│",
1238
- style: StyleType = "dim green",
1239
- ) -> "Text":
1240
- """Adds indent guide lines to text.
1241
-
1242
- Args:
1243
- indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None.
1244
- character (str, optional): Character to use for indentation. Defaults to "│".
1245
- style (Union[Style, str], optional): Style of indent guides.
1246
-
1247
- Returns:
1248
- Text: New text with indentation guides.
1249
- """
1250
-
1251
- _indent_size = self.detect_indentation() if indent_size is None else indent_size
1252
-
1253
- text = self.copy()
1254
- text.expand_tabs()
1255
- indent_line = f"{character}{' ' * (_indent_size - 1)}"
1256
-
1257
- re_indent = re.compile(r"^( *)(.*)$")
1258
- new_lines: List[Text] = []
1259
- add_line = new_lines.append
1260
- blank_lines = 0
1261
- for line in text.split(allow_blank=True):
1262
- match = re_indent.match(line.plain)
1263
- if not match or not match.group(2):
1264
- blank_lines += 1
1265
- continue
1266
- indent = match.group(1)
1267
- full_indents, remaining_space = divmod(len(indent), _indent_size)
1268
- new_indent = f"{indent_line * full_indents}{' ' * remaining_space}"
1269
- line.plain = new_indent + line.plain[len(new_indent) :]
1270
- line.stylize(style, 0, len(new_indent))
1271
- if blank_lines:
1272
- new_lines.extend([Text(new_indent, style=style)] * blank_lines)
1273
- blank_lines = 0
1274
- add_line(line)
1275
- if blank_lines:
1276
- new_lines.extend([Text("", style=style)] * blank_lines)
1277
-
1278
- new_text = text.blank_copy("\n").join(new_lines)
1279
- return new_text
1280
-
1281
-
1282
- if __name__ == "__main__": # pragma: no cover
1283
- from pip._vendor.rich.console import Console
1284
-
1285
- text = Text(
1286
- """\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"""
1287
- )
1288
- text.highlight_words(["Lorem"], "bold")
1289
- text.highlight_words(["ipsum"], "italic")
1290
-
1291
- console = Console()
1292
-
1293
- console.rule("justify='left'")
1294
- console.print(text, style="red")
1295
- console.print()
1296
-
1297
- console.rule("justify='center'")
1298
- console.print(text, style="green", justify="center")
1299
- console.print()
1300
-
1301
- console.rule("justify='right'")
1302
- console.print(text, style="blue", justify="right")
1303
- console.print()
1304
-
1305
- console.rule("justify='full'")
1306
- console.print(text, style="magenta", justify="full")
1307
- console.print()
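
The `__main__` demo above only exercises `highlight_words` and the justify options; as a brief illustration of a few other public APIs defined in this module (`from_markup`, `stylize`, `Text.assemble`), a minimal sketch along these lines should work with the vendored copy:

```python
from pip._vendor.rich.console import Console
from pip._vendor.rich.text import Text

console = Console()

# Parse console markup into a styled Text instance.
greeting = Text.from_markup("[bold red]Hello[/bold red], world!")

# Apply an additional style to a slice of the text (start/end offsets).
greeting.stylize("underline", 0, 5)

# Combine plain strings and (string, style) pairs into one Text.
status = Text.assemble("status: ", ("ok", "green"))

console.print(greeting)
console.print(status)
```
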
spaces/BorisovMaksim/denoising/app.py DELETED
@@ -1,131 +0,0 @@
1
- import uuid
2
- import ffmpeg
3
- import gradio as gr
4
- from pathlib import Path
5
- from denoisers.SpectralGating import SpectralGating
6
- from huggingface_hub import hf_hub_download
7
- from denoisers.demucs import Demucs
8
- import torch
9
- import torchaudio
10
- import yaml
11
- import argparse
12
-
13
- import os
14
- os.environ['CURL_CA_BUNDLE'] = ''
15
- SAMPLE_RATE = 32000
16
-
17
-
18
- def denoising_transform(audio, model):
19
- src_path = Path("cache_wav/original/{}.wav".format(str(uuid.uuid4())))
20
- tgt_path = Path("cache_wav/denoised/{}.wav".format(str(uuid.uuid4())))
21
- src_path.parent.mkdir(exist_ok=True, parents=True)
22
- tgt_path.parent.mkdir(exist_ok=True, parents=True)
23
- (ffmpeg.input(audio)
24
- .output(src_path.as_posix(), acodec='pcm_s16le', ac=1, ar=SAMPLE_RATE)
25
- .run()
26
- )
27
- wav, rate = torchaudio.load(src_path)
28
- reduced_noise = model.predict(wav)
29
- torchaudio.save(tgt_path, reduced_noise, rate)
30
- return src_path, tgt_path
31
-
32
-
33
- def run_app(model_filename, config_filename, port, concurrency_count, max_size):
34
- model_path = hf_hub_download(repo_id="BorisovMaksim/demucs", filename=model_filename)
35
- config_path = hf_hub_download(repo_id="BorisovMaksim/demucs", filename=config_filename)
36
- with open(config_path, 'r') as f:
37
- config = yaml.safe_load(f)
38
- model = Demucs(config['demucs'])
39
- checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
40
- model.load_state_dict(checkpoint['model_state_dict'])
41
-
42
- title = "Denoising"
43
-
44
-
45
- with gr.Blocks(title=title) as app:
46
- with gr.Row():
47
- with gr.Column():
48
- gr.Markdown(
49
- """
50
- # Denoising
51
- ## Instruction: \n
52
- 1. Press "Record from microphone"
53
- 2. Press "Stop recording"
54
- 3. Press "Enhance" \n
55
- - You can switch to the tab "File" to upload a prerecorded .wav audio instead of recording from microphone.
56
- """
57
- )
58
- with gr.Tab("Microphone"):
59
- microphone = gr.Audio(label="Source Audio", source="microphone", type='filepath')
60
- with gr.Row():
61
- microphone_button = gr.Button("Enhance", variant="primary")
62
- with gr.Tab("File"):
63
- upload = gr.Audio(label="Upload Audio", source="upload", type='filepath')
64
- with gr.Row():
65
- upload_button = gr.Button("Enhance", variant="primary")
66
- clear_btn = gr.Button("Clear")
67
- gr.Examples(examples=[[path] for path in Path("testing/wavs/").glob("*.wav")],
68
- inputs=[microphone, upload])
69
-
70
- with gr.Column():
71
- outputs = [gr.Audio(label="Input Audio", type='filepath'),
72
- gr.Audio(label="Demucs Enhancement", type='filepath'),
73
- gr.Audio(label="Spectral Gating Enhancement", type='filepath')
74
- ]
75
-
76
- def submit(audio):
77
- src_path, demucs_tgt_path = denoising_transform(audio, model)
78
- _, spectral_gating_tgt_path = denoising_transform(audio, SpectralGating())
79
- return src_path, demucs_tgt_path, spectral_gating_tgt_path, gr.update(visible=False), gr.update(visible=False)
80
-
81
-
82
-
83
- microphone_button.click(
84
- submit,
85
- microphone,
86
- outputs + [microphone, upload]
87
- )
88
- upload_button.click(
89
- submit,
90
- upload,
91
- outputs + [microphone, upload]
92
- )
93
-
94
-
95
- def restart():
96
- return microphone.update(visible=True, value=None), upload.update(visible=True, value=None), None, None, None
97
-
98
- clear_btn.click(restart, inputs=[], outputs=[microphone, upload] + outputs)
99
-
100
- app.queue(concurrency_count=concurrency_count, max_size=max_size)
101
-
102
- app.launch(
103
- server_name='0.0.0.0',
104
- server_port=port,
105
- )
106
-
107
-
108
-
109
-
110
- if __name__ == "__main__":
111
- parser = argparse.ArgumentParser(description='Running demo.')
112
- parser.add_argument('--port',
113
- type=int,
114
- default=7860)
115
- parser.add_argument('--model_filename',
116
- type=str,
117
- default="paper_replica_10_epoch/Demucs_replicate_paper_continue_epoch45.pt")
118
- parser.add_argument('--config_filename',
119
- type=str,
120
- default="paper_replica_10_epoch/config.yaml")
121
- parser.add_argument('--concurrency_count',
122
- type=int,
123
- default=4)
124
- parser.add_argument('--max_size',
125
- type=int,
126
- default=15)
127
-
128
- args = parser.parse_args()
129
-
130
-
131
- run_app(args.model_filename, args.config_filename, args.port, args.concurrency_count, args.max_size)
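
The argument parser above wires up the demo's runtime options; a minimal sketch of launching the same app programmatically (reusing `run_app` from this file, with the same default values the CLI declares; the checkpoint and config filenames on the `BorisovMaksim/demucs` hub repo are assumed unchanged) would be roughly:

```python
# Sketch: launch the Gradio denoising demo with the CLI defaults shown above.
from app import run_app  # app.py above, imported as a module

run_app(
    model_filename="paper_replica_10_epoch/Demucs_replicate_paper_continue_epoch45.pt",
    config_filename="paper_replica_10_epoch/config.yaml",
    port=7860,
    concurrency_count=4,
    max_size=15,
)
```
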
spaces/Brainclub5000/wesley7137-Llama-2-13B-Nous-Hermes-vicuna-uncensored-mastermod-spych/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Wesley7137 Llama 2 13B Nous Hermes Vicuna Uncensored Mastermod Spych
3
- emoji: 🏢
4
- colorFrom: red
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.40.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CarlDennis/HYTTS/text/korean.py DELETED
@@ -1,205 +0,0 @@
1
- import re
2
- from jamo import h2j, j2hcj
3
- import ko_pron
4
-
5
-
6
- # This is a list of Korean classifiers preceded by pure Korean numerals.
7
- _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
8
-
9
- # List of (hangul, hangul divided) pairs:
10
- _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
11
- ('ㄳ', 'ㄱㅅ'),
12
- ('ㄵ', 'ㄴㅈ'),
13
- ('ㄶ', 'ㄴㅎ'),
14
- ('ㄺ', 'ㄹㄱ'),
15
- ('ㄻ', 'ㄹㅁ'),
16
- ('ㄼ', 'ㄹㅂ'),
17
- ('ㄽ', 'ㄹㅅ'),
18
- ('ㄾ', 'ㄹㅌ'),
19
- ('ㄿ', 'ㄹㅍ'),
20
- ('ㅀ', 'ㄹㅎ'),
21
- ('ㅄ', 'ㅂㅅ'),
22
- ('ㅘ', 'ㅗㅏ'),
23
- ('ㅙ', 'ㅗㅐ'),
24
- ('ㅚ', 'ㅗㅣ'),
25
- ('ㅝ', 'ㅜㅓ'),
26
- ('ㅞ', 'ㅜㅔ'),
27
- ('ㅟ', 'ㅜㅣ'),
28
- ('ㅢ', 'ㅡㅣ'),
29
- ('ㅑ', 'ㅣㅏ'),
30
- ('ㅒ', 'ㅣㅐ'),
31
- ('ㅕ', 'ㅣㅓ'),
32
- ('ㅖ', 'ㅣㅔ'),
33
- ('ㅛ', 'ㅣㅗ'),
34
- ('ㅠ', 'ㅣㅜ')
35
- ]]
36
-
37
- # List of (Latin alphabet, hangul) pairs:
38
- _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
39
- ('a', '에이'),
40
- ('b', '비'),
41
- ('c', '시'),
42
- ('d', '디'),
43
- ('e', '이'),
44
- ('f', '에프'),
45
- ('g', '지'),
46
- ('h', '에이치'),
47
- ('i', '아이'),
48
- ('j', '제이'),
49
- ('k', '케이'),
50
- ('l', '엘'),
51
- ('m', '엠'),
52
- ('n', '엔'),
53
- ('o', '오'),
54
- ('p', '피'),
55
- ('q', '큐'),
56
- ('r', '아르'),
57
- ('s', '에스'),
58
- ('t', '티'),
59
- ('u', '유'),
60
- ('v', '브이'),
61
- ('w', '더블유'),
62
- ('x', '엑스'),
63
- ('y', '와이'),
64
- ('z', '제트')
65
- ]]
66
-
67
- # List of (ipa, lazy ipa) pairs:
68
- _ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
69
- ('t͡ɕ','ʧ'),
70
- ('d͡ʑ','ʥ'),
71
- ('ɲ','n^'),
72
- ('ɕ','ʃ'),
73
- ('ʷ','w'),
74
- ('ɭ','l`'),
75
- ('ʎ','ɾ'),
76
- ('ɣ','ŋ'),
77
- ('ɰ','ɯ'),
78
- ('ʝ','j'),
79
- ('ʌ','ə'),
80
- ('ɡ','g'),
81
- ('\u031a','#'),
82
- ('\u0348','='),
83
- ('\u031e',''),
84
- ('\u0320',''),
85
- ('\u0339','')
86
- ]]
87
-
88
-
89
- def latin_to_hangul(text):
90
- for regex, replacement in _latin_to_hangul:
91
- text = re.sub(regex, replacement, text)
92
- return text
93
-
94
-
95
- def divide_hangul(text):
96
- text = j2hcj(h2j(text))
97
- for regex, replacement in _hangul_divided:
98
- text = re.sub(regex, replacement, text)
99
- return text
100
-
101
-
102
- def hangul_number(num, sino=True):
103
- '''Reference https://github.com/Kyubyong/g2pK'''
104
- num = re.sub(',', '', num)
105
-
106
- if num == '0':
107
- return '영'
108
- if not sino and num == '20':
109
- return '스무'
110
-
111
- digits = '123456789'
112
- names = '일이삼사오육칠팔구'
113
- digit2name = {d: n for d, n in zip(digits, names)}
114
-
115
- modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
116
- decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
117
- digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
118
- digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
119
-
120
- spelledout = []
121
- for i, digit in enumerate(num):
122
- i = len(num) - i - 1
123
- if sino:
124
- if i == 0:
125
- name = digit2name.get(digit, '')
126
- elif i == 1:
127
- name = digit2name.get(digit, '') + '십'
128
- name = name.replace('일십', '십')
129
- else:
130
- if i == 0:
131
- name = digit2mod.get(digit, '')
132
- elif i == 1:
133
- name = digit2dec.get(digit, '')
134
- if digit == '0':
135
- if i % 4 == 0:
136
- last_three = spelledout[-min(3, len(spelledout)):]
137
- if ''.join(last_three) == '':
138
- spelledout.append('')
139
- continue
140
- else:
141
- spelledout.append('')
142
- continue
143
- if i == 2:
144
- name = digit2name.get(digit, '') + '백'
145
- name = name.replace('일백', '백')
146
- elif i == 3:
147
- name = digit2name.get(digit, '') + '천'
148
- name = name.replace('일천', '천')
149
- elif i == 4:
150
- name = digit2name.get(digit, '') + '만'
151
- name = name.replace('일만', '만')
152
- elif i == 5:
153
- name = digit2name.get(digit, '') + '십'
154
- name = name.replace('일십', '십')
155
- elif i == 6:
156
- name = digit2name.get(digit, '') + '백'
157
- name = name.replace('일백', '백')
158
- elif i == 7:
159
- name = digit2name.get(digit, '') + '천'
160
- name = name.replace('일천', '천')
161
- elif i == 8:
162
- name = digit2name.get(digit, '') + '억'
163
- elif i == 9:
164
- name = digit2name.get(digit, '') + '십'
165
- elif i == 10:
166
- name = digit2name.get(digit, '') + '백'
167
- elif i == 11:
168
- name = digit2name.get(digit, '') + '천'
169
- elif i == 12:
170
- name = digit2name.get(digit, '') + '조'
171
- elif i == 13:
172
- name = digit2name.get(digit, '') + '십'
173
- elif i == 14:
174
- name = digit2name.get(digit, '') + '백'
175
- elif i == 15:
176
- name = digit2name.get(digit, '') + '천'
177
- spelledout.append(name)
178
- return ''.join(elem for elem in spelledout)
179
-
180
-
181
- def number_to_hangul(text):
182
- '''Reference https://github.com/Kyubyong/g2pK'''
183
- tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
184
- for token in tokens:
185
- num, classifier = token
186
- if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
187
- spelledout = hangul_number(num, sino=False)
188
- else:
189
- spelledout = hangul_number(num, sino=True)
190
- text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
191
- # digit by digit for remaining digits
192
- digits = '0123456789'
193
- names = '영일이삼사오육칠팔구'
194
- for d, n in zip(digits, names):
195
- text = text.replace(d, n)
196
- return text
197
-
198
-
199
- def korean_to_lazy_ipa(text):
200
- text = latin_to_hangul(text)
201
- text = number_to_hangul(text)
202
- text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa'),text).split('] ~ [')[0]
203
- for regex, replacement in _ipa_to_lazy_ipa:
204
- text = re.sub(regex, replacement, text)
205
- return text
spaces/ChandraMohanNayal/AutoGPT/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: AutoGPT
3
- emoji: 🦾
4
- colorFrom: yellow
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.27.0
8
- app_file: ui/app.py
9
- pinned: false
10
- license: mit
11
- duplicated_from: aliabid94/AutoGPT
12
- ---
13
-
spaces/Chris4K/llms_compare/Ek Villain 2014 Full Movie In Hindi Download.md DELETED
@@ -1,88 +0,0 @@
1
- ## ek villain 2014 full movie in hindi download
2
-
3
-
4
-
5
-
6
-
7
- ![Ek Villain 2014 Full Movie In Hindi Download](https://filmykeeday.com/wp-content/uploads/2014/05/ek-villain-poster-list.jpg.webp)
8
-
9
-
10
-
11
-
12
-
13
- **Download - [https://www.google.com/url?q=https%3A%2F%2Furlca.com%2F2txP17&sa=D&sntz=1&usg=AOvVaw0tYgfDg5CL48ZZaHV8hpty](https://www.google.com/url?q=https%3A%2F%2Furlca.com%2F2txP17&sa=D&sntz=1&usg=AOvVaw0tYgfDg5CL48ZZaHV8hpty)**
14
-
15
-
16
-
17
-
18
-
19
-
20
-
21
-
22
-
23
-
24
-
25
- Here is a possible title and article with SEO optimization and HTML formatting for the keyword "ek villain 2014 full movie in hindi download":
26
-
27
- # How to Watch Ek Villain (2014) Full Movie in Hindi Online
28
-
29
-
30
-
31
- Ek Villain is a 2014 Bollywood thriller movie starring Sidharth Malhotra, Shraddha Kapoor and Riteish Deshmukh. The movie revolves around Guru, a former gangster who falls in love with Aisha, a terminally ill girl. However, their happiness is shattered when Aisha is killed by a serial killer. Guru sets out to avenge her death and find the killer.
32
-
33
-
34
-
35
- If you are looking for ways to watch Ek Villain full movie in Hindi online, you have come to the right place. In this article, we will show you some of the best platforms where you can stream or download Ek Villain legally and safely.
36
-
37
-
38
-
39
- ## Disney+ Hotstar
40
-
41
-
42
-
43
- Disney+ Hotstar is one of the most popular streaming services in India, offering a vast collection of movies, TV shows, sports and live events. You can watch Ek Villain on Disney+ Hotstar with a premium subscription that costs Rs. 299 per month or Rs. 1499 per year. You can also get a VIP subscription for Rs. 399 per year that gives you access to select movies and shows.
44
-
45
-
46
-
47
- To watch Ek Villain on Disney+ Hotstar, you need to sign up for an account and choose a subscription plan. Then, you can search for Ek Villain in the search bar or browse through the genres and categories. You can also use the Watchlist feature to save the movie for later viewing. You can watch Ek Villain on any device that supports Disney+ Hotstar, such as smartphones, tablets, laptops, smart TVs and streaming devices.
48
-
49
-
50
-
51
- ## PogoLinks
52
-
53
-
54
-
55
- PogoLinks is a website that provides links to download Bollywood and Hollywood movies in various qualities and formats. You can download Ek Villain from PogoLinks in full HD quality with Hindi audio. The movie is available in 480p, 720p and 1080p resolutions and MKV format.
56
-
57
-
58
-
59
- To download Ek Villain from PogoLinks, you need to visit the website and search for Ek Villain in the search box. Then, you will see a list of download links from different sources. You can choose any link that suits your preference and click on it. You will be redirected to another page where you have to verify that you are not a robot and then click on Download Now. You can then save the movie file on your device or watch it online.
60
-
61
-
62
-
63
- ## Other Options
64
-
65
-
66
-
67
- If you are not satisfied with the above options, you can also try some other platforms where you can watch Ek Villain online. However, these platforms may not be legal or safe, so we advise you to use them at your own risk.
68
-
69
-
70
-
71
- - Archive.org: Archive.org is a digital library that hosts millions of free books, music, videos and more. You can find Ek Villain on Archive.org as a part of its Bollywood collection. You can stream or download the movie from this website for free.
72
-
73
- - Torrent Sites: Torrent sites are another way to download Ek Villain full movie in Hindi online. However, torrenting is illegal and risky in many countries, as it may expose you to malware, viruses and legal issues. Therefore, we do not recommend using torrent sites to download Ek Villain or any other movie.
74
-
75
-
76
-
77
- ## Conclusion
78
-
79
-
80
-
81
- Ek Villain is a gripping and emotional movie that will keep you hooked till the end. If you want to watch Ek Villain full movie in Hindi online, you can use any of the platforms mentioned above. However, we suggest that you use legal and safe platforms like Disney+ Hotstar or PogoLinks to enjoy the movie without any hassle or worry.
82
-
83
84
-
85
-
86
-
87
-
88
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/meme-api/docs/develop.md DELETED
@@ -1,135 +0,0 @@
1
- # Guide to Writing New Memes
2
-
3
- ## Registering a Meme
4
-
5
- meme-generator loads memes as Python packages, and a meme is "registered" through the `add_meme` function.
6
-
7
- Taking the `petpet` meme as an example, the file structure is as follows:
8
-
9
- ```
10
- meme_generator/memes/petpet
11
- ├── __init__.py # meme generation code
12
- └── images # image files needed by the meme
13
- ├── 0.png
14
- ├── 1.png
15
- ├── 2.png
16
- ├── 3.png
17
- └── 4.png
18
- ```
19
-
20
- Without considering extra arguments, the `__init__.py` of the `petpet` meme can be written as follows:
21
-
22
- ```python
23
- from typing import List
24
- from pathlib import Path
25
- from pil_utils import BuildImage
26
- from PIL.Image import Image as IMG
27
-
28
- from meme_generator.utils import save_gif
29
- from meme_generator import add_meme
30
-
31
-
32
- img_dir = Path(__file__).parent / "images"
33
-
34
-
35
- def petpet(images: List[BuildImage], texts, args):
36
- """Meme generation function
37
-
38
- The function receives 3 arguments:
39
- - `images`: the list of input images, of type `pil_utils.BuildImage`
40
- - `texts`: the list of input texts, of type `str`
41
- - `args`: extra arguments, of type `meme_generator.meme.MemeArgsModel`
42
- """
43
- img = images[0].convert("RGBA").square()
44
- frames: List[IMG] = []
45
- locs = [
46
- (14, 20, 98, 98),
47
- (12, 33, 101, 85),
48
- (8, 40, 110, 76),
49
- (10, 33, 102, 84),
50
- (12, 20, 98, 98),
51
- ]
52
- for i in range(5):
53
- hand = BuildImage.open(img_dir / f"{i}.png")
54
- frame = BuildImage.new("RGBA", hand.size, (255, 255, 255, 0))
55
- x, y, w, h = locs[i]
56
- frame.paste(img.resize((w, h)), (x, y), alpha=True)
57
- frame.paste(hand, alpha=True)
58
- frames.append(frame.image)
59
- return save_gif(frames, 0.06)
60
-
61
-
62
- add_meme(
63
- "petpet", # unique name of the meme
64
- petpet, # meme generation function
65
- min_images=1, # at least 1 image is required
66
- max_images=1, # there are also `min_texts` and `max_texts` options to control the number of input texts
67
- keywords=["摸", "摸摸", "摸头", "rua"], # keywords: short, descriptive words used to show what the meme means and to make it easy for chat bots to trigger it
68
- )
69
- ```
70
-
71
- In general, one folder per meme is recommended, with the images and other assets the meme needs placed inside that folder; this makes adding and removing memes easier.
72
-
73
- It is also possible to register multiple memes in a single file, e.g. [gif_subtitle](../meme_generator/memes/gif_subtitle/__init__.py)
74
-
75
-
76
- ## Defining Arguments
77
-
78
- Some memes need extra arguments. The argument type is defined as follows:
79
-
80
- ```python
81
- @dataclass
82
- class MemeArgsType:
83
- parser: MemeArgsParser # argument parser; turns command-line style text into a dict, making the meme easy to use from a command line
84
- model: Type[MemeArgsModel] # argument model; validates the dict-form arguments, which are then passed to the meme generation function
85
- instances: List[MemeArgsModel] = field(default_factory=list) # optional example instances of the model; recommended, used to generate preview images under different arguments
86
- ```
87
-
88
- Taking the `petpet` meme as an example, a `circle` argument is needed to control whether the image is turned into a circle.
89
-
90
- The following `pydantic` model can be defined:
91
-
92
- ```python
93
- from pydantic import Field
94
- from meme_generator import MemeArgsModel
95
-
96
- class Model(MemeArgsModel):
97
- circle: bool = Field(False, description="是否将图片变为圆形")
98
- ```
99
-
100
- When defining arguments, it is recommended to set default values with `Field` and to provide a `description` explaining what the argument means, which helps when generating documentation.
101
-
102
- Also define an argument parser as follows:
103
-
104
- ```python
105
- from meme_generator import MemeArgsParser
106
-
107
- parser = MemeArgsParser(prefix_chars="-/")
108
- parser.add_argument("--circle", "/圆", action="store_true", help="是否将图片变为圆形")
109
- ```
110
-
111
- The parser above turns an argument list like `["--circle"]` into the form `{"circle": true}`, which is then validated by the `pydantic` model, as sketched below.
112
-
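A rough illustration of that parse-then-validate flow (a sketch only: it uses the standard library's `argparse` as a stand-in, since the exact parsing helper exposed by `MemeArgsParser` may differ):

```python
# Sketch: ["--circle"] -> {"circle": True} -> pydantic validation.
# argparse.ArgumentParser stands in for meme_generator.MemeArgsParser here.
import argparse
from pydantic import BaseModel, Field

class Model(BaseModel):
    circle: bool = Field(False, description="是否将图片变为圆形")

parser = argparse.ArgumentParser(prefix_chars="-/")
parser.add_argument("--circle", "/圆", action="store_true")

raw = vars(parser.parse_args(["--circle"]))  # {'circle': True}
options = Model(**raw)                       # validated by the pydantic model
print(options.circle)                        # True
```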
113
- When defining options it is recommended to add natural-language style aliases such as `/圆`, which makes the meme easier to call from chat bots, for example by parsing text like `摸头 /圆`.
114
-
115
- After defining the `parser` and `Model` above, they need to be passed in when calling `add_meme`:
116
-
117
- ```python
118
- add_meme(
119
- "petpet",
120
- petpet,
121
- min_images=1,
122
- max_images=1,
123
- args_type=MemeArgsType(
124
- parser,
125
- Model,
126
- [
127
- Model(circle=False),
128
- Model(circle=True),
129
- ],
130
- ),
131
- keywords=["摸", "摸摸", "摸头", "rua"],
132
- )
133
- ```
134
-
135
- Two model instances, `circle=False` and `circle=True`, are passed in here; they can be used when generating documentation to produce preview images under different arguments, as shown in [memes.md](memes.md#petpet).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CofAI/chat/client/html/index.html DELETED
@@ -1,135 +0,0 @@
1
- <!DOCTYPE html>
2
- <html lang="en">
3
- <head>
4
- <meta charset="UTF-8" />
5
- <meta http-equiv="X-UA-Compatible" content="IE=edge" />
6
- <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0" />
7
- <meta name="description" content="A conversational AI system that listens, learns, and challenges" />
8
- <meta property="og:title" content="ChatGPT" />
9
- <meta property="og:image" content="https://openai.com/content/images/2022/11/ChatGPT.jpg" />
10
- <meta
11
- property="og:description"
12
- content="A conversational AI system that listens, learns, and challenges" />
13
- <meta property="og:url" content="https://chat.acy.dev" />
14
- <link rel="stylesheet" href="{{ url_for('bp.static', filename='css/style.css') }}" />
15
- <link
16
- rel="apple-touch-icon"
17
- sizes="180x180"
18
- href="{{ url_for('bp.static', filename='img/apple-touch-icon.png') }}" />
19
- <link
20
- rel="icon"
21
- type="image/png"
22
- sizes="32x32"
23
- href="{{ url_for('bp.static', filename='img/favicon-32x32.png') }}" />
24
- <link
25
- rel="icon"
26
- type="image/png"
27
- sizes="16x16"
28
- href="{{ url_for('bp.static', filename='img/favicon-16x16.png') }}" />
29
- <link rel="manifest" href="{{ url_for('bp.static', filename='img/site.webmanifest') }}" />
30
- <link
31
- rel="stylesheet"
32
- href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@latest/build/styles/base16/dracula.min.css" />
33
- <title>FreeGPT</title>
34
- </head>
35
-
36
- <body data-urlprefix="{{ url_prefix}}">
37
- <div class="main-container">
38
- <div class="box sidebar">
39
- <div class="top">
40
- <button class="button" onclick="new_conversation()">
41
- <i class="fa-regular fa-plus"></i>
42
- <span>{{_('New Conversation')}}</span>
43
- </button>
44
- <div class="spinner"></div>
45
- </div>
46
- <div class="sidebar-footer">
47
- <button class="button" onclick="delete_conversations()">
48
- <i class="fa-regular fa-trash"></i>
49
- <span>{{_('Clear Conversations')}}</span>
50
- </button>
51
- <div class="settings-container">
52
- <div class="checkbox field">
53
- <span>{{_('Dark Mode')}}</span>
54
- <input type="checkbox" id="theme-toggler" />
55
- <label for="theme-toggler"></label>
56
- </div>
57
- <div class="field">
58
- <span>{{_('Language')}}</span>
59
- <select
60
- class="dropdown"
61
- id="language"
62
- onchange="changeLanguage(this.value)"></select>
63
- </div>
64
- </div>
65
- <a class="info" href="https://github.com/ramonvc/gptfree-jailbreak-webui" target="_blank">
66
- <i class="fa-brands fa-github"></i>
67
- <span class="conversation-title"> {{_('Version')}}: 0.1.0 </span>
68
- </a>
69
- </div>
70
- </div>
71
- <div class="conversation">
72
- <div class="stop-generating stop-generating-hidden">
73
- <button class="button" id="cancelButton">
74
- <span>{{_('Stop Generating')}}</span>
75
- </button>
76
- </div>
77
- <div class="box" id="messages"></div>
78
- <div class="user-input">
79
- <div class="box input-box">
80
- <textarea
81
- id="message-input"
82
- placeholder="{{_('Ask a question')}}"
83
- cols="30"
84
- rows="10"
85
- style="white-space: pre-wrap"></textarea>
86
- <div id="send-button">
87
- <i class="fa-regular fa-paper-plane-top"></i>
88
- </div>
89
- </div>
90
- </div>
91
- <div>
92
- <div class="options-container">
93
- <div class="buttons">
94
- <div class="field">
95
- <select class="dropdown" name="model" id="model">
96
- <option value="gpt-3.5-turbo">GPT-3.5</option>
97
- <option value="gpt-3.5-turbo-16k">GPT-3.5-turbo-16k</option>
98
- <option value="gpt-4" selected>GPT-4</option>
99
- </select>
100
- </div>
101
- <div class="field">
102
- <select class="dropdown" name="jailbreak" id="jailbreak">
103
- <option value="default" selected>{{_('Default')}}</option>
104
- <option value="gpt-dan-11.0">{{_('DAN')}}</option>
105
- <option value="gpt-evil">{{_('Evil')}}</option>
106
- </select>
107
- </div>
108
- </div>
109
- <div class="field checkbox">
110
- <input type="checkbox" id="switch" />
111
- <label for="switch"></label>
112
- <span>{{_('Web Access')}}</span>
113
- </div>
114
- </div>
115
- </div>
116
- </div>
117
- </div>
118
- <div class="menu-button">
119
- <i class="fa-solid fa-bars"></i>
120
- </div>
121
-
122
- <!-- scripts -->
123
- <script>
124
- window.conversation_id = "{{ chat_id }}";
125
- </script>
126
- <script src="{{ url_for('bp.static', filename='js/icons.js') }}"></script>
127
- <script src="{{ url_for('bp.static', filename='js/chat.js') }}" defer></script>
128
- <script src="https://cdn.jsdelivr.net/npm/markdown-it@latest/dist/markdown-it.min.js"></script>
129
- <script src="{{ url_for('bp.static', filename='js/highlight.min.js') }}"></script>
130
- <script src="{{ url_for('bp.static', filename='js/highlightjs-copy.min.js') }}"></script>
131
- <script src="{{ url_for('bp.static', filename='js/theme-toggler.js') }}"></script>
132
- <script src="{{ url_for('bp.static', filename='js/sidebar-toggler.js') }}"></script>
133
- <script src="{{ url_for('bp.static', filename='js/change-language.js') }}"></script>
134
- </body>
135
- </html>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CornSnakeID/CornSnakeMorphID/README.md DELETED
@@ -1,65 +0,0 @@
1
- ---
2
- title: CornSnakeMorphID
3
- emoji: 🌽🐍
4
- colorFrom: orange
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.18.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- # Corn Snake Morph ID
13
- I've seen a lot of posts in [r/cornsnakes](https://www.reddit.com/r/cornsnakes/) asking what morph their pet Corn Snake is. For those who don't know, a morph is the genetic coloring or pattern that shows up in the snake. Common traits include:
14
- * Amel (amelanistic): Lack of melanin or black pigment around the saddles (dark splotches) on the snake's back or in its eyes.
15
- * Bloodred (diffused): Low or no contrast throughout the pattern of the snake's back. The belly will often be white with no checkers.
16
- * Stripe: Striped back, often white belly.
17
-
18
- ## Inference
19
- The model runs on square images of 224x224 resolution. This Space will automatically resize the image, but it is recommended you use the built-in tool to crop to a square (see the preprocessing sketch at the end of this section).
20
- Tips for accuracy:
21
- * High contrast from background. The snake should stick out.
22
- * Coiled or wrapped snakes. Stretched-out snakes spread out the patterns, lowering accuracy.
23
- * Include a snake face if possible.
24
- * Use natural light with good white balance, no red lights or extreme lighting.
25
-
26
- Photos on hands work, because a lot of the training set included pictures like that. You should run the model multiple times on different pictures and lighting to ensure the results are accurate.
27
- **This model is a guide to help you begin research**. You should look up pictures and traits of the classified morph to be 100% sure. Not all genes, ages, or variations are represented in this model.
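A minimal preprocessing sketch (assuming Pillow; the exact crop this Space applies may differ) for getting an arbitrary photo into the 224x224 square format the model expects:

```python
from PIL import Image

def to_model_input(path: str, size: int = 224) -> Image.Image:
    """Center-crop a photo to a square, then resize it to size x size."""
    img = Image.open(path).convert("RGB")
    side = min(img.size)                       # largest centered square
    left = (img.width - side) // 2
    top = (img.height - side) // 2
    img = img.crop((left, top, left + side, top + side))
    return img.resize((size, size))

# square = to_model_input("my_corn_snake.jpg")  # "my_corn_snake.jpg" is a hypothetical file name
```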
28
-
29
- ## Model Description
30
- This model is based on the Vision Transformer (ViT) which is a transformer encoder model (BERT-like) pretrained on a large collection of images in a supervised fashion, namely ImageNet-21k, at a resolution of 224x224 pixels.
31
- The model was set to multilabel mode, allowing it to classify multiple genes at once; however, it only sometimes works. See more details at the [original model page](https://huggingface.co/google/vit-base-patch16-224-in21k).
32
-
33
- ## Bias
34
- Many of the dataset images were young snakes, usually less than 2 years old. Because markings will sometimes change as a snake ages, the model works better on young snakes. Some mutations such as Sunkissed and Amel/Hypomelanistic are so common that the model predicts those at a much higher rate. Some grey genes like Cinder and Charcoal are often confused, due to their similarity.
35
-
36
- ## Dataset
37
- This model was trained on photos submitted to [r/Cornsnake_Pics](https://www.reddit.com/r/Cornsnake_Pics/), along with the post flairs, augmented with random rotations and hue shifts. The original data contains 5k center-cropped images at 512x512 (resized to 224x224 for this model). The augmented data had 3 augmented copies for each image, resulting in about 21k images.
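The augmentation described above could look roughly like the following torchvision sketch (the exact rotation range and hue strength used for the real dataset are not documented, so the values here are placeholders):

```python
import torchvision.transforms as T

# Placeholder parameters; the actual augmentation settings are not documented.
augment = T.Compose([
    T.RandomRotation(degrees=30),   # random rotations
    T.ColorJitter(hue=0.1),         # hue shifts
    T.Resize((224, 224)),
])

# augmented_copies = [augment(image) for _ in range(3)]  # 3 augmented copies per image
```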
38
-
39
- ## How to use
40
- Here is how to use this model in PyTorch:
41
- ```
42
- import torch
- from transformers import ViTForImageClassification, ViTImageProcessor
- from PIL import Image
- import requests
- 
- url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
- image = Image.open(requests.get(url, stream=True).raw)
- 
- classes = ['amel', 'cinder', 'sunkissed', 'anery', 'motley', 'toffee', 'bloodred', 'tessera', 'caramel', 'charcoal',
-            'coral', 'snow', 'fire', 'ghost', 'ultramel', 'stripe', 'hypo', 'kastanie', 'hypomelanistic', 'lava',
-            'lavender', 'miami', 'honey', 'wild-type', 'palmetto', 'salmon', 'diffused']
- 
- # Image processor from the base ViT checkpoint this model was fine-tuned from
- processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
- model = ViTForImageClassification.from_pretrained("IfanSnek/CornSnakes", num_labels=len(classes),
-                                                   problem_type="multi_label_classification")
- 
- inputs = processor(images=image, return_tensors="pt")
- outputs = model(**inputs)
- probs = torch.sigmoid(outputs.logits)[0]  # multi-label: independent sigmoid per class
- 
- predictions = {}
- for i, prob in enumerate(probs):
-     predictions[classes[i]] = prob.item()
65
- ```
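Since this is a multi-label setup, each entry in `predictions` is an independent probability. A simple way to turn them into a final answer is to keep every morph above a threshold (0.5 here is just a common default, not a tuned value):

```python
likely_morphs = {name: p for name, p in predictions.items() if p > 0.5}
print(sorted(likely_morphs, key=likely_morphs.get, reverse=True))
```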
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/filelock/_windows.py DELETED
@@ -1,64 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import os
4
- import sys
5
- from contextlib import suppress
6
- from errno import EACCES
7
- from pathlib import Path
8
- from typing import cast
9
-
10
- from ._api import BaseFileLock
11
- from ._util import raise_on_not_writable_file
12
-
13
- if sys.platform == "win32": # pragma: win32 cover
14
- import msvcrt
15
-
16
- class WindowsFileLock(BaseFileLock):
17
- """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""
18
-
19
- def _acquire(self) -> None:
20
- raise_on_not_writable_file(self.lock_file)
21
- flags = (
22
- os.O_RDWR # open for read and write
23
- | os.O_CREAT # create file if not exists
24
- | os.O_TRUNC # truncate file if not empty
25
- )
26
- try:
27
- fd = os.open(self.lock_file, flags, self._context.mode)
28
- except OSError as exception:
29
- if exception.errno != EACCES: # has no access to this lock
30
- raise
31
- else:
32
- try:
33
- msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
34
- except OSError as exception:
35
- os.close(fd) # close file first
36
- if exception.errno != EACCES: # file is already locked
37
- raise
38
- else:
39
- self._context.lock_file_fd = fd
40
-
41
- def _release(self) -> None:
42
- fd = cast(int, self._context.lock_file_fd)
43
- self._context.lock_file_fd = None
44
- msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
45
- os.close(fd)
46
-
47
- with suppress(OSError): # Probably another instance of the application has acquired the file lock.
48
- Path(self.lock_file).unlink()
49
-
50
- else: # pragma: win32 no cover
51
-
52
- class WindowsFileLock(BaseFileLock):
53
- """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""
54
-
55
- def _acquire(self) -> None:
56
- raise NotImplementedError
57
-
58
- def _release(self) -> None:
59
- raise NotImplementedError
60
-
61
-
62
- __all__ = [
63
- "WindowsFileLock",
64
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_version.py DELETED
@@ -1,16 +0,0 @@
1
- # This file must be kept very simple, because it is consumed from several
2
- # places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.
3
-
4
- # We use a simple scheme:
5
- # 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev
6
- # where the +dev versions are never released into the wild, they're just what
7
- # we stick into the VCS in between releases.
8
- #
9
- # This is compatible with PEP 440:
10
- # http://legacy.python.org/dev/peps/pep-0440/
11
- # via the use of the "local suffix" "+dev", which is disallowed on index
12
- # servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we
13
- # want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*
14
- # 1.0.0.)
15
-
16
- __version__ = "0.14.0"
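A small sketch (assuming the third-party `packaging` library is available) that checks the sorting behaviour the comments above rely on:

```python
from packaging.version import Version

# "+dev" is a PEP 440 local version label, so it sorts *after* the plain release...
assert Version("1.0.0+dev") > Version("1.0.0")
# ...whereas a ".dev" pre-release suffix sorts *before* it.
assert Version("1.0.0.dev0") < Version("1.0.0")
```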
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/huggingface_cli.py DELETED
@@ -1,49 +0,0 @@
1
- #!/usr/bin/env python
2
- # Copyright 2020 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- from argparse import ArgumentParser
17
-
18
- from huggingface_hub.commands.delete_cache import DeleteCacheCommand
19
- from huggingface_hub.commands.env import EnvironmentCommand
20
- from huggingface_hub.commands.lfs import LfsCommands
21
- from huggingface_hub.commands.scan_cache import ScanCacheCommand
22
- from huggingface_hub.commands.user import UserCommands
23
-
24
-
25
- def main():
26
- parser = ArgumentParser("huggingface-cli", usage="huggingface-cli <command> [<args>]")
27
- commands_parser = parser.add_subparsers(help="huggingface-cli command helpers")
28
-
29
- # Register commands
30
- EnvironmentCommand.register_subcommand(commands_parser)
31
- UserCommands.register_subcommand(commands_parser)
32
- LfsCommands.register_subcommand(commands_parser)
33
- ScanCacheCommand.register_subcommand(commands_parser)
34
- DeleteCacheCommand.register_subcommand(commands_parser)
35
-
36
- # Let's go
37
- args = parser.parse_args()
38
-
39
- if not hasattr(args, "func"):
40
- parser.print_help()
41
- exit(1)
42
-
43
- # Run
44
- service = args.func(args)
45
- service.run()
46
-
47
-
48
- if __name__ == "__main__":
49
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/config/defaults.py DELETED
@@ -1,289 +0,0 @@
1
- """
2
- @Date: 2021/07/17
3
- @description:
4
- """
5
- import os
6
- import logging
7
- from yacs.config import CfgNode as CN
8
-
9
- _C = CN()
10
- _C.DEBUG = False
11
- _C.MODE = 'train'
12
- _C.VAL_NAME = 'val'
13
- _C.TAG = 'default'
14
- _C.COMMENT = 'add some comments to help you understand'
15
- _C.SHOW_BAR = True
16
- _C.SAVE_EVAL = False
17
- _C.MODEL = CN()
18
- _C.MODEL.NAME = 'model_name'
19
- _C.MODEL.SAVE_BEST = True
20
- _C.MODEL.SAVE_LAST = True
21
- _C.MODEL.ARGS = []
22
- _C.MODEL.FINE_TUNE = []
23
-
24
- # -----------------------------------------------------------------------------
25
- # Training settings
26
- # -----------------------------------------------------------------------------
27
- _C.TRAIN = CN()
28
- _C.TRAIN.SCRATCH = False
29
- _C.TRAIN.START_EPOCH = 0
30
- _C.TRAIN.EPOCHS = 300
31
- _C.TRAIN.DETERMINISTIC = False
32
- _C.TRAIN.SAVE_FREQ = 5
33
-
34
- _C.TRAIN.BASE_LR = 5e-4
35
-
36
- _C.TRAIN.WARMUP_EPOCHS = 20
37
- _C.TRAIN.WEIGHT_DECAY = 0
38
- _C.TRAIN.WARMUP_LR = 5e-7
39
- _C.TRAIN.MIN_LR = 5e-6
40
- # Clip gradient norm
41
- _C.TRAIN.CLIP_GRAD = 5.0
42
- # Auto resume from latest checkpoint
43
- _C.TRAIN.RESUME_LAST = True
44
- # Gradient accumulation steps
45
- # could be overwritten by command line argument
46
- _C.TRAIN.ACCUMULATION_STEPS = 0
47
- # Whether to use gradient checkpointing to save memory
48
- # could be overwritten by command line argument
49
- _C.TRAIN.USE_CHECKPOINT = False
50
- # 'cpu' or 'cuda:0, 1, 2, 3' or 'cuda'
51
- _C.TRAIN.DEVICE = 'cuda'
52
-
53
- # LR scheduler
54
- _C.TRAIN.LR_SCHEDULER = CN()
55
- _C.TRAIN.LR_SCHEDULER.NAME = ''
56
- _C.TRAIN.LR_SCHEDULER.ARGS = []
57
-
58
-
59
- # Optimizer
60
- _C.TRAIN.OPTIMIZER = CN()
61
- _C.TRAIN.OPTIMIZER.NAME = 'adam'
62
- # Optimizer Epsilon
63
- _C.TRAIN.OPTIMIZER.EPS = 1e-8
64
- # Optimizer Betas
65
- _C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999)
66
- # SGD momentum
67
- _C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
68
-
69
- # Criterion
70
- _C.TRAIN.CRITERION = CN()
71
- # Boundary loss (Horizon-Net)
72
- _C.TRAIN.CRITERION.BOUNDARY = CN()
73
- _C.TRAIN.CRITERION.BOUNDARY.NAME = 'boundary'
74
- _C.TRAIN.CRITERION.BOUNDARY.LOSS = 'BoundaryLoss'
75
- _C.TRAIN.CRITERION.BOUNDARY.WEIGHT = 0.0
76
- _C.TRAIN.CRITERION.BOUNDARY.WEIGHTS = []
77
- _C.TRAIN.CRITERION.BOUNDARY.NEED_ALL = True
78
- # Up and Down depth loss (LED2-Net)
79
- _C.TRAIN.CRITERION.LEDDepth = CN()
80
- _C.TRAIN.CRITERION.LEDDepth.NAME = 'led_depth'
81
- _C.TRAIN.CRITERION.LEDDepth.LOSS = 'LEDLoss'
82
- _C.TRAIN.CRITERION.LEDDepth.WEIGHT = 0.0
83
- _C.TRAIN.CRITERION.LEDDepth.WEIGHTS = []
84
- _C.TRAIN.CRITERION.LEDDepth.NEED_ALL = True
85
- # Depth loss
86
- _C.TRAIN.CRITERION.DEPTH = CN()
87
- _C.TRAIN.CRITERION.DEPTH.NAME = 'depth'
88
- _C.TRAIN.CRITERION.DEPTH.LOSS = 'L1Loss'
89
- _C.TRAIN.CRITERION.DEPTH.WEIGHT = 0.0
90
- _C.TRAIN.CRITERION.DEPTH.WEIGHTS = []
91
- _C.TRAIN.CRITERION.DEPTH.NEED_ALL = False
92
- # Ratio(Room Height) loss
93
- _C.TRAIN.CRITERION.RATIO = CN()
94
- _C.TRAIN.CRITERION.RATIO.NAME = 'ratio'
95
- _C.TRAIN.CRITERION.RATIO.LOSS = 'L1Loss'
96
- _C.TRAIN.CRITERION.RATIO.WEIGHT = 0.0
97
- _C.TRAIN.CRITERION.RATIO.WEIGHTS = []
98
- _C.TRAIN.CRITERION.RATIO.NEED_ALL = False
99
- # Grad(Normal) loss
100
- _C.TRAIN.CRITERION.GRAD = CN()
101
- _C.TRAIN.CRITERION.GRAD.NAME = 'grad'
102
- _C.TRAIN.CRITERION.GRAD.LOSS = 'GradLoss'
103
- _C.TRAIN.CRITERION.GRAD.WEIGHT = 0.0
104
- _C.TRAIN.CRITERION.GRAD.WEIGHTS = [1.0, 1.0]
105
- _C.TRAIN.CRITERION.GRAD.NEED_ALL = True
106
- # Object loss
107
- _C.TRAIN.CRITERION.OBJECT = CN()
108
- _C.TRAIN.CRITERION.OBJECT.NAME = 'object'
109
- _C.TRAIN.CRITERION.OBJECT.LOSS = 'ObjectLoss'
110
- _C.TRAIN.CRITERION.OBJECT.WEIGHT = 0.0
111
- _C.TRAIN.CRITERION.OBJECT.WEIGHTS = []
112
- _C.TRAIN.CRITERION.OBJECT.NEED_ALL = True
113
- # Heatmap loss
114
- _C.TRAIN.CRITERION.CHM = CN()
115
- _C.TRAIN.CRITERION.CHM.NAME = 'corner_heat_map'
116
- _C.TRAIN.CRITERION.CHM.LOSS = 'HeatmapLoss'
117
- _C.TRAIN.CRITERION.CHM.WEIGHT = 0.0
118
- _C.TRAIN.CRITERION.CHM.WEIGHTS = []
119
- _C.TRAIN.CRITERION.CHM.NEED_ALL = False
120
-
121
- _C.TRAIN.VIS_MERGE = True
122
- _C.TRAIN.VIS_WEIGHT = 1024
123
- # -----------------------------------------------------------------------------
124
- # Output settings
125
- # -----------------------------------------------------------------------------
126
- _C.CKPT = CN()
127
- _C.CKPT.PYTORCH = './'
128
- _C.CKPT.ROOT = "./checkpoints"
129
- _C.CKPT.DIR = os.path.join(_C.CKPT.ROOT, _C.MODEL.NAME, _C.TAG)
130
- _C.CKPT.RESULT_DIR = os.path.join(_C.CKPT.DIR, 'results', _C.MODE)
131
-
132
- _C.LOGGER = CN()
133
- _C.LOGGER.DIR = os.path.join(_C.CKPT.DIR, "logs")
134
- _C.LOGGER.LEVEL = logging.DEBUG
135
-
136
- # -----------------------------------------------------------------------------
137
- # Misc
138
- # -----------------------------------------------------------------------------
139
- # Mixed precision opt level; if O0, no amp is used ('O0', 'O1', 'O2'). Please confirm your device supports FP16 (half precision).
140
- # overwritten by command line argument
141
- _C.AMP_OPT_LEVEL = 'O1'
142
- # Path to output folder, overwritten by command line argument
143
- _C.OUTPUT = ''
144
- # Tag of experiment, overwritten by command line argument
145
- _C.TAG = 'default'
146
- # Frequency to save checkpoint
147
- _C.SAVE_FREQ = 1
148
- # Frequency to logging info
149
- _C.PRINT_FREQ = 10
150
- # Fixed random seed
151
- _C.SEED = 0
152
- # Perform evaluation only, overwritten by command line argument
153
- _C.EVAL_MODE = False
154
- # Test throughput only, overwritten by command line argument
155
- _C.THROUGHPUT_MODE = False
156
-
157
- # -----------------------------------------------------------------------------
158
- # FIX
159
- # -----------------------------------------------------------------------------
160
- _C.LOCAL_RANK = 0
161
- _C.WORLD_SIZE = 0
162
-
163
- # -----------------------------------------------------------------------------
164
- # Data settings
165
- # -----------------------------------------------------------------------------
166
- _C.DATA = CN()
167
- # Sub dataset of pano_s2d3d
168
- _C.DATA.SUBSET = None
169
- # Dataset name
170
- _C.DATA.DATASET = 'mp3d'
171
- # Path to dataset, could be overwritten by command line argument
172
- _C.DATA.DIR = ''
173
- # Max wall number
174
- _C.DATA.WALL_NUM = 0 # all
175
- # Panorama image size
176
- _C.DATA.SHAPE = [512, 1024]
177
- # Real camera height
178
- _C.DATA.CAMERA_HEIGHT = 1.6
179
- # Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.
180
- _C.DATA.PIN_MEMORY = True
181
- # Debug use, fast test performance of model
182
- _C.DATA.FOR_TEST_INDEX = None
183
-
184
- # Batch size for a single GPU, could be overwritten by command line argument
185
- _C.DATA.BATCH_SIZE = 8
186
- # Number of data loading threads
187
- _C.DATA.NUM_WORKERS = 8
188
-
189
- # Training augment
190
- _C.DATA.AUG = CN()
191
- # Flip the panorama horizontally
192
- _C.DATA.AUG.FLIP = True
193
- # Pano Stretch Data Augmentation by HorizonNet
194
- _C.DATA.AUG.STRETCH = True
195
- # Rotate the panorama horizontally
196
- _C.DATA.AUG.ROTATE = True
197
- # Gamma adjusting
198
- _C.DATA.AUG.GAMMA = True
199
-
200
- _C.DATA.KEYS = []
201
-
202
-
203
- _C.EVAL = CN()
204
- _C.EVAL.POST_PROCESSING = None
205
- _C.EVAL.NEED_CPE = False
206
- _C.EVAL.NEED_F1 = False
207
- _C.EVAL.NEED_RMSE = False
208
- _C.EVAL.FORCE_CUBE = False
209
-
210
-
211
- def merge_from_file(cfg_path):
212
- config = _C.clone()
213
- config.merge_from_file(cfg_path)
214
- return config
215
-
216
-
217
- def get_config(args=None):
218
- config = _C.clone()
219
- if args:
220
- if 'cfg' in args and args.cfg:
221
- config.merge_from_file(args.cfg)
222
-
223
- if 'mode' in args and args.mode:
224
- config.MODE = args.mode
225
-
226
- if 'debug' in args and args.debug:
227
- config.DEBUG = args.debug
228
-
229
- if 'hidden_bar' in args and args.hidden_bar:
230
- config.SHOW_BAR = False
231
-
232
- if 'bs' in args and args.bs:
233
- config.DATA.BATCH_SIZE = args.bs
234
-
235
- if 'save_eval' in args and args.save_eval:
236
- config.SAVE_EVAL = True
237
-
238
- if 'val_name' in args and args.val_name:
239
- config.VAL_NAME = args.val_name
240
-
241
- if 'post_processing' in args and args.post_processing:
242
- config.EVAL.POST_PROCESSING = args.post_processing
243
-
244
- if 'need_cpe' in args and args.need_cpe:
245
- config.EVAL.NEED_CPE = args.need_cpe
246
-
247
- if 'need_f1' in args and args.need_f1:
248
- config.EVAL.NEED_F1 = args.need_f1
249
-
250
- if 'need_rmse' in args and args.need_rmse:
251
- config.EVAL.NEED_RMSE = args.need_rmse
252
-
253
- if 'force_cube' in args and args.force_cube:
254
- config.EVAL.FORCE_CUBE = args.force_cube
255
-
256
- if 'wall_num' in args and args.wall_num:
257
- config.DATA.WALL_NUM = args.wall_num
258
-
259
- args = config.MODEL.ARGS[0]
260
- config.CKPT.DIR = os.path.join(config.CKPT.ROOT, f"{args['decoder_name']}_{args['output_name']}_Net",
261
- config.TAG, 'debug' if config.DEBUG else '')
262
- config.CKPT.RESULT_DIR = os.path.join(config.CKPT.DIR, 'results', config.MODE)
263
- config.LOGGER.DIR = os.path.join(config.CKPT.DIR, "logs")
264
-
265
- core_number = os.popen("grep 'physical id' /proc/cpuinfo | sort | uniq | wc -l").read()
266
-
267
- try:
268
- config.DATA.NUM_WORKERS = int(core_number) * 2
269
- print(f"System core number: {config.DATA.NUM_WORKERS}")
270
- except ValueError:
271
- print(f"Can't get system core number, will use config: { config.DATA.NUM_WORKERS}")
272
- config.freeze()
273
- return config
274
-
275
-
276
- def get_rank_config(cfg, local_rank, world_size):
277
- local_rank = 0 if local_rank is None else local_rank
278
- config = cfg.clone()
279
- config.defrost()
280
- if world_size > 1:
281
- ids = config.TRAIN.DEVICE.split(':')[-1].split(',') if ':' in config.TRAIN.DEVICE else range(world_size)
282
- config.TRAIN.DEVICE = f'cuda:{ids[local_rank]}'
283
-
284
- config.LOCAL_RANK = local_rank
285
- config.WORLD_SIZE = world_size
286
- config.SEED = config.SEED + local_rank
287
-
288
- config.freeze()
289
- return config
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DeeeTeeee01/VODAFONE-CUSTOMER-CHURN-PREDICTION-APP/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: VODAFONE CUSTOMER CHURN PREDICTION APP
3
- emoji: 📊
4
- colorFrom: gray
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.35.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Dinoking/Guccio-AI-Designer/models/stylegan2/stylegan2-pytorch/lpips/pretrained_networks.py DELETED
@@ -1,181 +0,0 @@
1
- from collections import namedtuple
2
- import torch
3
- from torchvision import models as tv
4
- from IPython import embed
5
-
6
- class squeezenet(torch.nn.Module):
7
- def __init__(self, requires_grad=False, pretrained=True):
8
- super(squeezenet, self).__init__()
9
- pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features
10
- self.slice1 = torch.nn.Sequential()
11
- self.slice2 = torch.nn.Sequential()
12
- self.slice3 = torch.nn.Sequential()
13
- self.slice4 = torch.nn.Sequential()
14
- self.slice5 = torch.nn.Sequential()
15
- self.slice6 = torch.nn.Sequential()
16
- self.slice7 = torch.nn.Sequential()
17
- self.N_slices = 7
18
- for x in range(2):
19
- self.slice1.add_module(str(x), pretrained_features[x])
20
- for x in range(2,5):
21
- self.slice2.add_module(str(x), pretrained_features[x])
22
- for x in range(5, 8):
23
- self.slice3.add_module(str(x), pretrained_features[x])
24
- for x in range(8, 10):
25
- self.slice4.add_module(str(x), pretrained_features[x])
26
- for x in range(10, 11):
27
- self.slice5.add_module(str(x), pretrained_features[x])
28
- for x in range(11, 12):
29
- self.slice6.add_module(str(x), pretrained_features[x])
30
- for x in range(12, 13):
31
- self.slice7.add_module(str(x), pretrained_features[x])
32
- if not requires_grad:
33
- for param in self.parameters():
34
- param.requires_grad = False
35
-
36
- def forward(self, X):
37
- h = self.slice1(X)
38
- h_relu1 = h
39
- h = self.slice2(h)
40
- h_relu2 = h
41
- h = self.slice3(h)
42
- h_relu3 = h
43
- h = self.slice4(h)
44
- h_relu4 = h
45
- h = self.slice5(h)
46
- h_relu5 = h
47
- h = self.slice6(h)
48
- h_relu6 = h
49
- h = self.slice7(h)
50
- h_relu7 = h
51
- vgg_outputs = namedtuple("SqueezeOutputs", ['relu1','relu2','relu3','relu4','relu5','relu6','relu7'])
52
- out = vgg_outputs(h_relu1,h_relu2,h_relu3,h_relu4,h_relu5,h_relu6,h_relu7)
53
-
54
- return out
55
-
56
-
57
- class alexnet(torch.nn.Module):
58
- def __init__(self, requires_grad=False, pretrained=True):
59
- super(alexnet, self).__init__()
60
- alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features
61
- self.slice1 = torch.nn.Sequential()
62
- self.slice2 = torch.nn.Sequential()
63
- self.slice3 = torch.nn.Sequential()
64
- self.slice4 = torch.nn.Sequential()
65
- self.slice5 = torch.nn.Sequential()
66
- self.N_slices = 5
67
- for x in range(2):
68
- self.slice1.add_module(str(x), alexnet_pretrained_features[x])
69
- for x in range(2, 5):
70
- self.slice2.add_module(str(x), alexnet_pretrained_features[x])
71
- for x in range(5, 8):
72
- self.slice3.add_module(str(x), alexnet_pretrained_features[x])
73
- for x in range(8, 10):
74
- self.slice4.add_module(str(x), alexnet_pretrained_features[x])
75
- for x in range(10, 12):
76
- self.slice5.add_module(str(x), alexnet_pretrained_features[x])
77
- if not requires_grad:
78
- for param in self.parameters():
79
- param.requires_grad = False
80
-
81
- def forward(self, X):
82
- h = self.slice1(X)
83
- h_relu1 = h
84
- h = self.slice2(h)
85
- h_relu2 = h
86
- h = self.slice3(h)
87
- h_relu3 = h
88
- h = self.slice4(h)
89
- h_relu4 = h
90
- h = self.slice5(h)
91
- h_relu5 = h
92
- alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
93
- out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
94
-
95
- return out
96
-
97
- class vgg16(torch.nn.Module):
98
- def __init__(self, requires_grad=False, pretrained=True):
99
- super(vgg16, self).__init__()
100
- vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features
101
- self.slice1 = torch.nn.Sequential()
102
- self.slice2 = torch.nn.Sequential()
103
- self.slice3 = torch.nn.Sequential()
104
- self.slice4 = torch.nn.Sequential()
105
- self.slice5 = torch.nn.Sequential()
106
- self.N_slices = 5
107
- for x in range(4):
108
- self.slice1.add_module(str(x), vgg_pretrained_features[x])
109
- for x in range(4, 9):
110
- self.slice2.add_module(str(x), vgg_pretrained_features[x])
111
- for x in range(9, 16):
112
- self.slice3.add_module(str(x), vgg_pretrained_features[x])
113
- for x in range(16, 23):
114
- self.slice4.add_module(str(x), vgg_pretrained_features[x])
115
- for x in range(23, 30):
116
- self.slice5.add_module(str(x), vgg_pretrained_features[x])
117
- if not requires_grad:
118
- for param in self.parameters():
119
- param.requires_grad = False
120
-
121
- def forward(self, X):
122
- h = self.slice1(X)
123
- h_relu1_2 = h
124
- h = self.slice2(h)
125
- h_relu2_2 = h
126
- h = self.slice3(h)
127
- h_relu3_3 = h
128
- h = self.slice4(h)
129
- h_relu4_3 = h
130
- h = self.slice5(h)
131
- h_relu5_3 = h
132
- vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
133
- out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
134
-
135
- return out
136
-
137
-
138
-
139
- class resnet(torch.nn.Module):
140
- def __init__(self, requires_grad=False, pretrained=True, num=18):
141
- super(resnet, self).__init__()
142
- if(num==18):
143
- self.net = tv.resnet18(pretrained=pretrained)
144
- elif(num==34):
145
- self.net = tv.resnet34(pretrained=pretrained)
146
- elif(num==50):
147
- self.net = tv.resnet50(pretrained=pretrained)
148
- elif(num==101):
149
- self.net = tv.resnet101(pretrained=pretrained)
150
- elif(num==152):
151
- self.net = tv.resnet152(pretrained=pretrained)
152
- self.N_slices = 5
153
-
154
- self.conv1 = self.net.conv1
155
- self.bn1 = self.net.bn1
156
- self.relu = self.net.relu
157
- self.maxpool = self.net.maxpool
158
- self.layer1 = self.net.layer1
159
- self.layer2 = self.net.layer2
160
- self.layer3 = self.net.layer3
161
- self.layer4 = self.net.layer4
162
-
163
- def forward(self, X):
164
- h = self.conv1(X)
165
- h = self.bn1(h)
166
- h = self.relu(h)
167
- h_relu1 = h
168
- h = self.maxpool(h)
169
- h = self.layer1(h)
170
- h_conv2 = h
171
- h = self.layer2(h)
172
- h_conv3 = h
173
- h = self.layer3(h)
174
- h_conv4 = h
175
- h = self.layer4(h)
176
- h_conv5 = h
177
-
178
- outputs = namedtuple("Outputs", ['relu1','conv2','conv3','conv4','conv5'])
179
- out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
180
-
181
- return out
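A quick usage sketch for these wrappers (`pretrained=False` is used here only to avoid downloading weights; with pretrained weights the activations are the ones typically used for perceptual distances):

```python
import torch

# Instantiate the VGG16 wrapper defined above and inspect the
# intermediate activations it exposes as a namedtuple.
extractor = vgg16(requires_grad=False, pretrained=False)
feats = extractor(torch.randn(1, 3, 64, 64))
print(feats._fields)        # ('relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3')
print(feats.relu3_3.shape)  # torch.Size([1, 256, 16, 16])
```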
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Dusan/clickbaitonator/fudge/data.py DELETED
@@ -1,415 +0,0 @@
1
- import random
2
- import math
3
- import os
4
- import pickle
5
- from collections import defaultdict, namedtuple
6
- import string
7
-
8
- os.environ['TOKENIZERS_PARALLELISM'] = 'false' # turn off since we're using multiple threads for loading anyway
9
-
10
- from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline, set_seed, GPT2Tokenizer, GPT2Model
11
- import numpy as np
12
- from tqdm import tqdm
13
- import torch
14
-
15
- from fudge.util import suppress_stdout
16
- from fudge.poetry_util import is_iambic, count_syllables, get_rhymes, get_rhyme_group
17
- from fudge.constants import *
18
-
19
- DatasetInfo = namedtuple('DatasetInfo',
20
- ['index2word', 'word2index', 'total_words', 'vocab', 'glove_embeddings'])
21
- RhymeInfo = namedtuple('RhymeInfo',
22
- ['word2rhyme_group', 'rhyme_group_counts', 'rhyme_groups', 'index2rhyme_group', 'rhyme_group2index', 'total_rhyme_groups'])
23
-
24
- def collate(batch):
25
- pad_id = batch[0][4]
26
- inputs = [b[0] for b in batch]
27
- lengths = torch.LongTensor([b[1] for b in batch])
28
- max_length = lengths.max()
29
- for i in range(len(inputs)):
30
- if len(inputs[i]) < max_length:
31
- inputs[i] = torch.cat([inputs[i], torch.zeros(max_length - len(inputs[i])).long()], dim=0) # actually 0 is fine as pad since it's masked out
32
- inputs = torch.stack(inputs, dim=0)
33
- future_words = torch.LongTensor([b[2] for b in batch]).unsqueeze(0).expand(len(batch), -1).clone() # batch x N=batch
34
- labels = torch.zeros_like(future_words).long()
35
- labels = labels.scatter(1, torch.arange(len(batch)).unsqueeze(1), torch.ones(len(batch)).long().unsqueeze(1)).clone()
36
- log_probs = torch.Tensor([b[3] for b in batch])
37
- classification_labels = [b[5] for b in batch] # batch
38
- if type(classification_labels[0]) == list:
39
- for i in range(len(classification_labels)):
40
- assert len(classification_labels[i]) == lengths[i]
41
- if len(classification_labels[i]) < max_length:
42
- classification_labels[i] = torch.cat([torch.LongTensor(classification_labels[i]), -1 + torch.zeros(max_length - len(classification_labels[i])).long()], dim=0)
43
- else:
44
- classification_labels[i] = torch.LongTensor(classification_labels[i])
45
- classification_labels = torch.stack(classification_labels, dim=0) # batch x seq
46
- else:
47
- assert type(classification_labels[0]) == int
48
- classification_labels = torch.LongTensor(classification_labels) # they're just int labels
49
- syllables_to_go = torch.LongTensor([b[6] for b in batch])
50
- future_word_num_syllables = torch.LongTensor([b[7] for b in batch])
51
- rhyme_group_index = torch.LongTensor([b[8] for b in batch])
52
- return (inputs, lengths, future_words, log_probs, labels, classification_labels, syllables_to_go, future_word_num_syllables, rhyme_group_index)
53
-
54
-
55
- def load_rhyme_info(index2word, vocab):
56
- word2rhyme_group = defaultdict(lambda: UNKNOWN_RHYME_GROUP)
57
- rhyme_group_counts = defaultdict(lambda: 0)
58
- rhyme_groups = set()
59
- for word in index2word:
60
- try:
61
- rhyme_group = get_rhyme_group(word)
62
- word2rhyme_group[word] = rhyme_group
63
- rhyme_group_counts[rhyme_group] += (vocab[word] if word in vocab else 1) # for rare words not in vocab, just use 1
64
- rhyme_groups.add(rhyme_group)
65
- except:
66
- rhyme_group_counts[UNKNOWN_RHYME_GROUP] += (vocab[word] if word in vocab else 1)
67
- index2rhyme_group = [UNKNOWN_RHYME_GROUP] + sorted(list(rhyme_groups))
68
- rhyme_group2index = {s: i for i, s in enumerate(index2rhyme_group)}
69
- total_rhyme_groups = sum(rhyme_group_counts.values())
70
-
71
- return RhymeInfo(word2rhyme_group=dict(word2rhyme_group),
72
- rhyme_group_counts=dict(rhyme_group_counts),
73
- rhyme_groups=rhyme_groups,
74
- index2rhyme_group=index2rhyme_group,
75
- rhyme_group2index=rhyme_group2index,
76
- total_rhyme_groups=total_rhyme_groups)
77
-
78
-
79
- class Dataset:
80
- def __init__(self, args):
81
- print('loading data')
82
- random.seed(args.seed)
83
- self.batch_size = args.batch_size
84
- self.data_dir = args.data_dir
85
- self.topic = args.task == 'topic'
86
- self.formality = args.task == 'formality'
87
- self.iambic = args.task == 'iambic'
88
- self.rhyme = args.task == 'rhyme'
89
- self.newline = args.task == 'newline'
90
-
91
- self.tokenizer = AutoTokenizer.from_pretrained(FORMALITY_MODEL_STRING if self.formality else TOPIC_MODEL_STRING)
92
- self.tokenizer.add_special_tokens({'pad_token': PAD_TOKEN})
93
- self.gpt_pad_id = self.tokenizer.encode(PAD_TOKEN)[0] # actually just the vocab size
94
- sentences = []
95
- self.vocab = defaultdict(lambda: 0)
96
- if self.formality:
97
- self.vocab['placeholder'] = 1 # anything so we don't crash
98
- train, val, test = [], [], []
99
- for category, label in [('formal', 1), ('informal', 0)]:
100
- with open(os.path.join(args.data_dir, 'train', category), 'r') as rf:
101
- for i, line in enumerate(rf):
102
- if len(line) > FORMALITY_MAX_LEN:
103
- line = ' '.join(line.strip()[:FORMALITY_MAX_LEN].split()[:-1]) # cutoff words until below max len; chosen so only ~20 examples affected in dataset
104
- if i < FORMALITY_VAL_SIZE // 2:
105
- val.append((line.strip(), label))
106
- else:
107
- train.append((line.strip(), label))
108
- with open(os.path.join(args.data_dir, 'test', category), 'r') as rf:
109
- for line in rf:
110
- if len(line) > FORMALITY_MAX_LEN:
111
- line = ' '.join(line.strip()[:FORMALITY_MAX_LEN].split()[:-1]) # cutoff words until below max len
112
- test.append((line.strip(), label))
113
- self.splits = {}
114
- self.splits['train'], self.splits['val'], self.splits['test'] = train, val, test
115
- else: # topic / poetry
116
- for root, _, filenames in os.walk(args.data_dir):
117
- for fname in filenames:
118
- with open(os.path.join(root, fname), 'r') as rf:
119
- for line in rf:
120
- sentences.append(line.strip())
121
- for word in line.strip().split(' '):
122
- self.vocab[word] += 1
123
- random.shuffle(sentences)
124
- self.splits = {}
125
- if args.debug:
126
- self.splits['val'] = sentences
127
- self.splits['test'] = sentences
128
- self.splits['train'] = sentences
129
- else:
130
- self.splits['val'] = sentences[:TOPIC_VAL_SIZE]
131
- self.splits['test'] = sentences[TOPIC_VAL_SIZE:2*TOPIC_VAL_SIZE]
132
- self.splits['train'] = sentences[2*TOPIC_VAL_SIZE:]
133
-
134
- if args.dataset_info is not None:
135
- print('loading dataset info from file')
136
- with open(args.dataset_info, 'rb') as rf:
137
- dataset_info = pickle.load(rf)
138
- self.vocab, self.total_words, self.index2word, self.word2index, self.glove_embeddings = \
139
- dataset_info.vocab, dataset_info.total_words, dataset_info.index2word, dataset_info.word2index, dataset_info.glove_embeddings
140
- self.dataset_info = dataset_info
141
- else:
142
- print('generating dataset info from scratch')
143
- words_values = list(self.vocab.items())
144
- words_values = sorted(words_values, key=lambda x: x[1], reverse=True)
145
- if args.glove_file is None:
146
- print('no glove embeddings given')
147
- for word, _ in words_values[VOCAB_SIZE:]: # only use somewhat common tokens
148
- del self.vocab[word]
149
- glove_embeddings = None
150
- else:
151
- print('loading glove embeddings')
152
- glove_embeddings = {}
153
- with open(args.glove_file, 'r') as rf:
154
- for i, line in enumerate(rf):
155
- if i % GLOVE_PRINT_PROGRESS_FREQ == 0:
156
- print(i)
157
- line = line.strip().split()
158
- if len(line) != GLOVE_DIM + 1:
159
- continue # skip multi-word embeddings which are rare anyway
160
- glove_embeddings[line[0]] = [float(x) for x in line[1:]]
161
- for word, _ in words_values:
162
- if word not in glove_embeddings:
163
- del self.vocab[word]
164
- self.total_words = sum(self.vocab.values())
165
- self.index2word = [PAD_TOKEN] + sorted(list(self.vocab.keys()))
166
- self.word2index = {s: i for i, s in enumerate(self.index2word)}
167
- self.vocab = dict(self.vocab) # so we can pickle later
168
- if glove_embeddings is None:
169
- self.glove_embeddings = None
170
- else:
171
- self.glove_embeddings = torch.stack([torch.zeros(GLOVE_DIM)] + [torch.Tensor(glove_embeddings[word]) for word in self.index2word[1:]], dim=0)
172
-
173
- self.dataset_info = DatasetInfo(index2word=self.index2word,
174
- word2index=self.word2index,
175
- total_words=self.total_words,
176
- vocab=self.vocab,
177
- glove_embeddings=self.glove_embeddings)
178
-
179
- if self.rhyme:
180
- if args.rhyme_info is not None:
181
- print('loading rhyme info from file')
182
- with open(args.rhyme_info, 'rb') as rf:
183
- self.rhyme_info = pickle.load(rf)
184
- else:
185
- self.rhyme_info = load_rhyme_info(self.index2word, self.vocab)
186
- self.word2rhyme_group, self.rhyme_group_counts, self.rhyme_groups, self.index2rhyme_group, self.rhyme_group2index, self.total_rhyme_groups = \
187
- defaultdict(lambda: UNKNOWN_RHYME_GROUP, self.rhyme_info.word2rhyme_group), self.rhyme_info.rhyme_group_counts, self.rhyme_info.rhyme_groups, self.rhyme_info.index2rhyme_group, self.rhyme_info.rhyme_group2index, self.rhyme_info.total_rhyme_groups
188
-
189
- print('done loading data')
190
- print('split sizes:')
191
- for key in ['train', 'val', 'test']:
192
- print(key, len(self.splits[key]))
193
- if not self.formality:
194
- print('total words', self.total_words)
195
- print('vocab size', len(self.index2word))
196
-
197
-
198
- def shuffle(self, split, seed=None):
199
- assert split in ['train', 'val', 'test']
200
- if seed is not None:
201
- random.seed(seed)
202
- random.shuffle(self.splits[split])
203
-
204
-
205
- def loader(self, split, num_workers=20, indices=None):
206
- assert split in ['train', 'val', 'test']
207
- data = self.splits[split] if indices is None else [self.splits[split][i] for i in indices]
208
- return torch.utils.data.DataLoader(SplitLoader(data, self), batch_size=self.batch_size, pin_memory=True, collate_fn=collate, num_workers=num_workers)
209
-
210
-
211
- class SplitLoader(torch.utils.data.IterableDataset):
212
- def __init__(self, data, parent):
213
- super(SplitLoader).__init__()
214
- self.data = data
215
- self.pos = 0
216
- self.parent = parent
217
-
218
-
219
- def __len__(self):
220
- return len(self.data)
221
-
222
-
223
- def __iter__(self):
224
- return self
225
-
226
-
227
- def __next__(self):
228
- increment = 1
229
- worker_info = torch.utils.data.get_worker_info()
230
- if worker_info is not None: # in a worker process
231
- increment = worker_info.num_workers
232
- worker_id = worker_info.id
233
- if self.pos == 0:
234
- self.pos = worker_id
235
- valid = False
236
- while not valid:
237
- if self.pos >= len(self):
238
- raise StopIteration
239
- if self.parent.topic:
240
- failed = False
241
- future_word_num_syllables, rhyme_group_index, syllables_to_go = -1, -1, -1
242
- raw_sentence, classification_label = self.data[self.pos], -1
243
- original_sentence = raw_sentence.split()
244
- sentence = self.parent.tokenizer.encode(raw_sentence, return_tensors='pt')[0]
245
- length = len(sentence)
246
- min_sentence_length = MIN_SENTENCE_LENGTH
247
- if len(sentence) > min_sentence_length: # set to 3. well, everything in data is > 3 for the bag of words task
248
- pos_to_split = random.randint(1, length - 1) # for lm, learn all positions at once
249
- inp = sentence[:pos_to_split]
250
- length = len(inp)
251
- num_words_in_input = len(self.parent.tokenizer.decode(inp).split())
252
- if not failed and num_words_in_input < len(original_sentence):
253
- future_word_position_max = len(original_sentence) - 1
254
- future_word_position = random.randint(num_words_in_input-1, future_word_position_max) # allow the last possibly partial word though
255
- future_word = original_sentence[future_word_position]
256
- unstripped_future_word = future_word
257
- future_word = future_word.strip().strip(string.punctuation) # NOTE: we didn't strip punctuation for the topic bag of words paper experiments for our method. it doesn't make much difference, though.
258
- if not failed and future_word in self.parent.word2index.keys():
259
- word_log_prob = math.log(self.parent.vocab[future_word] / self.parent.total_words) # roughly baseline prob of word under noise model
260
- future_word = self.parent.word2index[future_word]
261
- pad_id = self.parent.gpt_pad_id
262
- example = (inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)
263
- valid = not failed
264
- elif self.parent.formality:
265
- future_word_num_syllables, rhyme_group_index, syllables_to_go = -1, -1, -1
266
- raw_sentence, classification_label = self.data[self.pos]
267
- original_sentence = raw_sentence.split()
268
- sentence = self.parent.tokenizer.encode(raw_sentence, return_tensors='pt')[0]
269
- length = len(sentence)
270
- min_sentence_length = MIN_SENTENCE_LENGTH
271
- if len(sentence) > min_sentence_length: # set to 3. well, everything in data is > 3 for the bag of words task
272
- pos_to_split = length # no need to split; we're going to train on all possible prefixes simultaneously for efficiency
273
- inp = sentence[:pos_to_split]
274
- length = len(inp)
275
- num_words_in_input = len(self.parent.tokenizer.decode(inp).split())
276
- # only look up to 10 words ahead if we're doing count syllables, since we'll filter out anything more than 10 syllables ahead anyway
277
- future_word_position_max = len(original_sentence) - 1
278
- future_word_position = 0
279
- future_word = 'placeholder'
280
- unstripped_future_word = future_word
281
- future_word = future_word.strip().strip(string.punctuation) # NOTE: we didn't strip punctuation for the topic bag of words paper experiments for our method. it doesn't make much difference, though.
282
- word_log_prob, future_word = 0, 0
283
- pad_id = self.parent.gpt_pad_id
284
- example = (inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)
285
- valid = True
286
- elif self.parent.iambic:
287
- failed = False
288
- future_word_num_syllables, rhyme_group_index, syllables_to_go = -1, -1, -1
289
- raw_sentence, classification_label = self.data[self.pos], -1
290
- original_sentence = raw_sentence.split()
291
- sentence = self.parent.tokenizer.encode(raw_sentence, return_tensors='pt')[0]
292
- length = len(sentence)
293
- min_sentence_length = MIN_SENTENCE_LENGTH
294
- if len(sentence) > min_sentence_length: # set to 3. well, everything in data is > 3 for the bag of words task
295
- pos_to_split = random.randint(0, length - 1)
296
- # try to get a subseq of exactly 10 syllables
297
- inp = sentence[pos_to_split:]
298
- num_syllables = 0
299
- checked = False
300
- for i in range(1, len(inp)):
301
- decoded = self.parent.tokenizer.decode(inp[:i])
302
- num_syllables = count_syllables(decoded)
303
- if num_syllables > POETRY_LINE_SYLLABLES:
304
- inp = inp[:i-1] # might get a few data points where the split is in the middle of a word, but it should be ok for learning.
305
- last_line_length = i-1
306
- decoded = self.parent.tokenizer.decode(inp)
307
- num_syllables = count_syllables(decoded)
308
- checked = True
309
- break
310
- if not checked or num_syllables != POETRY_LINE_SYLLABLES:
311
- failed = True
312
- length = len(inp)
313
- num_words_in_input = len(self.parent.tokenizer.decode(inp).split())
314
- classification_label = [is_iambic(self.parent.tokenizer.decode(inp)) for _ in range(length)] # predict for whole seq including future
315
- # only look up to 10 words ahead if we're doing count syllables, since we'll filter out anything more than 10 syllables ahead anyway
316
- future_word_position_max = len(original_sentence) - 1
317
- future_word_position = 0
318
- future_word = 'placeholder'
319
- unstripped_future_word = future_word
320
- future_word = future_word.strip().strip(string.punctuation) # NOTE: we didn't strip punctuation for the topic bag of words paper experiments for our method. it doesn't make much difference, though.
321
- if not failed:
322
- word_log_prob, future_word = 0, 0
323
- pad_id = self.parent.gpt_pad_id
324
- example = (inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)
325
- valid = not failed
326
- elif self.parent.rhyme:
327
- failed = False
328
- future_word_num_syllables, rhyme_group_index = -1, -1
329
- raw_sentence, classification_label = self.data[self.pos], -1
330
- original_sentence = raw_sentence.split()
331
-                 sentence = self.parent.tokenizer.encode(raw_sentence, return_tensors='pt')[0]
-                 length = len(sentence)
-                 min_sentence_length = MIN_SENTENCE_LENGTH
-                 if len(sentence) > min_sentence_length: # set to 3. well, everything in data is > 3 for the bag of words task
-                     pos_to_split = random.randint(1, length - 1) # for lm, learn all positions at once
-                     inp = sentence[:pos_to_split]
-                     length = len(inp)
-                     num_words_in_input = len(self.parent.tokenizer.decode(inp).split())
-                     if not failed and num_words_in_input < len(original_sentence):
-                         # only look up to 10 words ahead if we're doing count syllables, since we'll filter out anything more than 10 syllables ahead anyway
-                         future_word_position_max = min(len(original_sentence) - 1, num_words_in_input + MAX_COUNT_SYLLABLE_DIST)
-                         future_word_position = random.randint(num_words_in_input-1, future_word_position_max) # allow the last possibly partial word though
-                         future_word = original_sentence[future_word_position]
-                         unstripped_future_word = future_word
-                         future_word = future_word.strip().strip(string.punctuation) # NOTE: we didn't strip punctuation for the topic bag of words paper experiments for our method. it doesn't make much difference, though.
-
-                         words_in_between = original_sentence[num_words_in_input-1:future_word_position+1]
-                         syllables_to_go = count_syllables(' '.join(words_in_between))
-                         if syllables_to_go > MAX_COUNT_SYLLABLE_DIST:
-                             failed = True
-                         future_word_num_syllables = count_syllables(future_word)
-                         rhyme_group = self.parent.word2rhyme_group[future_word]
-                         rhyme_group_index = self.parent.rhyme_group2index[rhyme_group]
-                         # truncate context a bit since we're just doing couplets. random length from 1 to max desired length for this purpose.
-                         desired_length = random.randint(1, MAX_COUNT_SYLLABLE_INPUT_LENGTH)
-                         inp = inp[-desired_length:]
-                         length = len(inp)
-
-                         if not failed and future_word in self.parent.word2index.keys():
-                             word_log_prob = math.log(self.parent.rhyme_group_counts[rhyme_group] / self.parent.total_rhyme_groups)
-                             future_word = rhyme_group_index # future conditioning is just the rhyme group in this case
-                             pad_id = self.parent.gpt_pad_id
-                             example = (inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)
-                             valid = not failed
-             elif self.parent.newline:
-                 failed = False
-                 future_word_num_syllables, rhyme_group_index = -1, -1
-                 raw_sentence, classification_label = self.data[self.pos], -1
-                 original_sentence = raw_sentence.split()
-                 sentence = self.parent.tokenizer.encode(raw_sentence, return_tensors='pt')[0]
-                 length = len(sentence)
-                 min_sentence_length = MIN_SENTENCE_LENGTH
-                 if len(sentence) > min_sentence_length: # set to 3. well, everything in data is > 3 for the bag of words task
-                     pos_to_split = random.randint(1, length - 1) # for lm, learn all positions at once
-                     inp = sentence[:pos_to_split]
-                     while pos_to_split < len(sentence):
-                         if len(self.parent.tokenizer.decode(inp).split()) == len(self.parent.tokenizer.decode(sentence[:pos_to_split + 1]).split()):
-                             pos_to_split += 1
-                             inp = sentence[:pos_to_split]
-                         else:
-                             break
-                     length = len(inp)
-                     num_words_in_input = len(self.parent.tokenizer.decode(inp).split())
-                     if not failed and num_words_in_input < len(original_sentence):
-                         # only look up to 10 words ahead if we're doing count syllables, since we'll filter out anything more than 10 syllables ahead anyway
-                         future_word_position_max = len(original_sentence) - 1
-                         future_word_position = random.randint(num_words_in_input-1, future_word_position_max) # allow the last possibly partial word though
-                         future_word = original_sentence[future_word_position]
-                         unstripped_future_word = future_word
-                         future_word = future_word.strip().strip(string.punctuation) # NOTE: we didn't strip punctuation for the topic bag of words paper experiments for our method. it doesn't make much difference, though.
-
-                         # future_word = original_sentence[-1] # useful for debugging
-                         words_in_between = original_sentence[num_words_in_input-1:future_word_position+1]
-                         syllables_to_go = count_syllables(' '.join(words_in_between))
-                         if syllables_to_go > MAX_COUNT_SYLLABLE_DIST:
-                             failed = True
-                         # truncate context a bit since we're just doing couplets. random length from 1 to max desired length for this purpose.
-                         desired_length = random.randint(1, MAX_COUNT_SYLLABLE_INPUT_LENGTH)
-                         # desired_length = 10 # useful for debugging
-                         inp = inp[-desired_length:]
-                         length = len(inp)
-                         true_label = 1 if unstripped_future_word.strip()[-1] in PHRASE_ENDS else 0 # common ways to end a phrase
-                         classification_label = [-1 for _ in range(length)]
-                         classification_label[-1] = true_label # only learn at the last position
-                         if not failed and future_word in self.parent.word2index.keys():
-                             word_log_prob = math.log(self.parent.vocab[future_word] / self.parent.total_words) # roughly baseline prob of word under noise model
-                             future_word = self.parent.word2index[future_word]
-                             pad_id = self.parent.gpt_pad_id
-                             example = (inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)
-                             valid = not failed
-             else:
-                 raise NotImplementedError
-
-             self.pos += increment
-         return example
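
The deleted loader above yields variable-length example tuples of the form `(inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)`, where `inp` is a token-id tensor whose length differs per example. Below is a minimal collate sketch for batching such tuples through a PyTorch `DataLoader`; the function name `collate_examples` and the padding scheme are illustrative assumptions, not code recovered from the deleted file.

```python
import torch

def collate_examples(batch):
    # Hypothetical collate sketch: pad each variable-length `inp` to the batch
    # maximum using that example's own `pad_id`, and stack the scalar fields.
    (inps, lengths, future_words, word_log_probs, pad_ids, labels,
     syllables_to_go, future_word_syllables, rhyme_groups) = zip(*batch)
    max_len = max(len(inp) for inp in inps)
    padded_inps = torch.stack([
        torch.cat([inp, torch.full((max_len - len(inp),), pad_id, dtype=inp.dtype)])
        for inp, pad_id in zip(inps, pad_ids)
    ])
    return (
        padded_inps,
        torch.tensor(lengths),
        torch.tensor(future_words),
        torch.tensor(word_log_probs),
        labels,  # scalar -1 or a per-token list, depending on the task branch
        torch.tensor(syllables_to_go),
        torch.tensor(future_word_syllables),
        torch.tensor(rhyme_groups),
    )
```

Passing this as `collate_fn` to `torch.utils.data.DataLoader` would then produce padded batches from the iterator sketched in the diff above.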
 
spaces/EXPOSUREEE/Ai-Image-Enhancer/README.md DELETED
@@ -1,35 +0,0 @@
- ---
- title: EXPO
- emoji: 🏃
- colorFrom: blue
- colorTo: blue
- sdk: gradio
- sdk_version: 3.1.7
- app_file: app.py
- pinned: false
- duplicated_from: akhaliq/Real-ESRGAN
- ---
-
- # Configuration
-
- `title`: _string_
- Display title for the Space
-
- `emoji`: _string_
- Space emoji (emoji-only character allowed)
-
- `colorFrom`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `colorTo`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `sdk`: _string_
- Can be either `gradio` or `streamlit`
-
- `app_file`: _string_
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
- Path is relative to the root of the repository.
-
- `pinned`: _boolean_
- Whether the Space stays on top of your list.
 
spaces/Eddycrack864/Applio-Inference/tools/infer_batch_rvc.py DELETED
@@ -1,72 +0,0 @@
- import argparse
- import os
- import sys
-
- print("Command-line arguments:", sys.argv)
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- import sys
-
- import tqdm as tq
- from dotenv import load_dotenv
- from scipy.io import wavfile
-
- from configs.config import Config
- from infer.modules.vc.modules import VC
-
-
- def arg_parse() -> tuple:
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--f0up_key", type=int, default=0)
-     parser.add_argument("--input_path", type=str, help="input path")
-     parser.add_argument("--index_path", type=str, help="index path")
-     parser.add_argument("--f0method", type=str, default="harvest", help="harvest or pm")
-     parser.add_argument("--opt_path", type=str, help="opt path")
-     parser.add_argument("--model_name", type=str, help="store in assets/weight_root")
-     parser.add_argument("--index_rate", type=float, default=0.66, help="index rate")
-     parser.add_argument("--device", type=str, help="device")
-     parser.add_argument("--is_half", type=bool, help="use half -> True")
-     parser.add_argument("--filter_radius", type=int, default=3, help="filter radius")
-     parser.add_argument("--resample_sr", type=int, default=0, help="resample sr")
-     parser.add_argument("--rms_mix_rate", type=float, default=1, help="rms mix rate")
-     parser.add_argument("--protect", type=float, default=0.33, help="protect")
-
-     args = parser.parse_args()
-     sys.argv = sys.argv[:1]
-
-     return args
-
-
- def main():
-     load_dotenv()
-     args = arg_parse()
-     config = Config()
-     config.device = args.device if args.device else config.device
-     config.is_half = args.is_half if args.is_half else config.is_half
-     vc = VC(config)
-     vc.get_vc(args.model_name)
-     audios = os.listdir(args.input_path)
-     for file in tq.tqdm(audios):
-         if file.endswith(".wav"):
-             file_path = os.path.join(args.input_path, file)
-             _, wav_opt = vc.vc_single(
-                 0,
-                 file_path,
-                 args.f0up_key,
-                 None,
-                 args.f0method,
-                 args.index_path,
-                 None,
-                 args.index_rate,
-                 args.filter_radius,
-                 args.resample_sr,
-                 args.rms_mix_rate,
-                 args.protect,
-             )
-             out_path = os.path.join(args.opt_path, file)
-             wavfile.write(out_path, wav_opt[0], wav_opt[1])
-
-
- if __name__ == "__main__":
-     main()
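
For a single input file, the conversion pipeline that the deleted batch script wraps can be driven directly. The sketch below uses only calls that appear in the script above (`Config`, `VC`, `get_vc`, `vc_single`, `wavfile.write`) with the same positional argument order; the model name, index path, and audio paths are placeholder assumptions.

```python
# Single-file sketch mirroring the deleted batch loop; names and paths below
# are placeholders, not values taken from the repository.
from dotenv import load_dotenv
from scipy.io import wavfile

from configs.config import Config
from infer.modules.vc.modules import VC

load_dotenv()
config = Config()
vc = VC(config)
vc.get_vc("my_model.pth")  # hypothetical weight file under assets/weight_root
_, wav_opt = vc.vc_single(
    0,                             # speaker id
    "input/sample.wav",            # input audio path (placeholder)
    0,                             # f0 up-key
    None,                          # f0 file
    "harvest",                     # f0 method (harvest or pm)
    "logs/my_model/added.index",   # index path (placeholder)
    None,                          # auxiliary index (unused, as in the script)
    0.66,                          # index rate
    3,                             # filter radius
    0,                             # resample sr
    1,                             # rms mix rate
    0.33,                          # protect
)
wavfile.write("output/sample.wav", wav_opt[0], wav_opt[1])
```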