diff --git a/spaces/101-5/gpt4free/g4f/Provider/Providers/helpers/you.py b/spaces/101-5/gpt4free/g4f/Provider/Providers/helpers/you.py
deleted file mode 100644
index 02985ed14d4848c2de20a99b4771d208286a2558..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/Provider/Providers/helpers/you.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import sys
-import json
-import urllib.parse
-
-from curl_cffi import requests
-
-config = json.loads(sys.argv[1])
-messages = config['messages']
-prompt = ''
-
-
-def transform(messages: list) -> list:
-    result = []
-    i = 0
-
-    while i < len(messages):
-        if messages[i]['role'] == 'user':
-            question = messages[i]['content']
-            i += 1
-
-            if i < len(messages) and messages[i]['role'] == 'assistant':
-                answer = messages[i]['content']
-                i += 1
-            else:
-                answer = ''
-
-            result.append({'question': question, 'answer': answer})
-
-        elif messages[i]['role'] == 'assistant':
-            result.append({'question': '', 'answer': messages[i]['content']})
-            i += 1
-
-        elif messages[i]['role'] == 'system':
-            result.append({'question': messages[i]['content'], 'answer': ''})
-            i += 1
-
-    return result
-
-headers = {
-    'Content-Type': 'application/x-www-form-urlencoded',
-    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
-    'Sec-Fetch-Site': 'same-origin',
-    'Accept-Language': 'en-GB,en;q=0.9',
-    'Sec-Fetch-Mode': 'navigate',
-    'Host': 'you.com',
-    'Origin': 'https://you.com',
-    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
-    'Referer': 'https://you.com/api/streamingSearch?q=nice&safeSearch=Moderate&onShoppingPage=false&mkt=&responseFilter=WebPages,Translations,TimeZone,Computation,RelatedSearches&domain=youchat&queryTraceId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&chat=%5B%7B%22question%22%3A%22hi%22%2C%22answer%22%3A%22Hello!%20How%20can%20I%20assist%20you%20today%3F%22%7D%5D&chatId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&__cf_chl_tk=ex2bw6vn5vbLsUm8J5rDYUC0Bjzc1XZqka6vUl6765A-1684108495-0-gaNycGzNDtA',
-    'Connection': 'keep-alive',
-    'Sec-Fetch-Dest': 'document',
-    'Priority': 'u=0, i',
-}
-
-if messages[-1]['role'] == 'user':
-    prompt = messages[-1]['content']
-    messages = messages[:-1]
-
-params = urllib.parse.urlencode({
-    'q': prompt,
-    'domain': 'youchat',
-    'chat': transform(messages)
-})
-
-def output(chunk):
-    if b'"youChatToken"' in chunk:
-        chunk_json = json.loads(chunk.decode().split('data: ')[1])
-
-        print(chunk_json['youChatToken'], flush=True, end = '')
-
-while True:
-    try:
-        response = requests.get(f'https://you.com/api/streamingSearch?{params}',
-            headers=headers, content_callback=output, impersonate='safari15_5')
-
-        exit(0)
-
-    except Exception as e:
-        print('an error occured, retrying... |', e, flush=True)
-        continue
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD Software The Ultimate CAD Solution for Singapore Designers.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD Software The Ultimate CAD Solution for Singapore Designers.md
deleted file mode 100644
index 56f76f8057337878fb26cf0281b652e5cd69ff30..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD Software The Ultimate CAD Solution for Singapore Designers.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-

Why You Should Choose AutoCAD Software for Your Design Projects in Singapore

-

AutoCAD is a leading computer-aided design (CAD) program that helps you create precise 2D and 3D drawings, models, and documentation for any design project. Whether you are an architect, engineer, construction professional, or designer, AutoCAD can help you turn your ideas into reality. But why should you choose AutoCAD software for your design projects in Singapore? Here are some reasons:

- -

As you can see, AutoCAD software is a great choice for your design projects in Singapore. It can help you create high-quality drawings and models that meet your standards and specifications. It can also help you collaborate with your team members and clients across different platforms and devices. And it can help you save time and money with its automation and customization features.

-

autocad software singapore


Download File >>> https://byltly.com/2uKxv1



-

If you want to learn more about AutoCAD software or try it for free, visit the Autodesk website or contact their Singapore office. You can also find online tutorials, courses, forums, and blogs that can help you get started with AutoCAD or enhance your skills.

- -

AutoCAD software is not only a powerful and versatile tool for design and documentation, but also a creative and expressive medium for art and innovation. You can use AutoCAD to create stunning visualizations, animations, and simulations that showcase your design ideas and concepts. You can also use AutoCAD to explore new possibilities and solutions that can improve the quality and sustainability of your design projects.

-

AutoCAD software is also compatible with other Autodesk products and services that can enhance your design workflow and outcomes. For example, you can use AutoCAD with BIM 360 to manage your projects in the cloud, with Fusion 360 to create 3D models and prototypes, with Inventor to design and simulate mechanical systems, with Maya to create realistic 3D animations and effects, and more.

-

AutoCAD software is a smart investment for your design career and business in Singapore. It can help you gain a competitive edge in the market and meet the demands and expectations of your clients. It can also help you develop your skills and knowledge in various design fields and disciplines. And it can help you connect with a global community of AutoCAD users who can inspire you and support you in your design journey.

-

-

So what are you waiting for? Start your free trial of AutoCAD software today and discover how it can transform your design projects in Singapore. You can also contact Autodesk Singapore for more information or assistance. And don't forget to check out the latest news, updates, and tips on AutoCAD on the Autodesk blog.

-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Believer Korean Movie English Subtitles Download.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Believer Korean Movie English Subtitles Download.md
deleted file mode 100644
index c9e598ea3a5d57f49a9e8db4371942d65a60cc91..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Believer Korean Movie English Subtitles Download.md
+++ /dev/null
@@ -1,20 +0,0 @@
-

How to Watch Believer Korean Movie with English Subtitles Online

-

Believer is a 2018 South Korean crime thriller film directed by Lee Hae-young. It is a remake of the 2012 Hong Kong film Drug War. The film stars Cho Jin-woong as a detective who infiltrates a drug cartel and tries to find the boss with the help of a drug dealer played by Ryu Jun-yeol.

-

Believer Korean Movie English Subtitles Download


DOWNLOADhttps://byltly.com/2uKvtH



-

The film was a box office hit in South Korea, earning over 30 million USD. It also received positive reviews from critics and audiences for its gripping plot, stylish action scenes, and stellar performances.

-

If you are a fan of Korean cinema and want to watch Believer with English subtitles online, you have several options. Here are some of the best ways to enjoy this thrilling movie:

- -

These are some of the best ways to watch Believer Korean movie with English subtitles online. However, you should always be careful of illegal or pirated sites that offer free downloads or streams of the movie. These sites may contain viruses, malware, or other harmful content that can damage your device or compromise your privacy. Always use legal and reputable sources to watch your favorite movies online.

-

- -

Believer is not only a thrilling crime movie, but also a commentary on the social and political issues in South Korea. The film explores themes such as corruption, loyalty, betrayal, revenge, and justice. It also shows the dark side of the drug trade and its impact on the lives of ordinary people.

-

The film also features a stellar cast of actors who deliver powerful performances. Cho Jin-woong is convincing as the determined and conflicted detective who risks his life to catch the drug lord. Ryu Jun-yeol is charismatic as the drug dealer who has a hidden agenda and a mysterious past. The film also has appearances by Kim Joo-hyuk, Cha Seung-won, Park Hae-joon, and Kim Sung-ryung.

-

Believer is a must-watch for fans of Korean cinema and crime thrillers. It is a remake that surpasses the original in many aspects. It is a film that will keep you on the edge of your seat and make you think about the moral dilemmas and consequences of the characters' actions.

-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Descargar English Spanish Interpreter Professional 4.4 Crack .md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Descargar English Spanish Interpreter Professional 4.4 Crack .md
deleted file mode 100644
index 5064e0e220117c881ae15dda23fdefc5af87db86..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Descargar English Spanish Interpreter Professional 4.4 Crack .md
+++ /dev/null
@@ -1,130 +0,0 @@
-

Descargar English Spanish Interpreter Professional 4.4 Crack: A Comprehensive Guide

-

If you are looking for a powerful and reliable translation program that can help you communicate effectively in both English and Spanish, you might have heard of English Spanish Interpreter Professional 4.4. This is a multi-functional software that can translate all types of documents from English to Spanish and vice versa, with a high level of accuracy and quality. But what if you don't want to pay for the full version of this program? Is there a way to download it for free? And if so, is it safe and legal to do so?

-

descargar english spanish interpreter professional 4.4 crack


Download Ziphttps://byltly.com/2uKxbh



-

In this article, we will answer these questions and more. We will explain what English Spanish Interpreter Professional 4.4 is, what features and benefits it offers, how to download it for free, what risks and drawbacks are involved, and what alternatives are available. By the end of this article, you will have a clear idea of whether you should descargar (download) English Spanish Interpreter Professional 4.4 crack or not.

-

Features and Benefits of English Spanish Interpreter Professional 4.4

-

English Spanish Interpreter Professional 4.4 is not your typical translation software that simply translates word by word without considering the context or the meaning of the sentences. It is a sophisticated program that analyzes the whole text that you want to translate, and interprets the expressions and phrases according to their usage and relevance. This way, it can produce translations that are much more natural, fluent, and correct than those of conventional programs.

-

But that's not all. English Spanish Interpreter Professional 4.4 also comes with a built-in dictionary and thesaurus, so you can look up the definitions, synonyms, antonyms, and examples of any word in both languages. You can also use the spell checker, the ambiguity checker, and the verb conjugator to improve the grammar and style of your translations. And if you want to save your translations to a .doc file, you can do so easily thanks to its compatibility with MS Word.

-

With all these features and benefits, English Spanish Interpreter Professional 4.4 can help you with any translation task that you might have, whether it is for personal or professional purposes. You can use it to translate emails, letters, reports, articles, books, websites, or any other type of document that you need.

-

How to Download English Spanish Interpreter Professional 4.4 Crack for Free

-

Now that you know what English Spanish Interpreter Professional 4.4 can do for you, you might be wondering how to get it for free. After all, the full version of this program costs $149 USD, which is not exactly cheap for most people.

-

One way to download it for free is to look for a crack version on the internet. A crack is a modified version of software that bypasses its security features and allows you to use it without paying for a license or activation code.

-

However, before you rush to descargar english spanish interpreter professional 4.4 crack from any website that claims to offer it, you should be aware of some risks and drawbacks that are involved in this process.

- -

If you still want to take these risks and download English Spanish Interpreter Professional 4.4 crack for free, here are some steps that you can follow:

-
    -
  1. Find a website that offers an English Spanish Interpreter Professional 4.4 crack download link. Make sure that the website has positive reviews and feedback from other users.
  2. -
  3. Download the crack file from the website (usually a .zip or .rar file) and save it to your computer.
  4. -
  5. Extract the crack file using a program like WinRAR or WinZip.
  6. -
  7. Run the crack file (usually an .exe file) as an administrator.
  8. -
  9. Follow the instructions on the screen to install and activate English Spanish Interpreter Professional 4.4 on your computer.
  10. -
  11. Enjoy using English Spanish Interpreter Professional 4.4 for free!
  12. -
-

However, before you install and run any cracked software on your computer, make sure that you take some precautions:

-


- -

Alternatives to English Spanish Interpreter Professional 4.4 Crack

-

If you are not willing to take these risks or face these drawbacks when downloading English Spanish Interpreter Professional 4.4 crack for free, don't worry! There are other alternatives that you can try instead.

-

One alternative is to look for other translation programs that offer similar or better features and performance than English Spanish Interpreter Professional 4.4 but at a lower price or even for free.

-

Some examples of these programs are:

- -

Another alternative is to use online translation services or apps instead of desktop software. These are web-based or mobile-based tools that allow you to translate texts on the go, without having to install anything on your computer.

-

Some advantages of using online translation services or apps are:

- -

Some disadvantages of using online translation services or apps are:

- -

Some examples of online translation services or apps are:

- -

Conclusion

-

In conclusion, English Spanish Interpreter Professional 4.4 is a powerful and reliable translation program that can help you communicate effectively in both English and Spanish. It has many features and benefits that make it stand out from other similar programs. However, if you want to download it for free, you may face some risks and drawbacks that may affect your computer, your privacy, your legality, and your ethics.

-

Therefore, you may want to consider other alternatives that are available, such as other translation programs that offer similar or better features and performance at a lower price or even for free, or online translation services or apps that allow you to translate texts on the go without having to install anything on your computer.

-

The choice is yours, but whatever you decide, we hope that this article has helped you understand more about descargar english spanish interpreter professional 4.4 crack and its alternatives.

-

FAQs

-

Here are some frequently asked questions about descargar english spanish interpreter professional 4.4 crack and its alternatives:

- -

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Filmora Free Without Watermark How to Edit Videos Like a Pro on a Budget.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Filmora Free Without Watermark How to Edit Videos Like a Pro on a Budget.md deleted file mode 100644 index e21ddcb63332dcfff645443018d9c3fb171fbbb2..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Filmora Free Without Watermark How to Edit Videos Like a Pro on a Budget.md +++ /dev/null @@ -1,18 +0,0 @@ - -

How to Use Filmora Free Without Watermark

-

Filmora is a popular and easy-to-use video editing software that can help you create stunning videos for various purposes. However, if you use the free version of Filmora, you will have to deal with a big watermark on your exported videos, which can be annoying and unprofessional.

-

So, is there a way to use Filmora free without watermark? Unfortunately, the answer is no. Filmora only offers a 30-day free trial, and after that, you need to subscribe to one of its plans to remove the watermark. However, there are some alternatives that you can try to use Filmora free without watermark or use other video editors that are free and without watermark.

-

filmora free without watermark


Download File 🆗 https://byltly.com/2uKvfq



-

In this article, we will show you 10 ways to use Filmora free without watermark or find the best free Filmora alternatives for your video editing needs.

-

10 Ways to Use Filmora Free Without Watermark

-

Here are 10 ways that you can try to use Filmora free without watermark or find other free video editors that are similar to Filmora:

-
    -
  1. Use the built-in video editor on your device: If you only need some basic editing functions, such as trimming, cropping, rotating, or adding text, you can use the built-in video editor on your device. For example, Windows 10 has a Photos app that can edit videos, and Mac has iMovie that can do more advanced editing. These video editors are free and without watermark.
  2. -
  3. Use an online video editor: If you don't want to download or install any software on your device, you can use an online video editor that works on your browser. There are many online video editors that are free and without watermark, such as Clipchamp, Kapwing, WeVideo, etc. However, online video editors may have some limitations in terms of file size, quality, speed, and features.
  4. -
  5. Use a screen recorder: If you have already edited your video with Filmora and want to export it without watermark, you can use a screen recorder to record your video playback on Filmora. There are many screen recorders that are free and without watermark, such as OBS Studio, VLC Media Player, Windows Game Bar, etc. However, screen recording may affect the quality and sound of your video.
  6. -
  7. Use a watermark remover: If you have already exported your video with Filmora and want to remove the watermark from it, you can use a watermark remover tool to erase or replace the watermark. There are many watermark remover tools that are free and without watermark, such as VideoProc, Apowersoft Online Watermark Remover, Inpaint, etc. However, watermark remover tools may not work perfectly on every video and may leave some traces or artifacts.
  8. -
  9. Use a video converter: If you have already exported your video with Filmora and want to change its format or quality, you can use a video converter tool to convert your video to another format or resolution. There are many video converter tools that are free and without watermark, such as HandBrake, Freemake Video Converter, VLC Media Player, etc. However, video converter tools may not be able to remove the watermark from your video.
  10. -
  11. Use a different version of Filmora: If you want to use Filmora for free without watermark but don't mind using an older version of it, you can try to find a different version of Filmora that has no watermark or has a smaller watermark. For example, some users claim that Filmora 8.5.3 has no watermark or has a smaller watermark than the latest version. However, using a different version of Filmora may not be safe or legal and may lack some features or updates.
  12. -
  13. Use a cracked version of Filmora: If you want to use Filmora for free without watermark but don't mind breaking the law or risking your device's security, you can try to find a cracked version of Filmora that has no watermark or has all the premium features unlocked. For example, some websites offer Filmora 11 free download without watermark for Windows. However, using a cracked version of Filmora is illegal and unethical, and it may also put your device's security at risk.

    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Allwinner A13 Tw A0910 V22 1126 16 PATCHED.md b/spaces/1gistliPinn/ChatGPT4/Examples/Allwinner A13 Tw A0910 V22 1126 16 PATCHED.md
deleted file mode 100644
index b3c12e200256595d98f4c7cd5c295503a7f9a0f9..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Allwinner A13 Tw A0910 V22 1126 16 PATCHED.md
+++ /dev/null
@@ -1,12 +0,0 @@
    -

    Allwinner A13 TW A0910 V22 1126 16: A Budget-Friendly Tablet

    -

    If you are looking for a low-cost tablet that can run Android OS, you might want to check out the Allwinner A13 TW A0910 V22 1126 16. This tablet is based on the Allwinner A13 chipset, which is a cheaper version of the A10 that lacks HDMI-transmitter and SATA-controller[^2^]. The A13 is primarily targeted towards tablets and low-budget IoT devices[^2^].

    -

    The Allwinner A13 TW A0910 V22 1126 16 has a 9-inch capacitive touchscreen with a resolution of 800x480 pixels. It has a front-facing camera, a microSD card slot, a microUSB port, a headphone jack, and a power button. It runs on Android 4.0 Ice Cream Sandwich with test keys[^1^]. The tablet has a battery capacity of 4000 mAh and weighs about 500 grams.

    -

    allwinner a13 tw a0910 v22 1126 16


    Download File ✶✶✶ https://imgfil.com/2uy1fH



    -

    The Allwinner A13 TW A0910 V22 1126 16 is not a high-end tablet, but it can perform basic tasks such as browsing the web, watching videos, playing games, and reading e-books. It is suitable for users who are looking for a simple and affordable device that can run Android OS. However, it may not support some of the latest apps and features that require more advanced hardware and software.

    One of the advantages of the Allwinner A13 TW A0910 V22 1126 16 is that it can be overclocked to boost its performance. Some users have reported that they were able to overclock the tablet to 1.152 GHz, which increased the Antutu benchmark score by nearly 400 points[^2^]. Overclocking can also improve the gaming experience, as some 3D games that require more processing power can run more smoothly on the tablet[^2^]. However, overclocking may also cause instability, overheating, and battery drain, so it should be done with caution and at your own risk.

    -

    Another advantage of the Allwinner A13 TW A0910 V22 1126 16 is that it supports OTG USB, which means you can connect external devices such as mouse, keyboard, gamepad, or 3G dongle to the tablet via a mini USB port. This can enhance the functionality and versatility of the tablet, as you can use it for different purposes and scenarios. For example, you can use a mouse and keyboard for typing and browsing, a gamepad for playing games, or a 3G dongle for accessing mobile internet[^2^]. However, not all devices may be compatible with the tablet, so you may need to check before buying or using them.

    -

    One of the disadvantages of the Allwinner A13 TW A0910 V22 1126 16 is that it has a low-resolution screen. The screen has a resolution of 800x480 pixels, which is quite low for a 9-inch tablet. This means that the images and text may appear pixelated and blurry on the screen. The screen is also not covered with glass but with plastic, which may affect the touch sensitivity and durability of the screen[^1^]. The low-resolution screen may also limit the quality and compatibility of some apps and games that require higher resolutions.

    -

    Another disadvantage of the Allwinner A13 TW A0910 V22 1126 16 is that it has a poor battery life. The tablet has a battery capacity of 4000 mAh, which is not very high for a tablet of this size and performance. Some users have reported that the battery does not last long and drains quickly when using the tablet[^3^]. The battery life may also be affected by factors such as overclocking, brightness, Wi-Fi, and apps running in the background. The battery life may be insufficient for users who need to use the tablet for long periods of time or on the go.

    -

    d5da3c52bf
    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Esr Disc Patcher Gui 0.24a Downloadl What is ESR and How Does it Work?.md b/spaces/1gistliPinn/ChatGPT4/Examples/Esr Disc Patcher Gui 0.24a Downloadl What is ESR and How Does it Work?.md
deleted file mode 100644
index 43a63a737790258bc36222b46b9706c0f64bed22..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Esr Disc Patcher Gui 0.24a Downloadl What is ESR and How Does it Work?.md
+++ /dev/null
@@ -1,6 +0,0 @@
-

    Esr Disc Patcher Gui 0.24a Downloadl


    Download Filehttps://imgfil.com/2uy08n



    -
-
    -
    -
    -

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Champak Comics In Hindi Pdf.md b/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Champak Comics In Hindi Pdf.md
deleted file mode 100644
index 6d7111ac9fa5bfb24f73bb9e6c60d8ce380ebcc4..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Champak Comics In Hindi Pdf.md
+++ /dev/null
@@ -1,21 +0,0 @@

    How to Free Download Champak Comics In Hindi Pdf

    -

    If you are a fan of Champak comics, you might be wondering how to free download Champak comics in Hindi pdf. Champak is one of the most popular children's magazines in India, featuring stories, puzzles, jokes, and cartoons. It was launched in 1968 and has been entertaining generations of readers ever since.

    -

    Free Download Champak Comics In Hindi Pdf


    DOWNLOAD ★★★ https://imgfil.com/2uxZ8D



    -

    Champak comics are available in various languages, including Hindi, English, Gujarati, Marathi, Kannada, Tamil, Telugu, Malayalam, and Bengali. However, if you want to read Champak comics in Hindi pdf format, you might have a hard time finding them online. Most of the websites that claim to offer free download Champak comics in Hindi pdf are either fake or full of malware.

    -

    So how can you free download Champak comics in Hindi pdf safely and legally? The answer is simple: you can use Bing search engine. Bing is a powerful and reliable search engine that can help you find what you are looking for. Here are the steps to free download Champak comics in Hindi pdf using Bing:

    -
      -
    1. Go to www.bing.com and type "Free Download Champak Comics In Hindi Pdf" in the search box.
    2. -
    3. You will see a list of results that match your query. Look for the ones that have a green lock icon and a pdf file extension. These are the ones that are safe and legal to download.
    4. -
    5. Click on the result that you like and you will be taken to the website where you can download the pdf file. You might have to sign up or complete a survey before you can access the file.
    6. -
    7. Once you have downloaded the file, you can open it with any pdf reader and enjoy reading Champak comics in Hindi.
    8. -
    -

That's it! You have successfully downloaded Champak comics in Hindi pdf for free using Bing. You can repeat the same process for any other Champak comics or magazines that you want to read. Bing is your best friend when it comes to finding and downloading anything online.

    -

    -

    If you liked this article, please share it with your friends and family who are also fans of Champak comics. And don't forget to check out our other articles on how to free download various things using Bing. Happy reading!

    - -

    Champak comics are not only fun to read, but also educational and inspirational. They teach children about various topics, such as science, history, culture, morals, and values. They also encourage children to use their imagination and creativity, and to develop their skills and talents.

    -

    Some of the most famous characters in Champak comics are Cheeku the rabbit, Meeku the mouse, Baddy the fox, Jumbo the elephant, and Champakvan's king Raja Hooja. They have many adventures and misadventures in Champakvan, a fictional forest where they live. They also meet many other animals and humans who become their friends or foes.

    -

    Champak comics are suitable for children of all ages, from toddlers to teenagers. They can also be enjoyed by adults who want to relive their childhood memories or share them with their kids. Champak comics are a great way to bond with your family and have some fun together.

    -
    -
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Logo Maker Generate a Professional Logo in Minutes.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Logo Maker Generate a Professional Logo in Minutes.md
deleted file mode 100644
index 7ddf1ada1191a43605efdc9ec68e4cfd3408b0b4..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Logo Maker Generate a Professional Logo in Minutes.md
+++ /dev/null
@@ -1,73 +0,0 @@
-

    How to Create a Stunning Car Logo for Your Brand

    -

    Introduction

    -

    Car logos are more than just symbols that identify different car brands and models. They are also expressions of your brand's personality, values, and vision. A well-designed car logo can make your brand stand out from the crowd, attract customers, and build loyalty.

    -

    The history of car logos

    -

    Car logos have a long and fascinating history that dates back to the early days of the automotive industry. One of the earliest car logos was created by Peugeot in 1905. The logo featured a lion, which was a reference to the company's home city of Lyon. The lion was depicted standing on its hind legs, symbolizing the strength and power of the Peugeot brand.

    -

    car logo


    Download Zip https://urlin.us/2uSZ4L



    -

    Another early example of a car logo was created by Cadillac in 1906. The logo featured a coat of arms with the Cadillac name at the top and a wreath surrounding it. The coat of arms was meant to symbolize the company's status as a luxury brand.

    -

    Over time, car logos evolved to reflect changing trends, technologies, and customer preferences. Some car logos became more minimalist and modern, while others retained their classic and elegant style.


    I hope you enjoyed this article on how to create a stunning car logo for your brand. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

    -

    FAQs

    -
      -
    1. What are some of the most famous car logos in the world?
    2. -

      Some of the most famous car logos in the world are Mercedes-Benz's three-pointed star, BMW's blue and white roundel, Ferrari's prancing horse, Toyota's three overlapping ellipses, and Volkswagen's stylized "VW".

      -
    3. What are some of the best tools to create a car logo?
    4. -

      There are many tools available online that can help you create a car logo for free or for a fee. Some of the best tools are Looka, 99designs, LogoMaker, and Canva.

      -
    5. What are some of the best practices for designing a car logo?
    6. -

      Some of the best practices for designing a car logo are:

      -

      -
        -
      • Choose colors that reflect your brand's personality and message.
      • -
      • Use fonts that are legible and consistent with your brand's style.
      • -
• Pick a symbol that is simple, memorable, and relevant to your brand.

        -
        -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Facebook Videos for Free in HD Quality - No Watermark.md b/spaces/1phancelerku/anime-remove-background/Download Facebook Videos for Free in HD Quality - No Watermark.md
deleted file mode 100644
index 7b60082186504130e2b39c6241a582bca65ac8ab..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Facebook Videos for Free in HD Quality - No Watermark.md
+++ /dev/null
@@ -1,209 +0,0 @@
-

        How to Download Facebook Videos Without Watermark

        -

        Facebook is one of the most popular social media platforms in the world, with billions of users who share various types of content, including videos. Sometimes, you might come across a Facebook video that you want to save offline for later viewing or sharing. However, not all Facebook videos are easy to download, especially if they have a watermark on them.

        -

        download facebook no watermark


        Download File ►►►►► https://jinyurl.com/2uNOi9



        -

        A watermark is a logo, text, or image that is overlaid on a video to indicate its source or ownership. While some watermarks are subtle and unobtrusive, others are large and annoying, covering important parts of the video or distracting the viewers. If you want to download a Facebook video without watermark, you might need some tools or apps that can help you do that.

        -

        In this article, we will show you how to download Facebook videos without watermark using online tools and mobile apps. We will also explain the benefits of downloading Facebook videos without watermark and answer some frequently asked questions about this topic.

        -

        Introduction

        -

        What is a watermark and why you might want to avoid it

        -

        A watermark is a logo, text, or image that is overlaid on a video to indicate its source or ownership. Watermarks are usually used by content creators or platforms to protect their intellectual property rights and prevent unauthorized copying or distribution of their videos. However, watermarks can also have some drawbacks for the viewers and the downloaders of the videos.

        -

        Some of the reasons why you might want to avoid watermarks on Facebook videos are:

        -


        -
          -
        • Watermarks can reduce the quality and clarity of the video, making it less enjoyable to watch.
        • -
        • Watermarks can cover important parts of the video, such as faces, captions, or subtitles, making it hard to follow the content.
        • -
        • Watermarks can be annoying and distracting, especially if they are large, flashy, or animated.
        • -
        • Watermarks can make the video look unprofessional or unoriginal, especially if you want to share it with others or use it for your own purposes.
        • -
        -

        Benefits of downloading Facebook videos without watermark

        -

        Downloading Facebook videos without watermark can have some benefits for you, such as:

        -
          -
        • You can enjoy the video in its original quality and resolution, without any interference from the watermark.
        • -
        • You can see the whole content of the video, without missing any details or information that might be hidden by the watermark.
        • -
        • You can avoid any annoyance or distraction caused by the watermark, and focus on the content and message of the video.
        • -
        • You can share the video with others or use it for your own purposes, without worrying about infringing any intellectual property rights or violating any terms of service.
        • -
        -

        How to download Facebook videos without watermark using online tools

        -

        One of the easiest ways to download Facebook videos without watermark is to use online tools that can help you do that. Online tools are websites that allow you to paste the URL of a Facebook video and download it in various formats and qualities. You don't need to install anything on your device or sign up for anything. You just need a browser and an internet connection.

        -

        However, not all online tools are reliable or safe. Some of them might not work properly, have limited features, contain ads or malware, or require payment or registration. Therefore, you need to be careful when choosing an online tool to download Facebook videos without watermark.

        -

        To help you out, we have selected three of the best online tools to download Facebook videos without watermark, based on their features, ease of use, and safety. Here they are:

        -

        SaveFrom.net

        -

        Features

        -

        SaveFrom.net is one of the most popular and trusted online tools to download Facebook videos without watermark. It has the following features:

        -
          -
        • It supports downloading videos from Facebook and other platforms, such as YouTube, Instagram, Twitter, TikTok, and more.
        • -
        • It allows you to choose the format and quality of the video, such as MP4, WEBM, HD, SD, etc.
        • -
        • It has a browser extension that lets you download videos directly from the Facebook page, without visiting the website.
        • -
        • It is fast, easy, and free to use, without any ads or registration.
        • -
        -

        Steps

        -

        To download Facebook videos without watermark using SaveFrom.net, follow these steps:

        -
          -
        1. Copy the URL of the Facebook video that you want to download.
        2. -
        3. Go to SaveFrom.net and paste the URL in the search box.
        4. -
        5. Select the format and quality of the video that you want to download.
        6. -
        7. Click on the "Download" button and wait for the video to be saved on your device.
        8. -
        -

        Toolzu.com

        -

        Features

        -

        Toolzu.com is another great online tool to download Facebook videos without watermark. It has the following features:

        -
          -
        • It supports downloading videos from Facebook and other platforms, such as YouTube, Instagram, Twitter, TikTok, and more.
        • -
        • It allows you to choose the format and quality of the video, such as MP4, WEBM, HD, SD, etc.
        • -
        • It has a simple and user-friendly interface that makes it easy to use.
        • -
        • It is fast, reliable, and free to use, without any ads or registration.
        • -
        -

        Steps

        -

        To download Facebook videos without watermark using Toolzu.com, follow these steps:

        -
          -
        1. Copy the URL of the Facebook video that you want to download.
        2. -
        3. Go to Toolzu.com and paste the URL in the search box.
        4. -
        5. Select the format and quality of the video that you want to download.
        6. -
        7. Click on the "Download" button and wait for the video to be saved on your device.
        8. -
        -

        FDown.net

        -

        Features

        -

        FDown.net is a third online tool that can help you download Facebook videos without watermark. It has the following features:

        -
          -
        • It supports downloading videos from Facebook and other platforms, such as YouTube, Instagram, Twitter, TikTok, and more.
        • -
        • It allows you to choose the format and quality of the video, such as MP4, WEBM, HD, SD, etc.
        • -
        • It has a smart detection feature that automatically detects the best quality available for the video.
        • -
        • It is fast, secure, and free to use, without any ads or registration.
        • -
        -

        Steps

        -

        To download Facebook videos without watermark using FDown.net, follow these steps:

        -
          -
        1. Copy the URL of the Facebook video that you want to download.
        2. -
        3. Go to FDown.net and paste the URL in the search box.
        4. -
        5. Select the format and quality of the video that you want to download.
        6. -
        7. Click on the "Download" button and wait for the video to be saved on your device.
        8. -
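
If you are comfortable with a little scripting, the same copy-the-URL workflow can also be automated on your computer. The snippet below is only an illustrative sketch, not one of the tools above: it assumes that the open-source yt-dlp downloader is installed (pip install yt-dlp), that the video is publicly visible, and that the URL shown is a placeholder you would replace with your own link.

from yt_dlp import YoutubeDL

# Placeholder URL; replace it with the link you copied from Facebook.
video_url = "https://www.facebook.com/watch?v=1234567890"

options = {
    "format": "mp4",                 # prefer an MP4 stream when one is available
    "outtmpl": "%(title)s.%(ext)s",  # name the file after the video title
}

with YoutubeDL(options) as ydl:
    ydl.download([video_url])

Like the websites above, this only saves the video as it is published, so whether the result has a watermark depends on the video itself.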
        -

        How to download Facebook videos without watermark using mobile apps

        -

If you prefer to use your mobile device to download Facebook videos without watermark, you can also use some mobile apps that can help you do that. Mobile apps are applications that you can install on your smartphone or tablet and use without opening a browser each time. You still need an internet connection to fetch the videos, however, and you need to make sure that the app is compatible with your device and operating system.

        -

        However, not all mobile apps are reliable or safe. Some of them might not work properly, have limited features, contain ads or malware, or require payment or registration. Therefore, you need to be careful when choosing a mobile app to download Facebook videos without watermark. To help you out, we have selected two of the best mobile apps to download Facebook videos without watermark, based on their features, ease of use, and safety. Here they are:

        -

        Snapsave Video Downloader for Facebook

        -

        Features

        -

        Snapsave Video Downloader for Facebook is a mobile app that can help you download Facebook videos without watermark. It has the following features:

        -
          -
        • It supports downloading videos from Facebook and other platforms, such as Instagram, TikTok, and more.
        • -
        • It allows you to choose the format and quality of the video, such as MP4, HD, SD, etc.
        • -
        • It has a built-in browser that lets you browse and download videos directly from the Facebook app or website.
        • -
        • It has a video player that lets you preview and play the downloaded videos offline.
        • -
        • It is easy, fast, and free to use, without any ads or registration.
        • -
        -

        Steps

        -

        To download Facebook videos without watermark using Snapsave Video Downloader for Facebook, follow these steps:

        -
          -
        1. Download and install the app from the Google Play Store or the App Store.
        2. -
        3. Open the app and tap on the "Facebook" icon to launch the built-in browser.
        4. -
        5. Login to your Facebook account and find the video that you want to download.
        6. -
        7. Tap on the video and then tap on the "Download" button at the bottom right corner of the screen.
        8. -
        9. Select the format and quality of the video that you want to download.
        10. -
        11. Wait for the video to be downloaded and saved on your device.
        12. -
        -

        Video Downloader for Facebook by ETM Video Downloader

        -

        Features

        -

        Video Downloader for Facebook by ETM Video Downloader is another mobile app that can help you download Facebook videos without watermark. It has the following features:

        -
          -
        • It supports downloading videos from Facebook and other platforms, such as YouTube, Instagram, Twitter, TikTok, and more.
        • -
        • It allows you to choose the format and quality of the video, such as MP4, HD, SD, etc.
        • -
        • It has a smart detection feature that automatically detects and downloads videos from any link or page.
        • -
        • It has a video manager that lets you view, play, delete, or share the downloaded videos offline.
        • -
        • It is easy, fast, and free to use, without any ads or registration.
        • -
        -

        Steps

        -

        To download Facebook videos without watermark using Video Downloader for Facebook by ETM Video Downloader, follow these steps:

        -
          -
        1. Download and install the app from the Google Play Store.
        2. -
        3. Open the app and tap on the "Facebook" icon to launch the built-in browser.
        4. -
        5. Login to your Facebook account and find the video that you want to download.
        6. -
        7. Copy the URL of the video or tap on the "Share" button and then tap on "Copy Link".
        8. -
        9. Paste the URL in the app's search box or tap on the "Paste Link" button.
        10. -
        11. Select the format and quality of the video that you want to download.
        12. -
        13. Wait for the video to be downloaded and saved on your device.
        14. -
        -

        Conclusion

        -

        In conclusion, downloading Facebook videos without watermark can be beneficial for you if you want to enjoy them in their original quality and resolution, see their whole content without any interference, avoid any annoyance or distraction caused by the watermark, or share them with others or use them for your own purposes. However, not all Facebook videos are easy to download without watermark. You might need some tools or apps that can help you do that. In this article, we have shown you how to download Facebook videos without watermark using online tools and mobile apps. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.

        -

        FAQ

        -

        Here are some frequently asked questions about downloading Facebook videos without watermark:

        -

        Is it legal to download Facebook videos without watermark?

        -

        The answer to this question depends on several factors, such as:

        -
          -
        • The source and ownership of the video. If the video belongs to someone else who has not given you permission to download it or use it for your own purposes, you might be violating their intellectual property rights or terms of service.
        • -
• The purpose and intention of downloading the video. If you are downloading the video for personal use only, such as watching it offline or sharing it with your friends or family, you might not be breaking any laws. However, if you re-upload the video or use it for commercial purposes without the owner's permission, you might be breaking the law.
-

How to remove the watermark from a video that you have downloaded?

If a video that you have saved still carries a watermark, you can use a video editing app to crop out or cover the watermark, adjust the brightness, contrast, saturation, and more of the video, use the effects, filters, transitions, text, and music tools to enhance it, and then export the clean copy in the format and quality that you want. One app that can do all of this is InShot, described below.

    InShot

    -

    InShot is a mobile app that can help you remove watermark from a Facebook video. It has the following features:

    - -

    To remove watermark from a Facebook video using InShot, follow these steps:

    -
      -
    1. Download and install the app from the Google Play Store or the App Store.
    2. -
    3. Open the app and tap on the "Video" icon to import the Facebook video that you want to remove watermark from.
    4. -
    5. Select the video and tap on the "Canvas" icon to crop out the watermark from the video.
    6. -
    7. Tap on the "Trim" icon to trim out any unwanted parts of the video.
    8. -
    9. Tap on the "Rotate" icon to rotate the video if needed.
    10. -
    11. Tap on the "Adjust" icon to adjust the brightness, contrast, saturation, and more of the video.
    12. -
    13. Tap on the "Filter" icon to add effects and filters to your video.
    14. -
    15. Tap on the "Sticker" icon to add stickers and text to your video.
    16. -
    17. Tap on the "Music" icon to add music or sound effects to your video.
    18. -
    19. Tap on the "Save" icon and choose the format and quality of the video that you want to save.
    20. -
    21. Wait for the video to be saved on your device.
    22. -
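
If you would rather do this step on a computer, the crop-or-cover idea that InShot uses can also be scripted. The sketch below is only an illustrative example, assuming that ffmpeg is installed and that the watermark sits in a known rectangle; the file names and coordinates are placeholders that you would adjust for your own video.

import subprocess

# Placeholder file names; replace them with your own.
src = "downloaded_video.mp4"
dst = "clean_video.mp4"

# ffmpeg's delogo filter blurs over the rectangle at (x, y) with size w x h.
subprocess.run([
    "ffmpeg", "-i", src,
    "-vf", "delogo=x=20:y=20:w=200:h=60",
    "-c:a", "copy",  # keep the original audio track unchanged
    dst,
], check=True)

As with InShot's Canvas tool, this works best when the watermark is small and sits away from the important parts of the frame.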

    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download The Spike MOD APK and Enjoy All Characters Money and Max Level.md b/spaces/1phancelerku/anime-remove-background/Download The Spike MOD APK and Enjoy All Characters Money and Max Level.md
deleted file mode 100644
index fd3d74e139484efb9afc5db3c63f562ade0b0266..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download The Spike MOD APK and Enjoy All Characters Money and Max Level.md
+++ /dev/null
@@ -1,76 +0,0 @@
-

    Download Game The Spike Mod Apk: A Volleyball Game with Retro Graphics and Dynamic Gameplay

    -

    If you are looking for a fun and simple volleyball game that will keep you entertained for hours, then you should download game The Spike mod apk. The Spike is a 2D volleyball game developed by a high school indie team in Korea. It features retro arcade style graphics, speedy and powerful spikes, and a story mode that follows the student athletes who aim for the top. You can also play with your friends in local multiplayer mode and compete with other players online.

    -

    download game the spike mod apk


    Download Ziphttps://jinyurl.com/2uNOUj



    -

    In this article, we will tell you everything you need to know about The Spike mod apk, including its features, how to download and install it, and some tips and tricks for playing it. So, without further ado, let's get started!

    -

    Features of The Spike Mod Apk

    -

    The Spike mod apk is a modified version of the original game that gives you some extra benefits. Here are some of the features of The Spike mod apk:

    • Unlimited money and gems, so you can buy and upgrade whatever you want.
    • All characters and costumes unlocked from the start.
    • No ads to interrupt your matches.

    How to Download and Install The Spike Mod Apk

    -

    Downloading and installing The Spike mod apk is very easy. Just follow these simple steps:

    -
    1. Step 1: Download the mod apk file from a trusted source. You can use this link to download it. (A short checksum sketch for verifying the download follows these steps.)
    2. Step 2: Enable unknown sources in your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
    3. Step 3: Install the mod apk file and launch the game. Locate the downloaded file in your device storage and tap on it to install it. Once installed, open the game and enjoy!
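
    Since a mod apk arrives from outside the Play Store, it is worth confirming the file is intact before you install it. The sketch below is a minimal, hypothetical example in Python: it assumes the download source publishes a SHA-256 checksum, and both the file name and the expected hash are placeholders.

    ```python
    import hashlib

    APK_PATH = "the-spike-mod.apk"  # placeholder file name
    EXPECTED_SHA256 = "replace-with-published-checksum"

    # Hash the file in chunks so large downloads don't need to fit in memory.
    digest = hashlib.sha256()
    with open(APK_PATH, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    if digest.hexdigest() == EXPECTED_SHA256:
        print("Checksum matches; the download is intact.")
    else:
        print("Checksum mismatch; re-download the file and do not install it.")
    ```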

    Tips and Tricks for Playing The Spike

    -

    The Spike is a game that requires skill, timing, and strategy. Learning each character's special skill (see the FAQ below for some of the most popular ones) will help you improve your performance.

    Conclusion

    -

    The Spike is a volleyball game that will make you feel the thrill and excitement of spiking the ball. It has retro graphics, dynamic gameplay, and a captivating story mode. With The Spike mod apk, you can also enjoy unlimited money and gems, all characters and costumes unlocked, and no ads. Download The Spike mod apk now and experience the ultimate volleyball game!


    -

    FAQs

    -

    Here are some frequently asked questions about The Spike mod apk:

    -
    1. Q1: Is The Spike mod apk safe to use?

      A1: Yes, The Spike mod apk is safe to use as long as you download it from a reliable source. We have tested the mod apk file and found no viruses or malware. However, you should always be careful when downloading any app from unknown sources and scan it with an antivirus before installing it.

    2. Q2: How to update The Spike mod apk?

      A2: To update The Spike mod apk, you need to download the latest version of the mod apk file from the same source you used before. Then, you need to uninstall the previous version of the game and install the new one. You don't need to worry about losing your progress as it will be saved on your device.

    3. Q3: How to play The Spike on PC?

      A3: To play The Spike on PC, you need to use an Android emulator such as BlueStacks or NoxPlayer. These are programs that allow you to run Android apps on your PC. You need to download and install the emulator on your PC and then download and install The Spike mod apk on it. Then, you can launch the game and play it on a bigger screen.

    4. Q4: What are the best characters and costumes in The Spike?

      A4: The best characters and costumes in The Spike depend on your personal preference and playstyle. However, some of the most popular ones are:

      | Character | Costume | Special Skill |
      | --- | --- | --- |
      | Ryu | Ninja | Shadow Clone: Creates a clone that spikes with him |
      | Lisa | Cheerleader | Cheer Up: Increases her team's speed and power |
      | Jay | Pirate | Cannonball: Launches a powerful spike that stuns the opponent |
      | Sophia | Angel | Heaven's Blessing: Heals her team's HP and increases their accuracy |
      | Kai | Demon | Hell's Curse: Reduces the opponent's HP and accuracy |

    5. Q5: How to get more money and gems in The Spike?

      A5: With The Spike mod apk, you don't need to worry about getting more money and gems as you will have unlimited amounts of them. However, if you want to earn them legitimately, you can do so by completing story mode chapters, winning matches, completing achievements, watching ads, or buying them with real money.

      We hope you enjoyed reading this article and learned something new about The Spike mod apk. If you have any questions, comments, or feedback, please feel free to leave them below. We would love to hear from you and help you out. Thank you for your time and attention!

      -
      -
      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Experience the Adventure of One Piece with Haki Legend APK for Android and iOS.md b/spaces/1phancelerku/anime-remove-background/Experience the Adventure of One Piece with Haki Legend APK for Android and iOS.md deleted file mode 100644 index c3a140df7f4e2957646987fe2366b2312e8757bb..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Experience the Adventure of One Piece with Haki Legend APK for Android and iOS.md +++ /dev/null @@ -1,108 +0,0 @@ -
      -

      One Piece Haki Legend APK: A New Adventure in the Grand Line

      -

      If you are a fan of One Piece, the popular manga and anime series by Eiichiro Oda, you will love One Piece Haki Legend APK, a new mobile game that lets you experience the thrilling adventures of Luffy and his crew in the Grand Line. In this game, you can create your own pirate crew, fight against enemies, explore islands, and discover the secrets of haki, the mysterious power that grants superhuman abilities to those who master it.

      -



    Download File: https://jinyurl.com/2uNPq1



      -

      What is One Piece Haki Legend APK?

      -

    One Piece Haki Legend APK is a role-playing game based on the One Piece franchise. It is developed by a Chinese game studio that specializes in anime-themed games. The game was released in September 2023 for Android devices, and it has received positive reviews from players and critics alike. The game is not available on the Google Play Store, but you can download it from the official website.

      -

      Features of One Piece Haki Legend APK

      -

      One Piece Haki Legend APK has many features that make it stand out from other One Piece games. Here are some of them:

      -

      Stunning graphics and animations

      -

      The game boasts high-quality graphics and animations that capture the essence of the original manga and anime. The characters are designed with great detail and accuracy, and they have expressive facial expressions and movements. The environments are also rich and colorful, with dynamic weather effects and lighting. The game also has cinematic cutscenes that enhance the story and immerse you in the world of One Piece.

      -

      Original story and characters

      -

      The game follows an original story that is faithful to the canon of One Piece. You will meet familiar characters such as Luffy, Zoro, Nami, Sanji, Chopper, Robin, Franky, Brook, Law, Sabo, Ace, Shanks, Mihawk, Doflamingo, Kaido, Big Mom, Blackbeard, and many more. You will also encounter new characters that are exclusive to the game, such as your own pirate crew members and allies. The game also features original voice acting from the Japanese cast of the anime.

      -

      Various game modes and challenges

      -

      The game offers a variety of game modes and challenges that will keep you entertained for hours. You can play the main story mode, where you will follow the plot of the game and complete missions. You can also play the side story mode, where you will explore different islands and scenarios that are not related to the main story. You can also play the arena mode, where you will compete against other players in real-time battles. You can also play the guild mode, where you will join forces with other players in co-op missions.

      -

      Real-time battles and co-op missions

      -

    The game features real-time battles that are fast-paced and exciting. You can control up to four characters at once, each with their own skills and abilities. You can switch between them at any time during combat, depending on the situation. You can also use haki skills to unleash powerful attacks or defend yourself from enemy attacks. The game also supports co-op missions, where you can team up with other players online to take down bosses or complete missions together.

      Customize your own pirate crew

      -

      The game allows you to customize your own pirate crew, with up to 12 members. You can choose from a variety of characters, each with their own personality, appearance, and skills. You can also equip them with different weapons, accessories, and costumes. You can also name your crew and design your own pirate flag. You can also interact with your crew members and increase their friendship level, which will unlock more dialogue and events.

      -

      How to download and install One Piece Haki Legend APK?

      -

      If you want to play One Piece Haki Legend APK, you will need to download and install it on your Android device. Here are the steps you need to follow:

      -

      Step 1: Download the APK file from the official website

      -

      The first thing you need to do is to download the APK file from the official website. The file size is about 1.5 GB, so make sure you have enough storage space and a stable internet connection. You can also scan the QR code on the website to download the file directly to your device.

      -

      Step 2: Enable unknown sources on your device

      -

      The next thing you need to do is to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may also need to confirm this action by tapping OK or Allow.

      -

      Step 3: Install the APK file and launch the game

      -

      The final thing you need to do is to install the APK file and launch the game. To do this, locate the downloaded file in your file manager and tap on it. You may also need to grant some permissions to the app by tapping Install or Accept. Once the installation is complete, you can tap Open or Launch to start the game. You may also need to download some additional data before you can play the game.
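
      If you sideload apps regularly, this install step can also be done from a computer over adb instead of tapping through the file manager. This is a minimal sketch, assuming the Android platform tools are installed and USB debugging is enabled on the device; the APK file name is a hypothetical placeholder.

      ```python
      import subprocess

      APK_PATH = "one-piece-haki-legend.apk"  # placeholder file name

      # "adb install -r" installs the package, replacing an existing version
      # while keeping its data; check=True raises if adb reports a failure.
      subprocess.run(["adb", "install", "-r", APK_PATH], check=True)
      ```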

      -

      Tips and tricks for playing One Piece Haki Legend APK

      -

      Now that you have downloaded and installed One Piece Haki Legend APK, you may want some tips and tricks to help you play the game better. Here are some of them:

      -

      Learn the basics of combat and haki skills

      -

      One of the most important aspects of the game is combat. You will need to learn how to use your characters' skills and haki skills effectively. Each character has four skills: a normal attack, a special attack, a passive skill, and a haki skill. You can tap on the skill icons to activate them, or swipe on the screen to move or dodge. You can also use haki skills by tapping on the haki icon at the bottom of the screen. There are three types of haki: observation haki, armament haki, and conqueror's haki. Each type has different effects and uses, such as increasing your attack power, defense power, or stunning enemies.
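
      To make the relationship between the three haki types concrete, here is a purely illustrative Python model; the class name and the mapping of each type to its effect are our own reading of the description above, not code from the game.

      ```python
      from dataclasses import dataclass

      @dataclass
      class Haki:
          kind: str    # which of the three haki types this is
          effect: str  # what activating it does in battle

      # One entry per type described above; the effects paraphrase the article.
      HAKI_TYPES = [
          Haki("observation", "read incoming attacks so dodges are easier"),
          Haki("armament", "raise your attack and defense power"),
          Haki("conqueror's", "stun nearby enemies"),
      ]

      for haki in HAKI_TYPES:
          print(f"{haki.kind} haki: {haki.effect}")
      ```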

      -

      Upgrade your characters and equipment

      -

      Another important aspect of the game is upgrading your characters and equipment. You will need to collect resources such as gold, gems, materials, and fragments to do this. You can use gold and gems to level up your characters' skills and haki skills. You can use materials to enhance your equipment's attributes and quality. You can use fragments to unlock new characters or upgrade their stars. You can obtain these resources by completing missions, events, or quests.

      -

      Join a guild and participate in events

      -

      A fun way to enjoy the game is to join a guild and participate in events. A guild is a group of players who share a common interest in One Piece. You can join an existing guild or create your own guild with your friends. By joining a guild, you can chat with other members, exchange gifts, request help, or donate resources. You can also participate in guild events such as guild wars, guild raids, or guild missions. These events will reward you with exclusive items and benefits.

      -

      Explore the map and collect resources

      -

      A great way to experience the game is to explore the map and collect resources. The map is divided into different regions, each with its own theme and scenery. You can travel between regions by using your ship or fast travel points. You can also find various resources on the map, such as chests, treasure maps, fruits, fish, or animals. These resources will help you upgrade your characters and equipment.

      -

      Conclusion

      -

      One Piece Haki Legend APK is a fantastic game for One Piece fans and RPG lovers alike. It has stunning graphics and animations, original story and characters, various game modes and challenges, real-time battles and co-op missions, and a lot of customization options. You can download and install the game easily by following the steps in this article. You can also use the tips and tricks in this article to improve your gameplay and have more fun. If you are looking for a new adventure in the Grand Line, One Piece Haki Legend APK is the game for you.

      -

      FAQs

      -

      Here are some frequently asked questions about One Piece Haki Legend APK:

      | Question | Answer |
      | --- | --- |
      | Is One Piece Haki Legend APK free to play? | Yes, One Piece Haki Legend APK is free to play, but it also has some optional in-app purchases that can enhance your gaming experience. |
      | Is One Piece Haki Legend APK safe to download and install? | Yes, One Piece Haki Legend APK is safe to download and install, as long as you use the official website or a trusted source. You should also scan the file with an antivirus program before installing it. |
      | Is One Piece Haki Legend APK compatible with my device? | One Piece Haki Legend APK is compatible with most Android devices that have Android 4.4 or higher. However, some devices may not run the game smoothly or have some bugs. You can check the compatibility of your device on the official website or contact the customer service for more information. |
      | How can I contact the customer service of One Piece Haki Legend APK? | You can contact the customer service of One Piece Haki Legend APK by using the in-game feedback system or by sending an email to op_hakilegend@vdomdhtml.com. You can also follow the official Facebook page or join the official Discord server for more updates and support. |
      | How can I support the development of One Piece Haki Legend APK? | You can support the development of One Piece Haki Legend APK by giving it a positive rating and review on the official website or on other platforms. You can also share the game with your friends and family, or make a donation through the in-game store. |

      Official website: https://www.onepiecehakilegend.com/ | Facebook: https://www.facebook.com/OnePieceHakiLegend | Discord: https://discord.gg/onepiecehakilegend

      -
      -
      \ No newline at end of file diff --git a/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_new.py b/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_new.py deleted file mode 100644 index 0c13e60b0dd136d9115a535101c6dbb2a25c6833..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_new.py +++ /dev/null @@ -1,125 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - - def __call__(self, x): - h = self.conv1(x) - h = self.conv2(h) - - return h - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - - h = self.conv1(x) - # h = self.conv2(h) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) - self.conv3 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - out = self.bottleneck(out) - - if self.dropout is not None: - out = self.dropout(out) - - return out - - -class LSTMModule(nn.Module): - def __init__(self, nin_conv, nin_lstm, nout_lstm): - super(LSTMModule, self).__init__() - self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0) - self.lstm = nn.LSTM( - input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True - ) - self.dense = nn.Sequential( - nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU() - ) - - def forward(self, x): - N, _, nbins, nframes = x.size() - h = self.conv(x)[:, 0] # N, nbins, 
nframes - h = h.permute(2, 0, 1) # nframes, N, nbins - h, _ = self.lstm(h) - h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins - h = h.reshape(nframes, N, 1, nbins) - h = h.permute(1, 2, 3, 0) - - return h diff --git a/spaces/A666sxr/Genshin_TTS/preprocess.py b/spaces/A666sxr/Genshin_TTS/preprocess.py deleted file mode 100644 index aaedbf076c30114b3ac6c27dfb42fd54ac81a71c..0000000000000000000000000000000000000000 --- a/spaces/A666sxr/Genshin_TTS/preprocess.py +++ /dev/null @@ -1,25 +0,0 @@ -import argparse -import text -from utils import load_filepaths_and_text - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--out_extension", default="cleaned") - parser.add_argument("--text_index", default=1, type=int) - parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"]) - parser.add_argument("--text_cleaners", nargs="+", default=["english_cleaners2"]) - - args = parser.parse_args() - - - for filelist in args.filelists: - print("START:", filelist) - filepaths_and_text = load_filepaths_and_text(filelist) - for i in range(len(filepaths_and_text)): - original_text = filepaths_and_text[i][args.text_index] - cleaned_text = text._clean_text(original_text, args.text_cleaners) - filepaths_and_text[i][args.text_index] = cleaned_text - - new_filelist = filelist + "." + args.out_extension - with open(new_filelist, "w", encoding="utf-8") as f: - f.writelines(["|".join(x) + "\n" for x in filepaths_and_text]) diff --git a/spaces/AIConsultant/MusicGen/audiocraft/optim/__init__.py b/spaces/AIConsultant/MusicGen/audiocraft/optim/__init__.py deleted file mode 100644 index f48c17dfafa9a2be46a91ed1fb64f54c5572a730..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/audiocraft/optim/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Optimization stuff. In particular, optimizers (DAdaptAdam), schedulers -and Exponential Moving Average. 
-""" - -# flake8: noqa -from .cosine_lr_scheduler import CosineLRScheduler -from .dadam import DAdaptAdam -from .inverse_sqrt_lr_scheduler import InverseSquareRootLRScheduler -from .linear_warmup_lr_scheduler import LinearWarmupLRScheduler -from .polynomial_decay_lr_scheduler import PolynomialDecayLRScheduler -from .ema import ModuleDictEMA diff --git a/spaces/AIFILMS/riffusion-playground/README.md b/spaces/AIFILMS/riffusion-playground/README.md deleted file mode 100644 index c052d89b1ee9ed07e5d1f51d0b2c969e797a6728..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/riffusion-playground/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Riffusion Playground -emoji: 📚 -colorFrom: red -colorTo: purple -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false -license: mit -duplicated_from: riffusion/riffusion-playground ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/__init__.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/__init__.py deleted file mode 100644 index a0e0c5932838281e912079e5784d84d43444a61a..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from torch.optim import * # NOQA -from .radam import * # NOQA diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/GPTalk.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/GPTalk.py deleted file mode 100644 index c85399c1dbf0d2d23d5b8e02b7061e201610f242..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/GPTalk.py +++ /dev/null @@ -1,83 +0,0 @@ -from __future__ import annotations - -import secrets, time, json -from aiohttp import ClientSession -from typing import AsyncGenerator - -from .base_provider import AsyncGeneratorProvider -from .helper import format_prompt - - -class GPTalk(AsyncGeneratorProvider): - url = "https://gptalk.net" - supports_gpt_35_turbo = True - working = True - _auth = None - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - if not model: - model = "gpt-3.5-turbo" - timestamp = int(time.time()) - headers = { - 'authority': 'gptalk.net', - 'accept': '*/*', - 'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6,nl;q=0.5,zh-CN;q=0.4,zh-TW;q=0.3,zh;q=0.2', - 'content-type': 'application/json', - 'origin': 'https://gptalk.net', - 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Linux"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36', - 'x-auth-appid': '2229', - 'x-auth-openid': '', - 'x-auth-platform': '', - 'x-auth-timestamp': f"{timestamp}", - } - async with ClientSession(headers=headers) as session: - if not cls._auth or cls._auth["expires_at"] < timestamp: - data = { - "fingerprint": secrets.token_hex(16).zfill(32), - "platform": "fingerprint" - } - async with session.post(cls.url + "/api/chatgpt/user/login", json=data) as response: - response.raise_for_status() - cls._auth = (await response.json())["data"] - data = { - "content": format_prompt(messages), - "accept": "stream", - "from": 1, - "model": model, - "is_mobile": 0, - 
"user_agent": headers["user-agent"], - "is_open_ctx": 0, - "prompt": "", - "roid": 111, - "temperature": 0, - "ctx_msg_count": 3, - "created_at": timestamp - } - headers = { - 'authorization': f'Bearer {cls._auth["token"]}', - } - async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers) as response: - response.raise_for_status() - token = (await response.json())["data"]["token"] - last_message = "" - async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}) as response: - response.raise_for_status() - async for line in response.content: - if line.startswith(b"data: "): - if line.startswith(b"data: [DONE]"): - break - message = json.loads(line[6:-1])["content"] - yield message[len(last_message):] - last_message = message \ No newline at end of file diff --git a/spaces/Aditya9790/yolo7-object-tracking/deploy/triton-inference-server/labels.py b/spaces/Aditya9790/yolo7-object-tracking/deploy/triton-inference-server/labels.py deleted file mode 100644 index ba6c5c516fcd1149233f34d73bb46d472a2bfed4..0000000000000000000000000000000000000000 --- a/spaces/Aditya9790/yolo7-object-tracking/deploy/triton-inference-server/labels.py +++ /dev/null @@ -1,83 +0,0 @@ -from enum import Enum - -class COCOLabels(Enum): - PERSON = 0 - BICYCLE = 1 - CAR = 2 - MOTORBIKE = 3 - AEROPLANE = 4 - BUS = 5 - TRAIN = 6 - TRUCK = 7 - BOAT = 8 - TRAFFIC_LIGHT = 9 - FIRE_HYDRANT = 10 - STOP_SIGN = 11 - PARKING_METER = 12 - BENCH = 13 - BIRD = 14 - CAT = 15 - DOG = 16 - HORSE = 17 - SHEEP = 18 - COW = 19 - ELEPHANT = 20 - BEAR = 21 - ZEBRA = 22 - GIRAFFE = 23 - BACKPACK = 24 - UMBRELLA = 25 - HANDBAG = 26 - TIE = 27 - SUITCASE = 28 - FRISBEE = 29 - SKIS = 30 - SNOWBOARD = 31 - SPORTS_BALL = 32 - KITE = 33 - BASEBALL_BAT = 34 - BASEBALL_GLOVE = 35 - SKATEBOARD = 36 - SURFBOARD = 37 - TENNIS_RACKET = 38 - BOTTLE = 39 - WINE_GLASS = 40 - CUP = 41 - FORK = 42 - KNIFE = 43 - SPOON = 44 - BOWL = 45 - BANANA = 46 - APPLE = 47 - SANDWICH = 48 - ORANGE = 49 - BROCCOLI = 50 - CARROT = 51 - HOT_DOG = 52 - PIZZA = 53 - DONUT = 54 - CAKE = 55 - CHAIR = 56 - SOFA = 57 - POTTEDPLANT = 58 - BED = 59 - DININGTABLE = 60 - TOILET = 61 - TVMONITOR = 62 - LAPTOP = 63 - MOUSE = 64 - REMOTE = 65 - KEYBOARD = 66 - CELL_PHONE = 67 - MICROWAVE = 68 - OVEN = 69 - TOASTER = 70 - SINK = 71 - REFRIGERATOR = 72 - BOOK = 73 - CLOCK = 74 - VASE = 75 - SCISSORS = 76 - TEDDY_BEAR = 77 - HAIR_DRIER = 78 - TOOTHBRUSH = 79 diff --git a/spaces/AlanMars/QYL-AI-Space/app.py b/spaces/AlanMars/QYL-AI-Space/app.py deleted file mode 100644 index fa48e71be9ccd7e57147438b4d35ff484133cc92..0000000000000000000000000000000000000000 --- a/spaces/AlanMars/QYL-AI-Space/app.py +++ /dev/null @@ -1,631 +0,0 @@ -# -*- coding:utf-8 -*- -import os -import logging -import sys - -import gradio as gr - -from modules import config -from modules.config import * -from modules.utils import * -from modules.presets import * -from modules.overwrites import * -from modules.models.models import get_model - -gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages -gr.Chatbot.postprocess = postprocess - -PromptHelper.compact_text_chunks = compact_text_chunks - -with open("assets/custom.css", "r", encoding="utf-8") as f: - customCSS = f.read() - - -def create_new_model(current_system_prompt="You are a helpful assistant."): - return get_model(model_name=MODELS[DEFAULT_MODEL], access_key=my_api_key, system_prompt=current_system_prompt)[0] - - -def get_system_prompt_content_by_role_name(role_name: str) -> str: - 
template_name_list = get_template_names(plain=True) - template_role_prompt_dict = load_template(template_name_list[0], mode=2) # [act:prompt] - prompt_content = template_role_prompt_dict[role_name] - return prompt_content - - -def get_role_name_by_id(prompt_id) -> str: - template_name_list = get_template_names(plain=True) - template_id_role_dict = load_template(template_name_list[0], mode=3) # [id:act]) - role_name = template_id_role_dict[prompt_id] - return role_name - - -def get_user_key_by_user_name(user_login_name: str) -> str: - user_key_pairs_dict = {row[0]: row[1] for row in user_key_pairs_list} - - return user_key_pairs_dict[user_login_name] - - -with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo: - # Session State - user_name = gr.State("") - user_question = gr.State("") - user_topic = gr.State(i18n("未命名对话历史记录")) - - assert type(my_api_key) == str - user_api_key = gr.State(my_api_key) - current_model = gr.State(create_new_model()) - current_prompt_template = gr.State(load_template(get_template_names(plain=True)[0], mode=2)) - - # Header - with gr.Row(): - gr.HTML(CHUANHU_TITLE, elem_id="app_title") - status_display = gr.Markdown(get_geoip(), elem_id="status_display") - with gr.Row(elem_id="float_display", visible=True): - user_info = gr.Markdown(value="getting user info...", elem_id="user_info") - - # Body - with gr.Row().style(equal_height=True): - # Left Panel - with gr.Column(scale=5): - with gr.Row(): - chatbot = gr.Chatbot(label="QYL Chat", elem_id="chuanhu_chatbot").style(height="100%") - with gr.Row(): - with gr.Column(min_width=225, scale=12): - user_input = gr.Textbox( - elem_id="user_input_tb", - show_label=False, placeholder=i18n("在这里输入") - ).style(container=False) - with gr.Column(min_width=42, scale=1): - submitBtn = gr.Button(value="", variant="primary", elem_id="submit_btn") - cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel_btn") - with gr.Row(): - emptyBtn = gr.Button( - i18n("🧹 新的对话"), elem_id="empty_btn" - ) - retryBtn = gr.Button(i18n("🔄 重新生成")) - delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"), visible=False) - delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"), visible=False) - with gr.Row(visible=False) as like_dislike_area: - with gr.Column(min_width=20, scale=1): - likeBtn = gr.Button(i18n("👍")) - with gr.Column(min_width=20, scale=1): - dislikeBtn = gr.Button(i18n("👎")) - # Right Panel - with gr.Column(): - with gr.Column(min_width=50, scale=1): - with gr.Tab(label=i18n("对话")): - # with gr.Accordion(label=i18n("保存/加载对话历史记录"), open=True): - with gr.Accordion(label=i18n("加载对话历史记录"), open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - historyFileSelectDropdown = gr.Dropdown( - label=i18n("从列表中加载对话"), - choices=get_history_names(plain=True), - multiselect=False - ).style(container=False) - with gr.Column(scale=1): - historyRefreshBtn = gr.Button(i18n("🔄 刷新")) - with gr.Row(visible=False): - with gr.Column(scale=6): - saveFileName = gr.Textbox( - show_label=True, - placeholder=i18n("设置文件名: 默认为.json,可选为.md"), - label=i18n("设置保存文件名"), - value=i18n("对话历史记录"), - ).style(container=True) - with gr.Column(scale=1): - saveHistoryBtn = gr.Button(i18n("💾 保存对话")) - exportMarkdownBtn = gr.Button(i18n("📝 导出为Markdown")) - gr.Markdown(i18n("默认保存于history文件夹")) - with gr.Row(visible=False): - with gr.Column(): - downloadFile = gr.File(interactive=True) - - with gr.Tab(label=i18n("智人")): - systemPromptTxt = gr.Textbox( - show_label=True, - placeholder=i18n(f"在这里输入System Prompt...\n\n Current system prompt: 
{INITIAL_SYSTEM_PROMPT}"), - label="System prompt", - value=INITIAL_SYSTEM_PROMPT, - lines=12, - visible=False, - ).style(container=False) - with gr.Accordion(label=i18n("植入角色"), open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - templateFileSelectDropdown = gr.Dropdown( - label=i18n("选择分类"), - choices=[os.path.splitext(f)[0] for f in get_template_names(plain=True)], - multiselect=False, - value=os.path.splitext(get_template_names(plain=True)[0])[0] - ).style(container=False) - with gr.Column(scale=1): - templateRefreshBtn = gr.Button(i18n("🔄 刷新")) - with gr.Row(): - with gr.Column(): - templateSelectDropdown = gr.Dropdown( - label=i18n("选择职能"), - choices=load_template(get_template_names(plain=True)[0], mode=1), - multiselect=False, - value=load_template(get_template_names(plain=True)[0], mode=1)[0] - ).style(container=False) - model_select_dropdown = gr.Dropdown( - label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL], - interactive=True, visible=False - ) - lora_select_dropdown = gr.Dropdown( - label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True, visible=False - ) - with gr.Row(): - use_streaming_checkbox = gr.Checkbox(label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION, interactive=False) - single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False, visible=False) - use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False, visible=False) - # render_latex_checkbox = gr.Checkbox(label=i18n("渲染LaTeX公式"), value=render_latex, interactive=True, elem_id="render_latex_checkbox") - language_select_dropdown = gr.Dropdown( - label=i18n("选择回复语言(针对搜索&索引功能)"), - choices=REPLY_LANGUAGES, - multiselect=False, - value=REPLY_LANGUAGES[0], - visible=False - ) - index_files = gr.Files(label=i18n("上传"), type="file", visible=False) - two_column = gr.Checkbox(label=i18n("双栏pdf"), value=advance_docs["pdf"].get("two_column", False), - visible=False) - # TODO: 公式ocr - # formula_ocr = gr.Checkbox(label=i18n("识别公式"), value=advance_docs["pdf"].get("formula_ocr", False)) - - with gr.Tab(label=i18n("高级")): - # gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置")) - # gr.HTML(APPEARANCE_SWITCHER, elem_classes="insert_block") - keyTxt = gr.Textbox( - show_label=True, - placeholder=f"Your API-key...", - value=hide_middle_chars(user_api_key.value), - type="password", - visible=not HIDE_MY_KEY, - label="API-Key", - ) - if multi_api_key: - usageTxt = gr.Markdown(i18n("多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage_display", - elem_classes="insert_block") - else: - usageTxt = gr.Markdown(i18n("**发送消息** 或 **提交key** 以显示额度"), elem_id="usage_display", - elem_classes="insert_block") - with gr.Accordion(i18n("参数"), open=True): - temperature_slider = gr.Slider( - minimum=-0, - maximum=2.0, - value=1.0, - step=0.1, - interactive=True, - label=i18n("创意度") - ) - top_p_slider = gr.Slider( - minimum=-0, - maximum=1.0, - value=1.0, - step=0.05, - interactive=True, - label=i18n("top-p"), - visible=False - ) - n_choices_slider = gr.Slider( - minimum=1, - maximum=10, - value=1, - step=1, - interactive=True, - label=i18n("n choices"), - visible=False - ) - stop_sequence_txt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入停止符,用英文逗号隔开..."), - label="stop", - value="", - lines=1, - visible=False, - ) - max_context_length_slider = gr.Slider( - minimum=1, - maximum=32768, - value=2000, - step=1, - interactive=True, - label=i18n("max context"), - visible=False - ) - max_generation_slider = gr.Slider( - minimum=1, - maximum=32768, - 
value=1000, - step=1, - interactive=True, - label=i18n("max generations"), - visible=False - ) - presence_penalty_slider = gr.Slider( - minimum=-2.0, - maximum=2.0, - value=0.0, - step=0.01, - interactive=True, - label=i18n("presence penalty"), - visible=False - ) - frequency_penalty_slider = gr.Slider( - minimum=-2.0, - maximum=2.0, - value=0.0, - step=0.01, - interactive=True, - label=i18n("frequency penalty"), - visible=False - ) - logit_bias_txt = gr.Textbox( - show_label=True, - placeholder=f"word:likelihood", - label="logit bias", - value="", - lines=1, - visible=False - ) - user_identifier_txt = gr.Textbox( - show_label=True, - placeholder=i18n("用于定位滥用行为"), - label=i18n("用户名"), - value=user_name.value, - lines=1, - visible=False - ) - - with gr.Accordion(i18n("网络设置"), open=False, visible=False): - # 优先展示自定义的api_host - apihostTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入API-Host..."), - label="API-Host", - value=config.api_host or shared.API_HOST, - lines=1 - ) - changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址")) - proxyTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入代理地址..."), - label=i18n("代理地址(示例:http://127.0.0.1:10809)"), - value="", - lines=2 - ) - changeProxyBtn = gr.Button(i18n("🔄 设置代理地址")) - default_btn = gr.Button(i18n("🔙 恢复默认设置")) - - # Footer - gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description") - gr.HTML(FOOTER.format(versions=versions_html()), elem_id="footer") - - - def create_greeting(request: gr.Request): - """ - Access username from gr.Request class. Be able to access current username from application #3259 - https://github.com/gradio-app/gradio/pull/3296 - """ - # Update System Prompt - show_system_prompt = False - current_user_prompts = [] - current_user_api_key = "" - - # Update User Profile - user_request_name = ANONYMOUS_USER # using anonymous - if hasattr(request, "username") and request.username: # is not None or is not "" - logging.info(f"Get user name from gr.Request: {request.username}") - if request.username == 'admin001' or request.username == 'admin002' or request.username == 'admin003': - show_system_prompt = True - logging.info(f"show_system_prompt: {show_system_prompt}") - - # Update user prompt - current_user_prompts = load_user_prompts(request.username) - - # Update user API KEY - current_user_api_key = get_user_key_by_user_name(request.username) - logging.debug(f"Current user and key pairs: {request.username}:{current_user_api_key}") - - user_request_name = hide_username(request.username) # just show the last N character - else: - logging.info(f"Failed to get user name from gr.Request. 
ANONYMOUS_USER: {user_request_name}") - current_user_prompts = load_user_prompts(user_request_name) - logging.info(f"current_user_prompts: {current_user_prompts}") - - if show_system_prompt: - user_info_string = gr.Markdown.update(value=i18n(f"Admin: {user_request_name}")) - else: - user_info_string = gr.Markdown.update(value=i18n(f"User: {user_request_name}")) - - # Update current model - first_system_prompt = get_system_prompt_content_by_role_name(current_user_prompts[0]) - current_model = create_new_model(first_system_prompt) - current_model.set_user_identifier(user_request_name) - - # Update chatBot - chatbot = gr.Chatbot.update(label=MODELS[DEFAULT_MODEL]) - - return user_info_string, user_request_name, \ - current_model, toggle_like_btn_visibility(DEFAULT_MODEL), *current_model.auto_load(), \ - get_history_names(False, user_request_name), chatbot, gr.update(visible=show_system_prompt), \ - gr.update(choices=current_user_prompts, value=current_user_prompts[0]), \ - gr.update(value=current_user_api_key), gr.update(value=current_user_api_key) - - - demo.load(fn=create_greeting, - inputs=None, - outputs=[user_info, user_name, current_model, like_dislike_area, systemPromptTxt, chatbot, - historyFileSelectDropdown, chatbot, systemPromptTxt, templateSelectDropdown, user_api_key, keyTxt], - api_name="load") - - # Debugging - ''' - logging.info( - colorama.Back.GREEN - + f"\nAfter demo.load() gr.systemPromptTxt: {systemPromptTxt.value}" - + f"\nAfter demo.load() gr.State.current_prompt_template: {current_prompt_template.value}" - + f"\nAfter demo.load() gr.State.current_model.system_prompt: {current_model.value.system_prompt}" - + colorama.Style.RESET_ALL - ) - ''' - - chatgpt_predict_args = dict( - fn=predict, - inputs=[ - current_model, - user_question, - chatbot, - use_streaming_checkbox, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - outputs=[chatbot, status_display], - show_progress=True, - ) - - start_outputing_args = dict( - fn=start_outputing, - inputs=[], - outputs=[submitBtn, cancelBtn], - show_progress=True, - ) - - end_outputing_args = dict( - fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn] - ) - - reset_textbox_args = dict( - fn=reset_textbox, inputs=[], outputs=[user_input] - ) - - transfer_input_args = dict( - fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn, cancelBtn], - show_progress=True - ) - - get_usage_args = dict( - fn=billing_info, inputs=[current_model], outputs=[usageTxt], show_progress=False - ) - - load_history_from_file_args = dict( - fn=load_chat_history, - inputs=[current_model, historyFileSelectDropdown, user_name], - outputs=[saveFileName, systemPromptTxt, chatbot] - ) - - # Chatbot - cancelBtn.click(interrupt, [current_model], []) - ''' Running Events Consecutively - run events consecutively by using the then method of an event listener. This will run an event after the previous - event has finished running. This is useful for running events that update components in multiple steps. - The .then() method of an event listener executes the subsequent event regardless of whether the previous event - raised any errors. If you'd like to only run subsequent events if the previous event executed successfully, - use the .success() method, which takes the same arguments as .then(). 
- ''' - user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - user_input.submit(**get_usage_args) - - submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args, api_name="predict").then(**end_outputing_args) - submitBtn.click(**get_usage_args) - - index_files.change(handle_file_upload, [current_model, index_files, chatbot], - [index_files, chatbot, status_display]) - - emptyBtn.click( - reset, - inputs=[current_model], - outputs=[chatbot, status_display], - show_progress=True, - ) - - retryBtn.click(**start_outputing_args).then( - retry, - [ - current_model, - chatbot, - use_streaming_checkbox, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - [chatbot, status_display], - show_progress=True, - ).then(**end_outputing_args) - retryBtn.click(**get_usage_args) - - delFirstBtn.click( - delete_first_conversation, - [current_model], - [status_display], - ) - - delLastBtn.click( - delete_last_conversation, - [current_model, chatbot], - [chatbot, status_display], - show_progress=False - ) - - likeBtn.click( - like, - [current_model], - [status_display], - show_progress=False - ) - - dislikeBtn.click( - dislike, - [current_model], - [status_display], - show_progress=False - ) - - two_column.change(update_doc_config, [two_column], None) - - # LLM Models - keyTxt.change(set_key, [current_model, keyTxt], [user_api_key, status_display], api_name="set_key").then( - **get_usage_args) - keyTxt.submit(**get_usage_args) - single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None) - model_select_dropdown.change(get_model, - [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, - top_p_slider, systemPromptTxt, user_name], - [current_model, status_display, chatbot, lora_select_dropdown], show_progress=True, - api_name="get_model") - model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], - show_progress=False) - lora_select_dropdown.change(get_model, - [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, - top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot], - show_progress=True) - - # Template - systemPromptTxt.change(set_system_prompt, [current_model, systemPromptTxt], None) - templateRefreshBtn.click(get_template_names_without_extension, None, [templateFileSelectDropdown]) - templateFileSelectDropdown.change( - load_template, - [templateFileSelectDropdown], - [current_prompt_template, templateSelectDropdown], - show_progress=True, - ) - templateSelectDropdown.change( - get_template_content, - [current_prompt_template, templateSelectDropdown, systemPromptTxt], - [systemPromptTxt], - show_progress=True, - ) - - # S&L - saveHistoryBtn.click( - save_chat_history, - [current_model, saveFileName, chatbot, user_name], - downloadFile, - show_progress=True, - ) - saveHistoryBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - exportMarkdownBtn.click( - export_markdown, - [current_model, saveFileName, chatbot, user_name], - downloadFile, - show_progress=True, - ) - historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - historyFileSelectDropdown.change(**load_history_from_file_args) - downloadFile.change(upload_chat_history, [current_model, downloadFile, user_name], - [saveFileName, systemPromptTxt, chatbot]) - - # Advanced - max_context_length_slider.change(set_token_upper_limit, [current_model, 
max_context_length_slider], None) - temperature_slider.change(set_temperature, [current_model, temperature_slider], None) - top_p_slider.change(set_top_p, [current_model, top_p_slider], None) - n_choices_slider.change(set_n_choices, [current_model, n_choices_slider], None) - stop_sequence_txt.change(set_stop_sequence, [current_model, stop_sequence_txt], None) - max_generation_slider.change(set_max_tokens, [current_model, max_generation_slider], None) - presence_penalty_slider.change(set_presence_penalty, [current_model, presence_penalty_slider], None) - frequency_penalty_slider.change(set_frequency_penalty, [current_model, frequency_penalty_slider], None) - logit_bias_txt.change(set_logit_bias, [current_model, logit_bias_txt], None) - user_identifier_txt.change(set_user_identifier, [current_model, user_identifier_txt], None) - - default_btn.click( - reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True - ) - changeAPIURLBtn.click( - change_api_host, - [apihostTxt], - [status_display], - show_progress=True, - ) - changeProxyBtn.click( - change_proxy, - [proxyTxt], - [status_display], - show_progress=True, - ) - -logging.info( - colorama.Back.GREEN - + f"\n温馨提示:访问 http://{server_name}:{server_port} 查看界面" - + colorama.Style.RESET_ALL -) -# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接 -demo.title = i18n("启源力 AI") - -'''Control the rate of processed requests by creating a queue. This will allow you to set the number of requests to -be processed at one time, and will let users know their position in the queue. Number of worker threads that will be -processing requests from the queue concurrently. Increasing this number will increase the rate at which requests are -processed, but will also increase the memory usage of the queue. ''' -demo.queue(concurrency_count=CONCURRENT_COUNT) - -if __name__ == "__main__": - reload_javascript() - - # HF deploy - demo.launch( - share=False, - auth=auth_list if authflag else None, - auth_message=i18n("启源力智人AI助理"), - favicon_path="./assets/favicon-64.png" - ) - - ''' - # Cloud deploy - demo.launch( - server_name=server_name, - server_port=server_port, - share=False, - auth=auth_list if authflag else None, - auth_message=i18n("启源力智人AI助理"), - favicon_path="./assets/favicon-64.png" - ) - ''' - - ''' - # Local deploy - demo.launch( - server_name=server_name, - server_port=server_port, - share=True, - auth=auth_list if authflag else None, - auth_message=i18n("启源力智人AI助理"), - favicon_path="./assets/favicon-64.png", - inbrowser=not dockerflag # 禁止在docker下开启inbrowser - ) - ''' - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - # server_name=server_name, - # server_port=server_port, - # share=share, - # auth=auth_list if authflag else None, - # favicon_path="./assets/favicon-64.png", - # inbrowser=not dockerflag, # 禁止在docker下开启inbrowser - # ) - - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理 diff --git a/spaces/Alpaca233/ChatPDF-GUI/app.py b/spaces/Alpaca233/ChatPDF-GUI/app.py deleted file mode 100644 index 5d13d3c7d83ea66b64d10dd328091c3b3bdb100b..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/ChatPDF-GUI/app.py +++ /dev/null @@ -1,51 +0,0 @@ -import gradio as gr - -from gpt_reader.pdf_reader import PaperReader -from 
gpt_reader.prompt import BASE_POINTS - - -class GUI: - def __init__(self): - self.api_key = "" - self.session = "" - - def analyse(self, api_key, pdf_file): - self.session = PaperReader(api_key, points_to_focus=BASE_POINTS) - return self.session.read_pdf_and_summarize(pdf_file) - - def ask_question(self, question): - if self.session == "": - return "Please upload PDF file first!" - return self.session.question(question) - - -with gr.Blocks() as demo: - gr.Markdown( - """ - # CHATGPT-PAPER-READER - """) - - with gr.Tab("Upload PDF File"): - pdf_input = gr.File(label="PDF File") - api_input = gr.Textbox(label="OpenAI API Key") - result = gr.Textbox(label="PDF Summary") - upload_button = gr.Button("Start Analyse") - with gr.Tab("Ask question about your PDF"): - question_input = gr.Textbox(label="Your Question", placeholder="Authors of this paper?") - answer = gr.Textbox(label="Answer") - ask_button = gr.Button("Ask") - with gr.Accordion("About this project"): - gr.Markdown( - """## CHATGPT-PAPER-READER📝 - This repository provides a simple interface that utilizes the gpt-3.5-turbo - model to read academic papers in PDF format locally. You can use it to help you summarize papers, - create presentation slides, or simply fulfill tasks assigned by your supervisor.\n - [Github](https://github.com/talkingwallace/ChatGPT-Paper-Reader)""") - - app = GUI() - upload_button.click(fn=app.analyse, inputs=[api_input, pdf_input], outputs=result) - ask_button.click(app.ask_question, inputs=question_input, outputs=answer) - -if __name__ == "__main__": - demo.title = "CHATGPT-PAPER-READER" - demo.launch() # add "share=True" to share CHATGPT-PAPER-READER app on Internet. diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/img2img.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/img2img.md deleted file mode 100644 index b3de84c0f4eb72f3fb2871e5d78d80a812de548f..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/img2img.md +++ /dev/null @@ -1,55 +0,0 @@ - - -# Image-to-image - -The Stable Diffusion model can also be applied to image-to-image generation by passing a text prompt and an initial image to condition the generation of new images. - -The [`StableDiffusionImg2ImgPipeline`] uses the diffusion-denoising mechanism proposed in [SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations](https://huggingface.co/papers/2108.01073) by Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jiajun Wu, Jun-Yan Zhu, Stefano Ermon. - -The abstract from the paper is: - -*Guided image synthesis enables everyday users to create and edit photo-realistic images with minimum effort. The key challenge is balancing faithfulness to the user input (e.g., hand-drawn colored strokes) and realism of the synthesized image. Existing GAN-based methods attempt to achieve such balance using either conditional GANs or GAN inversions, which are challenging and often require additional training data or loss functions for individual applications. To address these issues, we introduce a new image synthesis and editing method, Stochastic Differential Editing (SDEdit), based on a diffusion model generative prior, which synthesizes realistic images by iteratively denoising through a stochastic differential equation (SDE). 
Given an input image with user guide of any type, SDEdit first adds noise to the input, then subsequently denoises the resulting image through the SDE prior to increase its realism. SDEdit does not require task-specific training or inversions and can naturally achieve the balance between realism and faithfulness. SDEdit significantly outperforms state-of-the-art GAN-based methods by up to 98.09% on realism and 91.72% on overall satisfaction scores, according to a human perception study, on multiple tasks, including stroke-based image synthesis and editing as well as image compositing.* - - - -Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! - - - -## StableDiffusionImg2ImgPipeline - -[[autodoc]] StableDiffusionImg2ImgPipeline - - all - - __call__ - - enable_attention_slicing - - disable_attention_slicing - - enable_xformers_memory_efficient_attention - - disable_xformers_memory_efficient_attention - - load_textual_inversion - - from_single_file - - load_lora_weights - - save_lora_weights - -## StableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput - -## FlaxStableDiffusionImg2ImgPipeline - -[[autodoc]] FlaxStableDiffusionImg2ImgPipeline - - all - - __call__ - -## FlaxStableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py deleted file mode 100644 index 08d0878db588aa38a2e602a3bc5f6505b9457575..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py +++ /dev/null @@ -1,667 +0,0 @@ -# Copyright 2022 The Music Spectrogram Diffusion Authors. -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import dataclasses -import math -import os -from typing import Any, Callable, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union - -import numpy as np -import torch -import torch.nn.functional as F - -from ...utils import is_note_seq_available -from .pipeline_spectrogram_diffusion import TARGET_FEATURE_LENGTH - - -if is_note_seq_available(): - import note_seq -else: - raise ImportError("Please install note-seq via `pip install note-seq`") - - -INPUT_FEATURE_LENGTH = 2048 - -SAMPLE_RATE = 16000 -HOP_SIZE = 320 -FRAME_RATE = int(SAMPLE_RATE // HOP_SIZE) - -DEFAULT_STEPS_PER_SECOND = 100 -DEFAULT_MAX_SHIFT_SECONDS = 10 -DEFAULT_NUM_VELOCITY_BINS = 1 - -SLAKH_CLASS_PROGRAMS = { - "Acoustic Piano": 0, - "Electric Piano": 4, - "Chromatic Percussion": 8, - "Organ": 16, - "Acoustic Guitar": 24, - "Clean Electric Guitar": 26, - "Distorted Electric Guitar": 29, - "Acoustic Bass": 32, - "Electric Bass": 33, - "Violin": 40, - "Viola": 41, - "Cello": 42, - "Contrabass": 43, - "Orchestral Harp": 46, - "Timpani": 47, - "String Ensemble": 48, - "Synth Strings": 50, - "Choir and Voice": 52, - "Orchestral Hit": 55, - "Trumpet": 56, - "Trombone": 57, - "Tuba": 58, - "French Horn": 60, - "Brass Section": 61, - "Soprano/Alto Sax": 64, - "Tenor Sax": 66, - "Baritone Sax": 67, - "Oboe": 68, - "English Horn": 69, - "Bassoon": 70, - "Clarinet": 71, - "Pipe": 73, - "Synth Lead": 80, - "Synth Pad": 88, -} - - -@dataclasses.dataclass -class NoteRepresentationConfig: - """Configuration for note representations.""" - - onsets_only: bool - include_ties: bool - - -@dataclasses.dataclass -class NoteEventData: - pitch: int - velocity: Optional[int] = None - program: Optional[int] = None - is_drum: Optional[bool] = None - instrument: Optional[int] = None - - -@dataclasses.dataclass -class NoteEncodingState: - """Encoding state for note transcription, keeping track of active pitches.""" - - # velocity bin for active pitches and programs - active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(default_factory=dict) - - -@dataclasses.dataclass -class EventRange: - type: str - min_value: int - max_value: int - - -@dataclasses.dataclass -class Event: - type: str - value: int - - -class Tokenizer: - def __init__(self, regular_ids: int): - # The special tokens: 0=PAD, 1=EOS, and 2=UNK - self._num_special_tokens = 3 - self._num_regular_tokens = regular_ids - - def encode(self, token_ids): - encoded = [] - for token_id in token_ids: - if not 0 <= token_id < self._num_regular_tokens: - raise ValueError( - f"token_id {token_id} does not fall within valid range of [0, {self._num_regular_tokens})" - ) - encoded.append(token_id + self._num_special_tokens) - - # Add EOS token - encoded.append(1) - - # Pad up to INPUT_FEATURE_LENGTH - encoded = encoded + [0] * (INPUT_FEATURE_LENGTH - len(encoded)) - - return encoded - - -class Codec: - """Encode and decode events. - - Useful for declaring what certain ranges of a vocabulary should be used for. This is intended to be used from - Python before encoding or after decoding with GenericTokenVocabulary. This class is more lightweight and does not - include things like EOS or UNK token handling. - - To ensure that 'shift' events are always the first block of the vocab and start at 0, that event type is required - and specified separately. - """ - - def __init__(self, max_shift_steps: int, steps_per_second: float, event_ranges: List[EventRange]): - """Define Codec. - - Args: - max_shift_steps: Maximum number of shift steps that can be encoded.
- steps_per_second: Shift steps will be interpreted as having a duration of - 1 / steps_per_second. - event_ranges: Other supported event types and their ranges. - """ - self.steps_per_second = steps_per_second - self._shift_range = EventRange(type="shift", min_value=0, max_value=max_shift_steps) - self._event_ranges = [self._shift_range] + event_ranges - # Ensure all event types have unique names. - assert len(self._event_ranges) == len({er.type for er in self._event_ranges}) - - @property - def num_classes(self) -> int: - return sum(er.max_value - er.min_value + 1 for er in self._event_ranges) - - # The next couple methods are simplified special case methods just for shift - # events that are intended to be used from within autograph functions. - - def is_shift_event_index(self, index: int) -> bool: - return (self._shift_range.min_value <= index) and (index <= self._shift_range.max_value) - - @property - def max_shift_steps(self) -> int: - return self._shift_range.max_value - - def encode_event(self, event: Event) -> int: - """Encode an event to an index.""" - offset = 0 - for er in self._event_ranges: - if event.type == er.type: - if not er.min_value <= event.value <= er.max_value: - raise ValueError( - f"Event value {event.value} is not within valid range " - f"[{er.min_value}, {er.max_value}] for type {event.type}" - ) - return offset + event.value - er.min_value - offset += er.max_value - er.min_value + 1 - - raise ValueError(f"Unknown event type: {event.type}") - - def event_type_range(self, event_type: str) -> Tuple[int, int]: - """Return [min_id, max_id] for an event type.""" - offset = 0 - for er in self._event_ranges: - if event_type == er.type: - return offset, offset + (er.max_value - er.min_value) - offset += er.max_value - er.min_value + 1 - - raise ValueError(f"Unknown event type: {event_type}") - - def decode_event_index(self, index: int) -> Event: - """Decode an event index to an Event.""" - offset = 0 - for er in self._event_ranges: - if offset <= index <= offset + er.max_value - er.min_value: - return Event(type=er.type, value=er.min_value + index - offset) - offset += er.max_value - er.min_value + 1 - - raise ValueError(f"Unknown event index: {index}") - - -@dataclasses.dataclass -class ProgramGranularity: - # both tokens_map_fn and program_map_fn should be idempotent - tokens_map_fn: Callable[[Sequence[int], Codec], Sequence[int]] - program_map_fn: Callable[[int], int] - - -def drop_programs(tokens, codec: Codec): - """Drops program change events from a token sequence.""" - min_program_id, max_program_id = codec.event_type_range("program") - return tokens[(tokens < min_program_id) | (tokens > max_program_id)] - - -def programs_to_midi_classes(tokens, codec): - """Modifies program events to be the first program in the MIDI class.""" - min_program_id, max_program_id = codec.event_type_range("program") - is_program = (tokens >= min_program_id) & (tokens <= max_program_id) - return np.where(is_program, min_program_id + 8 * ((tokens - min_program_id) // 8), tokens) - - -PROGRAM_GRANULARITIES = { - # "flat" granularity; drop program change tokens and set NoteSequence - # programs to zero - "flat": ProgramGranularity(tokens_map_fn=drop_programs, program_map_fn=lambda program: 0), - # map each program to the first program in its MIDI class - "midi_class": ProgramGranularity( - tokens_map_fn=programs_to_midi_classes, program_map_fn=lambda program: 8 * (program // 8) - ), - # leave programs as is - "full": ProgramGranularity(tokens_map_fn=lambda tokens, codec: tokens, 
program_map_fn=lambda program: program), -} - - -def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1): - """ - Equivalent of tf.signal.frame. - """ - signal_length = signal.shape[axis] - if pad_end: - frames_overlap = frame_length - frame_step - rest_samples = np.abs(signal_length - frames_overlap) % np.abs(frame_length - frames_overlap) - pad_size = int(frame_length - rest_samples) - - if pad_size != 0: - pad_axis = [0] * signal.ndim - pad_axis[axis] = pad_size - signal = F.pad(signal, pad_axis, "constant", pad_value) - frames = signal.unfold(axis, frame_length, frame_step) - return frames - - -def program_to_slakh_program(program): - # This is done very hackily; it should probably use a custom mapping. - for slakh_program in sorted(SLAKH_CLASS_PROGRAMS.values(), reverse=True): - if program >= slakh_program: - return slakh_program - - -def audio_to_frames( - samples, - hop_size: int, - frame_rate: int, -) -> Tuple[Sequence[Sequence[int]], torch.Tensor]: - """Convert audio samples to non-overlapping frames and frame times.""" - frame_size = hop_size - samples = np.pad(samples, [0, frame_size - len(samples) % frame_size], mode="constant") - - # Split audio into frames. - frames = frame( - torch.Tensor(samples).unsqueeze(0), - frame_length=frame_size, - frame_step=frame_size, - pad_end=False, # TODO: check why it's off by 1 here when True - ) - - num_frames = len(samples) // frame_size - - times = np.arange(num_frames) / frame_rate - return frames, times - - -def note_sequence_to_onsets_and_offsets_and_programs( - ns: note_seq.NoteSequence, -) -> Tuple[Sequence[float], Sequence[NoteEventData]]: - """Extract onset & offset times and pitches & programs from a NoteSequence. - - The onset & offset times will not necessarily be in sorted order. - - Args: - ns: NoteSequence from which to extract onsets and offsets. - - Returns: - times: A list of note onset and offset times. - values: A list of NoteEventData objects where velocity is zero for note offsets. - """ - # Sort by program and pitch and put offsets before onsets as a tiebreaker for - # subsequent stable sort.
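- # (Because offsets are listed before onsets here, the stable argsort performed
- # later in encode_and_index_events processes a note-off at time t before any
- # note-on at the same time t.)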
- notes = sorted(ns.notes, key=lambda note: (note.is_drum, note.program, note.pitch)) - times = [note.end_time for note in notes if not note.is_drum] + [note.start_time for note in notes] - values = [ - NoteEventData(pitch=note.pitch, velocity=0, program=note.program, is_drum=False) - for note in notes - if not note.is_drum - ] + [ - NoteEventData(pitch=note.pitch, velocity=note.velocity, program=note.program, is_drum=note.is_drum) - for note in notes - ] - return times, values - - -def num_velocity_bins_from_codec(codec: Codec): - """Get number of velocity bins from event codec.""" - lo, hi = codec.event_type_range("velocity") - return hi - lo - - -# segment an array into segments of length n -def segment(a, n): - return [a[i : i + n] for i in range(0, len(a), n)] - - -def velocity_to_bin(velocity, num_velocity_bins): - if velocity == 0: - return 0 - else: - return math.ceil(num_velocity_bins * velocity / note_seq.MAX_MIDI_VELOCITY) - - -def note_event_data_to_events( - state: Optional[NoteEncodingState], - value: NoteEventData, - codec: Codec, -) -> Sequence[Event]: - """Convert note event data to a sequence of events.""" - if value.velocity is None: - # onsets only, no program or velocity - return [Event("pitch", value.pitch)] - else: - num_velocity_bins = num_velocity_bins_from_codec(codec) - velocity_bin = velocity_to_bin(value.velocity, num_velocity_bins) - if value.program is None: - # onsets + offsets + velocities only, no programs - if state is not None: - state.active_pitches[(value.pitch, 0)] = velocity_bin - return [Event("velocity", velocity_bin), Event("pitch", value.pitch)] - else: - if value.is_drum: - # drum events use a separate vocabulary - return [Event("velocity", velocity_bin), Event("drum", value.pitch)] - else: - # program + velocity + pitch - if state is not None: - state.active_pitches[(value.pitch, value.program)] = velocity_bin - return [ - Event("program", value.program), - Event("velocity", velocity_bin), - Event("pitch", value.pitch), - ] - - -def note_encoding_state_to_events(state: NoteEncodingState) -> Sequence[Event]: - """Output program and pitch events for active notes plus a final tie event.""" - events = [] - for pitch, program in sorted(state.active_pitches.keys(), key=lambda k: k[::-1]): - if state.active_pitches[(pitch, program)]: - events += [Event("program", program), Event("pitch", pitch)] - events.append(Event("tie", 0)) - return events - - -def encode_and_index_events( - state, event_times, event_values, codec, frame_times, encode_event_fn, encoding_state_to_events_fn=None -): - """Encode a sequence of timed events and index to audio frame times. - - Encodes time shifts as repeated single step shifts for later run length encoding. - - Optionally, also encodes a sequence of "state events", keeping track of the current encoding state at each audio - frame. This can be used e.g. to prepend events representing the current state to a targets segment. - - Args: - state: Initial event encoding state. - event_times: Sequence of event times. - event_values: Sequence of event values. - codec: A Codec object that maps Event objects to indices. - frame_times: Time for every audio frame. - encode_event_fn: Function that transforms event value into a sequence of one - or more Event objects. - encoding_state_to_events_fn: Function that transforms encoding state into a - sequence of one or more Event objects. - - Returns: - events: Encoded events and shifts. - event_start_indices: Corresponding start event index for every audio frame.
- Note: one event can correspond to multiple audio indices due to sampling rate differences. This makes - splitting sequences tricky because the same event can appear at the end of one sequence and the beginning of - another. - event_end_indices: Corresponding end event index for every audio frame. Used - to ensure when slicing that one chunk ends where the next begins. Should always be true that - event_end_indices[i] = event_start_indices[i + 1]. - state_events: Encoded "state" events representing the encoding state before - each event. - state_event_indices: Corresponding state event index for every audio frame. - """ - indices = np.argsort(event_times, kind="stable") - event_steps = [round(event_times[i] * codec.steps_per_second) for i in indices] - event_values = [event_values[i] for i in indices] - - events = [] - state_events = [] - event_start_indices = [] - state_event_indices = [] - - cur_step = 0 - cur_event_idx = 0 - cur_state_event_idx = 0 - - def fill_event_start_indices_to_cur_step(): - while ( - len(event_start_indices) < len(frame_times) - and frame_times[len(event_start_indices)] < cur_step / codec.steps_per_second - ): - event_start_indices.append(cur_event_idx) - state_event_indices.append(cur_state_event_idx) - - for event_step, event_value in zip(event_steps, event_values): - while event_step > cur_step: - events.append(codec.encode_event(Event(type="shift", value=1))) - cur_step += 1 - fill_event_start_indices_to_cur_step() - cur_event_idx = len(events) - cur_state_event_idx = len(state_events) - if encoding_state_to_events_fn: - # Dump state to state events *before* processing the next event, because - # we want to capture the state prior to the occurrence of the event. - for e in encoding_state_to_events_fn(state): - state_events.append(codec.encode_event(e)) - - for e in encode_event_fn(state, event_value, codec): - events.append(codec.encode_event(e)) - - # After the last event, continue filling out the event_start_indices array. - # The inequality is not strict because if our current step lines up exactly - # with (the start of) an audio frame, we need to add an additional shift event - # to "cover" that frame. - while cur_step / codec.steps_per_second <= frame_times[-1]: - events.append(codec.encode_event(Event(type="shift", value=1))) - cur_step += 1 - fill_event_start_indices_to_cur_step() - cur_event_idx = len(events) - - # Now fill in event_end_indices. We need this extra array to make sure that - # when we slice events, each slice ends exactly where the subsequent slice - # begins. 
- event_end_indices = event_start_indices[1:] + [len(events)] - - events = np.array(events).astype(np.int32) - state_events = np.array(state_events).astype(np.int32) - event_start_indices = segment(np.array(event_start_indices).astype(np.int32), TARGET_FEATURE_LENGTH) - event_end_indices = segment(np.array(event_end_indices).astype(np.int32), TARGET_FEATURE_LENGTH) - state_event_indices = segment(np.array(state_event_indices).astype(np.int32), TARGET_FEATURE_LENGTH) - - outputs = [] - for start_indices, end_indices, event_indices in zip(event_start_indices, event_end_indices, state_event_indices): - outputs.append( - { - "inputs": events, - "event_start_indices": start_indices, - "event_end_indices": end_indices, - "state_events": state_events, - "state_event_indices": event_indices, - } - ) - - return outputs - - -def extract_sequence_with_indices(features, state_events_end_token=None, feature_key="inputs"): - """Extract target sequence corresponding to audio token segment.""" - features = features.copy() - start_idx = features["event_start_indices"][0] - end_idx = features["event_end_indices"][-1] - - features[feature_key] = features[feature_key][start_idx:end_idx] - - if state_events_end_token is not None: - # Extract the state events corresponding to the audio start token, and - # prepend them to the targets array. - state_event_start_idx = features["state_event_indices"][0] - state_event_end_idx = state_event_start_idx + 1 - while features["state_events"][state_event_end_idx - 1] != state_events_end_token: - state_event_end_idx += 1 - features[feature_key] = np.concatenate( - [ - features["state_events"][state_event_start_idx:state_event_end_idx], - features[feature_key], - ], - axis=0, - ) - - return features - - -def map_midi_programs( - feature, codec: Codec, granularity_type: str = "full", feature_key: str = "inputs" -) -> Mapping[str, Any]: - """Apply MIDI program map to token sequences.""" - granularity = PROGRAM_GRANULARITIES[granularity_type] - - feature[feature_key] = granularity.tokens_map_fn(feature[feature_key], codec) - return feature - - -def run_length_encode_shifts_fn( - features, - codec: Codec, - feature_key: str = "inputs", - state_change_event_types: Sequence[str] = (), -) -> Mapping[str, Any]: - """Run-length encode shifts in the given features for a given codec. - - Args: - features: Dict of features to process. - codec: The Codec to use for shift events. - feature_key: The feature key for which to run-length encode shifts. - state_change_event_types: A list of event types that represent state - changes; tokens corresponding to these event types will be interpreted as state changes and redundant ones - will be removed. - - Returns: - The features dict with single-step shifts run-length encoded. - """ - state_change_event_ranges = [codec.event_type_range(event_type) for event_type in state_change_event_types] - - def run_length_encode_shifts(features: MutableMapping[str, Any]) -> Mapping[str, Any]: - """Combine leading/interior shifts, trim trailing shifts. - - Args: - features: Dict of features to process. - - Returns: - A dict of features. - """ - events = features[feature_key] - - shift_steps = 0 - total_shift_steps = 0 - output = np.array([], dtype=np.int32) - - current_state = np.zeros(len(state_change_event_ranges), dtype=np.int32) - - for event in events: - if codec.is_shift_event_index(event): - shift_steps += 1 - total_shift_steps += 1 - - else: - # If this event is a state change and has the same value as the current - # state, we can skip it entirely.
- is_redundant = False - for i, (min_index, max_index) in enumerate(state_change_event_ranges): - if (min_index <= event) and (event <= max_index): - if current_state[i] == event: - is_redundant = True - current_state[i] = event - if is_redundant: - continue - - # Once we've reached a non-shift event, RLE all previous shift events - # before outputting the non-shift event. - if shift_steps > 0: - shift_steps = total_shift_steps - while shift_steps > 0: - output_steps = np.minimum(codec.max_shift_steps, shift_steps) - output = np.concatenate([output, [output_steps]], axis=0) - shift_steps -= output_steps - output = np.concatenate([output, [event]], axis=0) - - features[feature_key] = output - return features - - return run_length_encode_shifts(features) - - -def note_representation_processor_chain(features, codec: Codec, note_representation_config: NoteRepresentationConfig): - tie_token = codec.encode_event(Event("tie", 0)) - state_events_end_token = tie_token if note_representation_config.include_ties else None - - features = extract_sequence_with_indices( - features, state_events_end_token=state_events_end_token, feature_key="inputs" - ) - - features = map_midi_programs(features, codec) - - features = run_length_encode_shifts_fn(features, codec, state_change_event_types=["velocity", "program"]) - - return features - - -class MidiProcessor: - def __init__(self): - self.codec = Codec( - max_shift_steps=DEFAULT_MAX_SHIFT_SECONDS * DEFAULT_STEPS_PER_SECOND, - steps_per_second=DEFAULT_STEPS_PER_SECOND, - event_ranges=[ - EventRange("pitch", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), - EventRange("velocity", 0, DEFAULT_NUM_VELOCITY_BINS), - EventRange("tie", 0, 0), - EventRange("program", note_seq.MIN_MIDI_PROGRAM, note_seq.MAX_MIDI_PROGRAM), - EventRange("drum", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), - ], - ) - self.tokenizer = Tokenizer(self.codec.num_classes) - self.note_representation_config = NoteRepresentationConfig(onsets_only=False, include_ties=True) - - def __call__(self, midi: Union[bytes, os.PathLike, str]): - if not isinstance(midi, bytes): - with open(midi, "rb") as f: - midi = f.read() - - ns = note_seq.midi_to_note_sequence(midi) - ns_sus = note_seq.apply_sustain_control_changes(ns) - - for note in ns_sus.notes: - if not note.is_drum: - note.program = program_to_slakh_program(note.program) - - samples = np.zeros(int(ns_sus.total_time * SAMPLE_RATE)) - - _, frame_times = audio_to_frames(samples, HOP_SIZE, FRAME_RATE) - times, values = note_sequence_to_onsets_and_offsets_and_programs(ns_sus) - - events = encode_and_index_events( - state=NoteEncodingState(), - event_times=times, - event_values=values, - frame_times=frame_times, - codec=self.codec, - encode_event_fn=note_event_data_to_events, - encoding_state_to_events_fn=note_encoding_state_to_events, - ) - - events = [ - note_representation_processor_chain(event, self.codec, self.note_representation_config) for event in events - ] - input_tokens = [self.tokenizer.encode(event["inputs"]) for event in events] - - return input_tokens diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/pil_utils.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/pil_utils.py deleted file mode 100644 index 15b97c73dcb7f85b22fcae95c641dde0123b5f05..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/pil_utils.py +++ /dev/null @@ -1,48 +0,0 @@ -import PIL.Image -import PIL.ImageOps -from packaging import 
version -from PIL import Image - - -if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): - PIL_INTERPOLATION = { - "linear": PIL.Image.Resampling.BILINEAR, - "bilinear": PIL.Image.Resampling.BILINEAR, - "bicubic": PIL.Image.Resampling.BICUBIC, - "lanczos": PIL.Image.Resampling.LANCZOS, - "nearest": PIL.Image.Resampling.NEAREST, - } -else: - PIL_INTERPOLATION = { - "linear": PIL.Image.LINEAR, - "bilinear": PIL.Image.BILINEAR, - "bicubic": PIL.Image.BICUBIC, - "lanczos": PIL.Image.LANCZOS, - "nearest": PIL.Image.NEAREST, - } - - -def pt_to_pil(images): - """ - Convert a torch image to a PIL image. - """ - images = (images / 2 + 0.5).clamp(0, 1) - images = images.cpu().permute(0, 2, 3, 1).float().numpy() - images = numpy_to_pil(images) - return images - - -def numpy_to_pil(images): - """ - Convert a numpy image or a batch of images to a PIL image. - """ - if images.ndim == 3: - images = images[None, ...] - images = (images * 255).round().astype("uint8") - if images.shape[-1] == 1: - # special case for grayscale (single channel) images - pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] - else: - pil_images = [Image.fromarray(image) for image in images] - - return pil_images diff --git a/spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_swin_fpn.py b/spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_swin_fpn.py deleted file mode 100644 index e3d42197f4646cd9ecafac2095d3f8e079f0a729..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_swin_fpn.py +++ /dev/null @@ -1,127 +0,0 @@ -# model settings -model = dict( - type='MaskRCNN', - pretrained=None, - backbone=dict( - type='SwinTransformer', - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.2, - ape=False, - patch_norm=True, - out_indices=(0, 1, 2, 3), - use_checkpoint=False), - neck=dict( - type='FPN', - in_channels=[96, 192, 384, 768], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( 
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/pisa/README.md b/spaces/Andy1621/uniformer_image_detection/configs/pisa/README.md deleted file mode 100644 index 2ab689e2aafe5a046dddce8da62e61e249b9a41c..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/pisa/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Prime Sample Attention in Object Detection - -## Introduction - -[ALGORITHM] - -```latex -@inproceedings{cao2019prime, - title={Prime sample attention in object detection}, - author={Cao, Yuhang and Chen, Kai and Loy, Chen Change and Lin, Dahua}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - year={2020} -} -``` - -## Results and models - -| PISA | Network | Backbone | Lr schd | box AP | mask AP | Config | Download | -|:----:|:-------:|:-------------------:|:-------:|:------:|:-------:|:------:|:--------:| -| × | Faster R-CNN | R-50-FPN | 1x | 36.4 | | - | -| √ | Faster R-CNN | R-50-FPN | 1x | 38.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco_20200506_185619.log.json) | -| × | Faster R-CNN | X101-32x4d-FPN | 1x | 40.1 | | - | -| √ | Faster R-CNN | X101-32x4d-FPN | 1x | 41.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco_20200505_181503.log.json) | -| × | Mask R-CNN | R-50-FPN | 1x | 37.3 | 34.2 | - | -| √ | Mask R-CNN | R-50-FPN | 1x | 39.1 | 35.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth) | 
[log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco_20200508_150500.log.json) | -| × | Mask R-CNN | X101-32x4d-FPN | 1x | 41.1 | 37.1 | - | -| √ | Mask R-CNN | X101-32x4d-FPN | 1x | | | | -| × | RetinaNet | R-50-FPN | 1x | 35.6 | | - | -| √ | RetinaNet | R-50-FPN | 1x | 36.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco_20200504_014311.log.json) | -| × | RetinaNet | X101-32x4d-FPN | 1x | 39.0 | | - | -| √ | RetinaNet | X101-32x4d-FPN | 1x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco_20200505_001404.log.json) | -| × | SSD300 | VGG16 | 1x | 25.6 | | - | -| √ | SSD300 | VGG16 | 1x | 27.6 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd300_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco_20200504_144325.log.json) | -| × | SSD512 | VGG16 | 1x | 29.3 | | - | -| √ | SSD512 | VGG16 | 1x | 31.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd512_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco_20200508_131030.log.json) | - -**Notes:** - -- In the original paper, all models are trained and tested on mmdet v1.x, so results may not exactly match this release on v2.0. -- Note that PISA only modifies the training pipeline, so inference time remains the same as the baseline.
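-A trained PISA model from the table above plugs into MMDetection's standard high-level API. A minimal inference sketch (paths are illustrative; it assumes an MMDetection v2.x checkout and the checkpoint downloaded locally):
-
-```python
-from mmdet.apis import inference_detector, init_detector
-
-config = 'configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py'
-checkpoint = 'pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth'  # linked in the table above
-model = init_detector(config, checkpoint, device='cuda:0')
-result = inference_detector(model, 'demo.jpg')  # per-class arrays of detected boxes
-```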
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 0141a6d0925c2a2aa37517670a9f12ac7d3a02d4..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(mask_size=(66, 66), num_classes=150), - auxiliary_head=dict(num_classes=150)) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_w32.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_w32.py deleted file mode 100644 index 3d9e06f029e46c14cb9ddb39319cabe86fef9b44..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_w32.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = [ - '../../configs/_base_/models/upernet_uniformer.py', - '../../configs/_base_/datasets/ade20k.py', - '../../configs/_base_/default_runtime.py', - '../../configs/_base_/schedules/schedule_160k.py' -] -model = dict( - backbone=dict( - type='UniFormer', - embed_dim=[64, 128, 320, 512], - layers=[3, 4, 8, 3], - head_dim=64, - drop_path_rate=0.25, - windows=True, - hybrid=False, - window_size=32 - ), - decode_head=dict( - in_channels=[64, 128, 320, 512], - num_classes=150 - ), - auxiliary_head=dict( - in_channels=320, - num_classes=150 - )) - -# AdamW optimizer, no weight decay for position embedding & layer norm in backbone -optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01, - paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.)})) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -data=dict(samples_per_gpu=2) \ No newline at end of file diff --git a/spaces/Apex-X/ROOPOK/roop/processors/frame/__init__.py b/spaces/Apex-X/ROOPOK/roop/processors/frame/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/loss.py b/spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/loss.py deleted file mode 100644 index b78caabb33133572cefaacf816468277ee7da18f..0000000000000000000000000000000000000000 --- a/spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/loss.py +++ /dev/null @@ -1,136 +0,0 @@ -from fastai import * -from fastai.core import * -from fastai.torch_core import * -from fastai.callbacks import hook_outputs -import torchvision.models as models - - -class FeatureLoss(nn.Module): - def __init__(self, layer_wgts=[20, 70, 10]): - super().__init__() - - self.m_feat = models.vgg16_bn(True).features.cuda().eval() - requires_grad(self.m_feat, False) - blocks = [ - i - 1 - for i, o in enumerate(children(self.m_feat)) - if isinstance(o, nn.MaxPool2d) - ] - layer_ids = blocks[2:5] - self.loss_features = [self.m_feat[i] for i in layer_ids] - self.hooks = hook_outputs(self.loss_features, 
detach=False) - self.wgts = layer_wgts - self.wass_wgts = wass_wgts - self.metric_names = ( - ['pixel'] - + [f'feat_{i}' for i in range(len(layer_ids))] - + [f'wass_{i}' for i in range(len(layer_ids))] - ) - self.base_loss = F.l1_loss - - def _make_features(self, x, clone=False): - self.m_feat(x) - return [(o.clone() if clone else o) for o in self.hooks.stored] - - def _calc_2_moments(self, tensor): - chans = tensor.shape[1] - tensor = tensor.view(1, chans, -1) - n = tensor.shape[2] - mu = tensor.mean(2) - tensor = (tensor - mu[:, :, None]).squeeze(0) - # Prevents a nasty divide-by-zero bug that happens very occasionally, when the flattened feature map has no elements (handled by the n == 0 guard below).
- if n == 0: - return None, None - cov = torch.mm(tensor, tensor.t()) / float(n) - return mu, cov - - def _get_style_vals(self, tensor): - mean, cov = self._calc_2_moments(tensor) - if mean is None: - return None, None, None - eigvals, eigvects = torch.symeig(cov, eigenvectors=True) - eigroot_mat = torch.diag(torch.sqrt(eigvals.clamp(min=0))) - root_cov = torch.mm(torch.mm(eigvects, eigroot_mat), eigvects.t()) - tr_cov = eigvals.clamp(min=0).sum() - return mean, tr_cov, root_cov - - def _calc_l2wass_dist( - self, mean_stl, tr_cov_stl, root_cov_stl, mean_synth, cov_synth - ): - tr_cov_synth = torch.symeig(cov_synth, eigenvectors=True)[0].clamp(min=0).sum() - mean_diff_squared = (mean_stl - mean_synth).pow(2).sum() - cov_prod = torch.mm(torch.mm(root_cov_stl, cov_synth), root_cov_stl) - var_overlap = torch.sqrt( - torch.symeig(cov_prod, eigenvectors=True)[0].clamp(min=0) + 1e-8 - ).sum() - dist = mean_diff_squared + tr_cov_stl + tr_cov_synth - 2 * var_overlap - return dist - - def _single_wass_loss(self, pred, targ): - mean_test, tr_cov_test, root_cov_test = targ - mean_synth, cov_synth = self._calc_2_moments(pred) - loss = self._calc_l2wass_dist( - mean_test, tr_cov_test, root_cov_test, mean_synth, cov_synth - ) - return loss - - def forward(self, input, target): - out_feat = self._make_features(target, clone=True) - in_feat = self._make_features(input) - self.feat_losses = [self.base_loss(input, target)] - self.feat_losses += [ - self.base_loss(f_in, f_out) * w - for f_in, f_out, w in zip(in_feat, out_feat, self.wgts) - ] - - styles = [self._get_style_vals(i) for i in out_feat] - - if styles[0][0] is not None: - self.feat_losses += [ - self._single_wass_loss(f_pred, f_targ) * w - for f_pred, f_targ, w in zip(in_feat, styles, self.wass_wgts) - ] - - self.metrics = dict(zip(self.metric_names, self.feat_losses)) - return sum(self.feat_losses) - - def __del__(self): - self.hooks.remove() diff --git a/spaces/AriaMei/TTSdemo/train_ms.py b/spaces/AriaMei/TTSdemo/train_ms.py deleted file mode 100644 index ca3f3ab760f7c0647e620255eb73be9908b215fb..0000000000000000000000000000000000000000 --- a/spaces/AriaMei/TTSdemo/train_ms.py +++ /dev/null @@ -1,296 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler - -import commons -import utils -from data_utils import ( - TextAudioSpeakerLoader, - TextAudioSpeakerCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - - -torch.backends.cudnn.benchmark = True -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." 
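- # Single node, one worker process per visible GPU; the MASTER_ADDR/MASTER_PORT
- # set below are the rendezvous point used by torch.distributed's NCCL backend.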
- - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '8899' - - hps = utils.get_hparams() - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - print(hps) or logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32,300,400,500,600,700,800,900,1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioSpeakerCollate() - train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False, - batch_size=hps.train.batch_size, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model).cuda(rank) - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - net_g.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - net_g = DDP(net_g, device_ids=[rank]) - net_d = DDP(net_d, device_ids=[rank]) - - try: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d) - global_step = (epoch_str - 1) * len(train_loader) - except: - epoch_str = 1 - global_step = 0 - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank==0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d = nets - optim_g, optim_d = optims - scheduler_g, scheduler_d = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, emo) in enumerate(train_loader): - x, x_lengths = 
x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - speakers = speakers.cuda(rank, non_blocking=True) - emo = emo.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask,\ - (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths, speakers, emo) - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - optim_g.zero_grad() - scaler.scale(loss_gen_all.float()).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank==0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - print([x.item() for x in losses] + [global_step, lr]) or logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - global_step += 1 - - if rank == 0: - print('====> Epoch: {}'.format(epoch)) or logger.info('====> Epoch: {}'.format(epoch)) - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, emo) in enumerate(eval_loader): - x, x_lengths = x.cuda(0), x_lengths.cuda(0) - spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0) - y, y_lengths = y.cuda(0), y_lengths.cuda(0) - speakers = speakers.cuda(0) - emo = emo.cuda(0) - # remove else - x = x[:1] - x_lengths = x_lengths[:1] - spec = spec[:1] - spec_lengths = spec_lengths[:1] - y = y[:1] - y_lengths = y_lengths[:1] - speakers = speakers[:1] - emo = emo[:1] - break - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers,emo, max_len=1000) - y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict = { - "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - } - audio_dict = { - "gen/audio": y_hat[0,:,:y_hat_lengths[0]] - } - if global_step == 0: - image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - - -if __name__ == "__main__": - main() diff --git a/spaces/Armandoliv/cars-parts-segmentation-resnet18/app.py b/spaces/Armandoliv/cars-parts-segmentation-resnet18/app.py deleted file mode 100644 index 
34ecd18d9e36a2884d6c64570a229186b73de11a..0000000000000000000000000000000000000000 --- a/spaces/Armandoliv/cars-parts-segmentation-resnet18/app.py +++ /dev/null @@ -1,184 +0,0 @@ -import segmentation_models as sm -import numpy as np -import os -import cv2 -import keras -import matplotlib.colors as colorsHTML -from PIL import Image -import gradio as gr - -import os -os.system('wget https://huggingface.co/Armandoliv/cars-parts-segmentation-unet-resnet18/resolve/main/best_model.h5') -os.system('pip -qq install pycocotools @ git+https://github.com/philferriere/cocoapi.git@2929bd2ef6b451054755dfd7ceb09278f935f7ad#subdirectory=PythonAPI') - -c= ['_background_', 'back_bumper', 'back_glass', 'back_left_door','back_left_light', - 'back_right_door', 'back_right_light', 'front_bumper','front_glass', - 'front_left_door', 'front_left_light', 'front_right_door', 'front_right_light', 'hood', 'left_mirror', - 'right_mirror', 'tailgate', 'trunk', 'wheel'] - -colors = [ (245,255,250), (75,0,130), (0,255,0), (32,178,170),(0,0,255), (0,255,255), (255,0,255), (128,0,128), (255,140,0), - (85,107,47), (102,205,170), (0,191,255), (255,0,0), (255,228,196), (205,133,63), - (220,20,60), (255,69,0), (143,188,143), (255,255,0)] - - -sm.set_framework('tf.keras') - -sm.framework() - -BACKBONE = 'resnet18' -n_classes = 19 -activation = 'softmax' - -#create model -model = sm.Unet(BACKBONE, classes=n_classes, activation=activation) - -# load best weights -model.load_weights('best_model.h5') - -def get_colored_segmentation_image(seg_arr, n_classes, colors=colors): - output_height = seg_arr.shape[0] - output_width = seg_arr.shape[1] - - seg_img = np.zeros((output_height, output_width, 3)) - - for c in range(n_classes): - seg_arr_c = seg_arr[:, :] == c - # print(sum(sum(seg_arr_c)), colors[c] ) - seg_img[:, :, 0] += ((seg_arr_c)*(colors[c][0])).astype('uint8') - seg_img[:, :, 1] += ((seg_arr_c)*(colors[c][1])).astype('uint8') - seg_img[:, :, 2] += ((seg_arr_c)*(colors[c][2])).astype('uint8') - - return seg_img/255 - -def get_legends(class_names, colors, tags): - - n_classes = len(class_names) - legend = np.zeros(((len(class_names) * 25) + 25, 125, 3), - dtype="uint8") + 255 - - class_names_colors = enumerate(zip(class_names[:n_classes], - colors[:n_classes])) - j = 0 - for (i, (class_name, color)) in class_names_colors: - if i in tags: - color = [int(c) for c in color] - cv2.putText(legend, class_name, (5, (j * 25) + 17), - cv2.FONT_HERSHEY_COMPLEX, 0.35, (0, 0, 0), 1) - cv2.rectangle(legend, (100, (j* 25)), (125, (j * 25) + 25), - tuple(color), -1) - j +=1 - return legend - - - -def preprocess_image(path_img): - img = Image.open(path_img) - ww = 512 - hh = 512 - img.thumbnail((hh, ww)) - i = np.array(img) - ht, wd, cc= i.shape - - # create new image of desired size and color (blue) for padding - color = (0,0,0) - result = np.full((hh,ww,cc), color, dtype=np.uint8) - - # copy img image into center of result image - result[:ht, :wd] = img - return result, ht, wd - -def concat_lengends(seg_img, legend_img): - - new_h = np.maximum(seg_img.shape[0], legend_img.shape[0]) - new_w = seg_img.shape[1] + legend_img.shape[1] - - out_img = np.zeros((new_h, new_w, 3)).astype('uint8') + legend_img[0, 0, 0] - - out_img[:legend_img.shape[0], : legend_img.shape[1]] = np.copy(legend_img) - out_img[:seg_img.shape[0], legend_img.shape[1]:] = np.copy(seg_img) - - return out_img - -def main_convert(filename): - - print(filename) - #load the image - img_path = filename - img = Image.open(img_path).convert("RGB") - tags = [] - - #preprocess the 
image - img_scaled_arr = preprocess_image(img_path) - image = np.expand_dims(img_scaled_arr[0], axis=0) - - #make the predictions - pr_mask = model.predict(image).squeeze() - pr_mask_int = np.zeros((pr_mask.shape[0],pr_mask.shape[1])) - - #filter the smallest noisy segments - kernel = np.ones((5, 5), 'uint8') - - for i in range(1,19): - array_one = np.round(pr_mask[:,:,i]) - op = cv2.morphologyEx(array_one, cv2.MORPH_OPEN, kernel) - if sum(sum(op ==1)) > 100: - tags.append(i) - pr_mask_int[op ==1] = i - - img_segmented = np.array(Image.fromarray(pr_mask_int[:img_scaled_arr[1], :img_scaled_arr[2]]).resize(img.size)) - - seg = get_colored_segmentation_image(img_segmented,19, colors=colors) - - fused_img = ((np.array(img)/255)/2 + seg/2).astype('float32') - - seg = Image.fromarray((seg*255).astype(np.uint8)) - fused_img = Image.fromarray((fused_img *255).astype(np.uint8)) - - #get the legends - legend_predicted = get_legends(c, colors, tags) - - final_img = concat_lengends(np.array(fused_img), np.array(legend_predicted)) - - return final_img, seg - - - -inputs = [gr.Image(type="filepath", label="Car Image")] -outputs = [gr.Image(type="PIL.Image", label="Detected Segments Image"),gr.Image(type="PIL.Image", label="Segment Image")] - - -title = "Car Parts Segmentation APP" -description = """This demo uses AI Models to detect 18 parts of cars: \n -1: background, -2: back bumper, -3: back glass, -4: back left door, -5: back left light, -6: back right door, -7: back right light, -8: front bumper, -9: front glass, -10: front left door, -11: front left light, -12: front right door, -13: front right light, -14: hood, -15: left mirror, -16: right mirror, -17: tailgate, -18: trunk, -19: wheel""" - -examples = [['test_image.jpeg']] -io = gr.Interface(fn=main_convert, inputs=inputs, outputs=outputs, title=title, description=description, examples=examples, - css= """.gr-button-primary { background: -webkit-linear-gradient( - 90deg, #355764 0%, #55a8a1 100% ) !important; background: #355764; - background: linear-gradient( - 90deg, #355764 0%, #55a8a1 100% ) !important; - background: -moz-linear-gradient( 90deg, #355764 0%, #55a8a1 100% ) !important; - background: -webkit-linear-gradient( - 90deg, #355764 0%, #55a8a1 100% ) !important; - color:white !important}""" - ) - -io.launch() \ No newline at end of file diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_windows.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_windows.py deleted file mode 100644 index 10fc0d7e9f398dd550a42c6b8c0637684882ee60..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_windows.py +++ /dev/null @@ -1,72 +0,0 @@ -import sys -from dataclasses import dataclass - - -@dataclass -class WindowsConsoleFeatures: - """Windows features available.""" - - vt: bool = False - """The console supports VT codes.""" - truecolor: bool = False - """The console supports truecolor.""" - - -try: - import ctypes - from ctypes import LibraryLoader - - if sys.platform == "win32": - windll = LibraryLoader(ctypes.WinDLL) - else: - windll = None - raise ImportError("Not windows") - - from pip._vendor.rich._win32_console import ( - ENABLE_VIRTUAL_TERMINAL_PROCESSING, - GetConsoleMode, - GetStdHandle, - LegacyWindowsError, - ) - -except (AttributeError, ImportError, ValueError): - - # Fallback if we can't load the Windows DLL - def get_windows_console_features() -> 
WindowsConsoleFeatures: - features = WindowsConsoleFeatures() - return features - -else: - - def get_windows_console_features() -> WindowsConsoleFeatures: - """Get windows console features. - - Returns: - WindowsConsoleFeatures: An instance of WindowsConsoleFeatures. - """ - handle = GetStdHandle() - try: - console_mode = GetConsoleMode(handle) - success = True - except LegacyWindowsError: - console_mode = 0 - success = False - vt = bool(success and console_mode & ENABLE_VIRTUAL_TERMINAL_PROCESSING) - truecolor = False - if vt: - win_version = sys.getwindowsversion() - truecolor = win_version.major > 10 or ( - win_version.major == 10 and win_version.build >= 15063 - ) - features = WindowsConsoleFeatures(vt=vt, truecolor=truecolor) - return features - - -if __name__ == "__main__": - import platform - - features = get_windows_console_features() - from pip._vendor.rich import print - - print(f'platform="{platform.system()}"') - print(repr(features)) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/__init__.py deleted file mode 100644 index 6c24cc2b30421bad1cb5f8ca525bc42b57ad9761..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/__init__.py +++ /dev/null @@ -1,247 +0,0 @@ -"""Extensions to the 'distutils' for large or complex distributions""" - -import functools -import os -import re -import warnings - -import _distutils_hack.override # noqa: F401 - -import distutils.core -from distutils.errors import DistutilsOptionError -from distutils.util import convert_path as _convert_path - -from ._deprecation_warning import SetuptoolsDeprecationWarning - -import setuptools.version -from setuptools.extension import Extension -from setuptools.dist import Distribution -from setuptools.depends import Require -from setuptools.discovery import PackageFinder, PEP420PackageFinder -from . import monkey -from . import logging - - -__all__ = [ - 'setup', - 'Distribution', - 'Command', - 'Extension', - 'Require', - 'SetuptoolsDeprecationWarning', - 'find_packages', - 'find_namespace_packages', -] - -__version__ = setuptools.version.__version__ - -bootstrap_install_from = None - - -find_packages = PackageFinder.find -find_namespace_packages = PEP420PackageFinder.find - - -def _install_setup_requires(attrs): - # Note: do not use `setuptools.Distribution` directly, as - # our PEP 517 backend patch `distutils.core.Distribution`. - class MinimalDistribution(distutils.core.Distribution): - """ - A minimal version of a distribution for supporting the - fetch_build_eggs interface. - """ - - def __init__(self, attrs): - _incl = 'dependency_links', 'setup_requires' - filtered = {k: attrs[k] for k in set(_incl) & set(attrs)} - super().__init__(filtered) - # Prevent accidentally triggering discovery with incomplete set of attrs - self.set_defaults._disable() - - def _get_project_config_files(self, filenames=None): - """Ignore ``pyproject.toml``, they are not related to setup_requires""" - try: - cfg, toml = super()._split_standard_project_metadata(filenames) - return cfg, () - except Exception: - return filenames, () - - def finalize_options(self): - """ - Disable finalize_options to avoid building the working set. - Ref #2158. - """ - - dist = MinimalDistribution(attrs) - - # Honor setup.cfg's options. 
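- # A minimal caller-side sketch (illustrative only; the package name is
- # hypothetical and not part of this file): a call such as
- #
- # from setuptools import setup
- # setup(name="demo", version="0.1", setup_requires=["wheel"])
- #
- # reaches this point via setup() -> _install_setup_requires(attrs), so the
- # build dependencies below are fetched before the real Distribution object
- # is ever constructed.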
- dist.parse_config_files(ignore_option_errors=True) - if dist.setup_requires: - dist.fetch_build_eggs(dist.setup_requires) - - -def setup(**attrs): - # Make sure we have any requirements needed to interpret 'attrs'. - logging.configure() - _install_setup_requires(attrs) - return distutils.core.setup(**attrs) - - -setup.__doc__ = distutils.core.setup.__doc__ - - -_Command = monkey.get_unpatched(distutils.core.Command) - - -class Command(_Command): - """ - Setuptools internal actions are organized using a *command design pattern*. - This means that each action (or group of closely related actions) executed during - the build should be implemented as a ``Command`` subclass. - - These commands are abstractions and do not necessarily correspond to a command that - can (or should) be executed via a terminal, in a CLI fashion (although historically - they would). - - When creating a new command from scratch, custom defined classes **SHOULD** inherit - from ``setuptools.Command`` and implement a few mandatory methods. - Between these mandatory methods, are listed: - - .. method:: initialize_options(self) - - Set or (reset) all options/attributes/caches used by the command - to their default values. Note that these values may be overwritten during - the build. - - .. method:: finalize_options(self) - - Set final values for all options/attributes used by the command. - Most of the time, each option/attribute/cache should only be set if it does not - have any value yet (e.g. ``if self.attr is None: self.attr = val``). - - .. method:: run(self) - - Execute the actions intended by the command. - (Side effects **SHOULD** only take place when ``run`` is executed, - for example, creating new files or writing to the terminal output). - - A useful analogy for command classes is to think of them as subroutines with local - variables called "options". The options are "declared" in ``initialize_options()`` - and "defined" (given their final values, aka "finalized") in ``finalize_options()``, - both of which must be defined by every command class. The "body" of the subroutine, - (where it does all the work) is the ``run()`` method. - Between ``initialize_options()`` and ``finalize_options()``, ``setuptools`` may set - the values for options/attributes based on user's input (or circumstance), - which means that the implementation should be careful to not overwrite values in - ``finalize_options`` unless necessary. - - Please note that other commands (or other parts of setuptools) may also overwrite - the values of the command's options/attributes multiple times during the build - process. - Therefore it is important to consistently implement ``initialize_options()`` and - ``finalize_options()``. For example, all derived attributes (or attributes that - depend on the value of other attributes) **SHOULD** be recomputed in - ``finalize_options``. - - When overwriting existing commands, custom defined classes **MUST** abide by the - same APIs implemented by the original class. They also **SHOULD** inherit from the - original class. - """ - - command_consumes_arguments = False - - def __init__(self, dist, **kw): - """ - Construct the command for dist, updating - vars(self) with any keyword parameters. 
- """ - super().__init__(dist) - vars(self).update(kw) - - def _ensure_stringlike(self, option, what, default=None): - val = getattr(self, option) - if val is None: - setattr(self, option, default) - return default - elif not isinstance(val, str): - raise DistutilsOptionError( - "'%s' must be a %s (got `%s`)" % (option, what, val) - ) - return val - - def ensure_string_list(self, option): - r"""Ensure that 'option' is a list of strings. If 'option' is - currently a string, we split it either on /,\s*/ or /\s+/, so - "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become - ["foo", "bar", "baz"]. - - .. - TODO: This method seems to be similar to the one in ``distutils.cmd`` - Probably it is just here for backward compatibility with old Python versions? - - :meta private: - """ - val = getattr(self, option) - if val is None: - return - elif isinstance(val, str): - setattr(self, option, re.split(r',\s*|\s+', val)) - else: - if isinstance(val, list): - ok = all(isinstance(v, str) for v in val) - else: - ok = False - if not ok: - raise DistutilsOptionError( - "'%s' must be a list of strings (got %r)" % (option, val) - ) - - def reinitialize_command(self, command, reinit_subcommands=0, **kw): - cmd = _Command.reinitialize_command(self, command, reinit_subcommands) - vars(cmd).update(kw) - return cmd - - -def _find_all_simple(path): - """ - Find all files under 'path' - """ - results = ( - os.path.join(base, file) - for base, dirs, files in os.walk(path, followlinks=True) - for file in files - ) - return filter(os.path.isfile, results) - - -def findall(dir=os.curdir): - """ - Find all files under 'dir' and return the list of full filenames. - Unless dir is '.', return full filenames with dir prepended. - """ - files = _find_all_simple(dir) - if dir == os.curdir: - make_rel = functools.partial(os.path.relpath, start=dir) - files = map(make_rel, files) - return list(files) - - -@functools.wraps(_convert_path) -def convert_path(pathname): - from inspect import cleandoc - - msg = """ - The function `convert_path` is considered internal and not part of the public API. - Its direct usage by 3rd-party packages is considered deprecated and the function - may be removed in the future. 
- """ - warnings.warn(cleandoc(msg), SetuptoolsDeprecationWarning) - return _convert_path(pathname) - - -class sic(str): - """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)""" - - -# Apply monkey patches -monkey.patch_all() diff --git a/spaces/BairaS/Tabular_ML/app.py b/spaces/BairaS/Tabular_ML/app.py deleted file mode 100644 index 95bd675795c8481103158372f9fc8580b719f157..0000000000000000000000000000000000000000 --- a/spaces/BairaS/Tabular_ML/app.py +++ /dev/null @@ -1,87 +0,0 @@ -import streamlit as st -import plotly.figure_factory as ff -import matplotlib, matplotlib.pyplot as pl -import shap -import pandas as pd -import os - -#import profiling capability -import pandas_profiling -from streamlit_pandas_profiling import st_profile_report - -#ML stuff -from pycaret.classification import * -#from pycaret.regression import setup, compare_models, pull, save_model, load_model - -if os.path.exists("sourcedata.csv"): - df = pd.read_csv("sourcedata.csv", index_col=None) - -with st.sidebar: - st.image("https://www.onepointltd.com/wp-content/uploads/2020/03/inno2.png") - st.title("Tabular_ML") - choice = st.radio("Navigation", ["Upload", "Profiling", "Modelling", "Analyse", "Download"]) - st.info("This application allows you to build an automated ML pipeline using Streamlit, Panadas Profiling and PyCaret.") - -if choice == "Upload": - st.title("Upload Your Data for Modelling!") - file = st.file_uploader("Upload Your Dataset Here") - if file: - df = pd.read_csv(file, index_col=None) - df.to_csv("sourcedata.csv", index=None) - st.dataframe(df) - -if choice == "Profiling": - st.title("Automated Exploratory Data Analysis") - #profile_report = df.profile_report() - #st_profile_report(profile_report) - st_profile_report(pandas_profiling.ProfileReport(df)) - -if choice == "Modelling": - chosen_target = st.selectbox('Choose the Target Column', df.columns) - if st.button('Run Modelling'): - setup(df, target=chosen_target, silent=True) - setup_df = pull() - #st.dataframe(setup_df) - best_model = compare_models(include = ['rf','et', 'dt', 'lightgbm']) - compare_df = pull() - st.dataframe(compare_df) - save_model(best_model, 'best_model') - -if choice == "Analyse": - st.title("Correlation Analysis") - best_model = load_model('best_model') - train_pipe = best_model[:-1].transform(df) - explainer = shap.TreeExplainer(best_model.named_steps["trained_model"]) - shap_values = explainer.shap_values(train_pipe) - pl.title('Assessing feature importance based on Shap values') - shap.summary_plot(shap_values,df,plot_type="bar",show=False) - st.pyplot(bbox_inches='tight') - pl.clf() - #st.pyplot(shap.force_plot(explainer.expected_value[0], shap_values[0])) - #shap.force_plot(explainer.expected_value, shap_values, train_pipe) - #st.write(interpret_model(train_pipe, plot = 'correlation')) - - #chosen_target = st.selectbox('Choose the Target Column', df.columns) - #if st.button('Run Modelling'): - # setup(df, target=chosen_target, silent=True) - # setup_df = pull() - - # creating a model - # xgboost = create_model('xgboost') - - # interpret model - # st.write(interpret_model(xgboost,, plot = 'correlation')) - #st.plotly_chart(interpret_model(xgboost), use_container_width=True) - - #chosen_target = st.selectbox('Choose the Target Column', df.columns) - #if st.button('Run xgboost Modelling Analysis'): - #chosen_target = st.selectbox('Choose the Target Column', df.columns) - # creating a model - #setup(df, target=chosen_target, silent=True) - #xgboost = create_model('xgboost') - #interpret model - - - -if choice 
== "Download": - pass \ No newline at end of file diff --git a/spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/custom.py b/spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/custom.py deleted file mode 100644 index 33f302a4b55ba1e8ec282ec3292b6263c06dfb91..0000000000000000000000000000000000000000 --- a/spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/custom.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import numpy as np -import albumentations -from torch.utils.data import Dataset - -from taming.data.base import ImagePaths, NumpyPaths, ConcatDatasetWithIndex - - -class CustomBase(Dataset): - def __init__(self, *args, **kwargs): - super().__init__() - self.data = None - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - example = self.data[i] - return example - - - -class CustomTrain(CustomBase): - def __init__(self, size, training_images_list_file): - super().__init__() - with open(training_images_list_file, "r") as f: - paths = f.read().splitlines() - self.data = ImagePaths(paths=paths, size=size, random_crop=False) - - -class CustomTest(CustomBase): - def __init__(self, size, test_images_list_file): - super().__init__() - with open(test_images_list_file, "r") as f: - paths = f.read().splitlines() - self.data = ImagePaths(paths=paths, size=size, random_crop=False) - - diff --git a/spaces/BetterAPI/BetterChat/src/hooks.server.ts b/spaces/BetterAPI/BetterChat/src/hooks.server.ts deleted file mode 100644 index 04cc75cac042fda3cabd7244584ae9aa5bf2a46f..0000000000000000000000000000000000000000 --- a/spaces/BetterAPI/BetterChat/src/hooks.server.ts +++ /dev/null @@ -1,37 +0,0 @@ -import { dev } from "$app/environment"; -import { COOKIE_NAME } from "$env/static/private"; -import type { Handle } from "@sveltejs/kit"; -import { PUBLIC_GOOGLE_ANALYTICS_ID } from "$env/static/public"; -import { addYears } from "date-fns"; - -export const handle: Handle = async ({ event, resolve }) => { - const token = event.cookies.get(COOKIE_NAME); - - event.locals.sessionId = token || crypto.randomUUID(); - - // Refresh cookie expiration date - event.cookies.set(COOKIE_NAME, event.locals.sessionId, { - path: "/", - // So that it works inside the space's iframe - sameSite: dev ? 
"lax" : "none", - secure: !dev, - httpOnly: true, - expires: addYears(new Date(), 1), - }); - - let replaced = false; - - const response = await resolve(event, { - transformPageChunk: (chunk) => { - // For some reason, Sveltekit doesn't let us load env variables from .env in the app.html template - if (replaced || !chunk.html.includes("%gaId%")) { - return chunk.html; - } - replaced = true; - - return chunk.html.replace("%gaId%", PUBLIC_GOOGLE_ANALYTICS_ID); - }, - }); - - return response; -}; diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/containers.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/containers.py deleted file mode 100644 index e29cf368991ccb083b67cda8133e4635defbfe53..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/containers.py +++ /dev/null @@ -1,167 +0,0 @@ -from itertools import zip_longest -from typing import ( - Iterator, - Iterable, - List, - Optional, - Union, - overload, - TypeVar, - TYPE_CHECKING, -) - -if TYPE_CHECKING: - from .console import ( - Console, - ConsoleOptions, - JustifyMethod, - OverflowMethod, - RenderResult, - RenderableType, - ) - from .text import Text - -from .cells import cell_len -from .measure import Measurement - -T = TypeVar("T") - - -class Renderables: - """A list subclass which renders its contents to the console.""" - - def __init__( - self, renderables: Optional[Iterable["RenderableType"]] = None - ) -> None: - self._renderables: List["RenderableType"] = ( - list(renderables) if renderables is not None else [] - ) - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - """Console render method to insert line-breaks.""" - yield from self._renderables - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> "Measurement": - dimensions = [ - Measurement.get(console, options, renderable) - for renderable in self._renderables - ] - if not dimensions: - return Measurement(1, 1) - _min = max(dimension.minimum for dimension in dimensions) - _max = max(dimension.maximum for dimension in dimensions) - return Measurement(_min, _max) - - def append(self, renderable: "RenderableType") -> None: - self._renderables.append(renderable) - - def __iter__(self) -> Iterable["RenderableType"]: - return iter(self._renderables) - - -class Lines: - """A list subclass which can render to the console.""" - - def __init__(self, lines: Iterable["Text"] = ()) -> None: - self._lines: List["Text"] = list(lines) - - def __repr__(self) -> str: - return f"Lines({self._lines!r})" - - def __iter__(self) -> Iterator["Text"]: - return iter(self._lines) - - @overload - def __getitem__(self, index: int) -> "Text": - ... - - @overload - def __getitem__(self, index: slice) -> List["Text"]: - ... 
- - def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]: - return self._lines[index] - - def __setitem__(self, index: int, value: "Text") -> "Lines": - self._lines[index] = value - return self - - def __len__(self) -> int: - return self._lines.__len__() - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - """Console render method to insert line-breaks.""" - yield from self._lines - - def append(self, line: "Text") -> None: - self._lines.append(line) - - def extend(self, lines: Iterable["Text"]) -> None: - self._lines.extend(lines) - - def pop(self, index: int = -1) -> "Text": - return self._lines.pop(index) - - def justify( - self, - console: "Console", - width: int, - justify: "JustifyMethod" = "left", - overflow: "OverflowMethod" = "fold", - ) -> None: - """Justify and overflow text to a given width. - - Args: - console (Console): Console instance. - width (int): Number of characters per line. - justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left". - overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold". - - """ - from .text import Text - - if justify == "left": - for line in self._lines: - line.truncate(width, overflow=overflow, pad=True) - elif justify == "center": - for line in self._lines: - line.rstrip() - line.truncate(width, overflow=overflow) - line.pad_left((width - cell_len(line.plain)) // 2) - line.pad_right(width - cell_len(line.plain)) - elif justify == "right": - for line in self._lines: - line.rstrip() - line.truncate(width, overflow=overflow) - line.pad_left(width - cell_len(line.plain)) - elif justify == "full": - for line_index, line in enumerate(self._lines): - if line_index == len(self._lines) - 1: - break - words = line.split(" ") - words_size = sum(cell_len(word.plain) for word in words) - num_spaces = len(words) - 1 - spaces = [1 for _ in range(num_spaces)] - index = 0 - if spaces: - while words_size + num_spaces < width: - spaces[len(spaces) - index - 1] += 1 - num_spaces += 1 - index = (index + 1) % len(spaces) - tokens: List[Text] = [] - for index, (word, next_word) in enumerate( - zip_longest(words, words[1:]) - ): - tokens.append(word) - if index < len(spaces): - style = word.get_style_at_offset(console, -1) - next_style = next_word.get_style_at_offset(console, 0) - space_style = style if style == next_style else line.style - tokens.append(Text(" " * spaces[index], style=space_style)) - self[line_index] = Text("").join(tokens) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/jaraco/text/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/jaraco/text/__init__.py deleted file mode 100644 index a0306d5ff5cc4a2eb76458c127c462efe59a566d..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/jaraco/text/__init__.py +++ /dev/null @@ -1,599 +0,0 @@ -import re -import itertools -import textwrap -import functools - -try: - from importlib.resources import files # type: ignore -except ImportError: # pragma: nocover - from setuptools.extern.importlib_resources import files # type: ignore - -from setuptools.extern.jaraco.functools import compose, method_cache -from setuptools.extern.jaraco.context import ExceptionTrap - - -def substitution(old, new): - """ - Return a function that will perform a substitution on a string - """ - return lambda s: s.replace(old, new) - - -def 
multi_substitution(*substitutions): - """ - Take a sequence of pairs specifying substitutions, and create - a function that performs those substitutions. - - >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo') - 'baz' - """ - substitutions = itertools.starmap(substitution, substitutions) - # compose function applies last function first, so reverse the - # substitutions to get the expected order. - substitutions = reversed(tuple(substitutions)) - return compose(*substitutions) - - -class FoldedCase(str): - """ - A case insensitive string class; behaves just like str - except compares equal when the only variation is case. - - >>> s = FoldedCase('hello world') - - >>> s == 'Hello World' - True - - >>> 'Hello World' == s - True - - >>> s != 'Hello World' - False - - >>> s.index('O') - 4 - - >>> s.split('O') - ['hell', ' w', 'rld'] - - >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) - ['alpha', 'Beta', 'GAMMA'] - - Sequence membership is straightforward. - - >>> "Hello World" in [s] - True - >>> s in ["Hello World"] - True - - You may test for set inclusion, but candidate and elements - must both be folded. - - >>> FoldedCase("Hello World") in {s} - True - >>> s in {FoldedCase("Hello World")} - True - - String inclusion works as long as the FoldedCase object - is on the right. - - >>> "hello" in FoldedCase("Hello World") - True - - But not if the FoldedCase object is on the left: - - >>> FoldedCase('hello') in 'Hello World' - False - - In that case, use ``in_``: - - >>> FoldedCase('hello').in_('Hello World') - True - - >>> FoldedCase('hello') > FoldedCase('Hello') - False - """ - - def __lt__(self, other): - return self.lower() < other.lower() - - def __gt__(self, other): - return self.lower() > other.lower() - - def __eq__(self, other): - return self.lower() == other.lower() - - def __ne__(self, other): - return self.lower() != other.lower() - - def __hash__(self): - return hash(self.lower()) - - def __contains__(self, other): - return super().lower().__contains__(other.lower()) - - def in_(self, other): - "Does self appear in other?" - return self in FoldedCase(other) - - # cache lower since it's likely to be called frequently. - @method_cache - def lower(self): - return super().lower() - - def index(self, sub): - return self.lower().index(sub.lower()) - - def split(self, splitter=' ', maxsplit=0): - pattern = re.compile(re.escape(splitter), re.I) - return pattern.split(self, maxsplit) - - -# Python 3.8 compatibility -_unicode_trap = ExceptionTrap(UnicodeDecodeError) - - -@_unicode_trap.passes -def is_decodable(value): - r""" - Return True if the supplied value is decodable (using the default - encoding). - - >>> is_decodable(b'\xff') - False - >>> is_decodable(b'\x32') - True - """ - value.decode() - - -def is_binary(value): - r""" - Return True if the value appears to be binary (that is, it's a byte - string and isn't decodable). - - >>> is_binary(b'\xff') - True - >>> is_binary('\xff') - False - """ - return isinstance(value, bytes) and not is_decodable(value) - - -def trim(s): - r""" - Trim something like a docstring to remove the whitespace that - is common due to indentation and formatting. - - >>> trim("\n\tfoo = bar\n\t\tbar = baz\n") - 'foo = bar\n\tbar = baz' - """ - return textwrap.dedent(s).strip() - - -def wrap(s): - """ - Wrap lines of text, retaining existing newlines as - paragraph markers. - - >>> print(wrap(lorem_ipsum)) - Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do - eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad - minim veniam, quis nostrud exercitation ullamco laboris nisi ut - aliquip ex ea commodo consequat. Duis aute irure dolor in - reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla - pariatur. Excepteur sint occaecat cupidatat non proident, sunt in - culpa qui officia deserunt mollit anim id est laborum. - - Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam - varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus - magna felis sollicitudin mauris. Integer in mauris eu nibh euismod - gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis - risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, - eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas - fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla - a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, - neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing - sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque - nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus - quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, - molestie eu, feugiat in, orci. In hac habitasse platea dictumst. - """ - paragraphs = s.splitlines() - wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs) - return '\n\n'.join(wrapped) - - -def unwrap(s): - r""" - Given a multi-line string, return an unwrapped version. - - >>> wrapped = wrap(lorem_ipsum) - >>> wrapped.count('\n') - 20 - >>> unwrapped = unwrap(wrapped) - >>> unwrapped.count('\n') - 1 - >>> print(unwrapped) - Lorem ipsum dolor sit amet, consectetur adipiscing ... - Curabitur pretium tincidunt lacus. Nulla gravida orci ... - - """ - paragraphs = re.split(r'\n\n+', s) - cleaned = (para.replace('\n', ' ') for para in paragraphs) - return '\n'.join(cleaned) - - - - -class Splitter(object): - """object that will split a string with the given arguments for each call - - >>> s = Splitter(',') - >>> s('hello, world, this is your, master calling') - ['hello', ' world', ' this is your', ' master calling'] - """ - - def __init__(self, *args): - self.args = args - - def __call__(self, s): - return s.split(*self.args) - - -def indent(string, prefix=' ' * 4): - """ - >>> indent('foo') - ' foo' - """ - return prefix + string - - -class WordSet(tuple): - """ - Given an identifier, return the words that identifier represents, - whether in camel case, underscore-separated, etc. - - >>> WordSet.parse("camelCase") - ('camel', 'Case') - - >>> WordSet.parse("under_sep") - ('under', 'sep') - - Acronyms should be retained - - >>> WordSet.parse("firstSNL") - ('first', 'SNL') - - >>> WordSet.parse("you_and_I") - ('you', 'and', 'I') - - >>> WordSet.parse("A simple test") - ('A', 'simple', 'test') - - Multiple caps should not interfere with the first cap of another word. - - >>> WordSet.parse("myABCClass") - ('my', 'ABC', 'Class') - - The result is a WordSet, so you can get the form you need. - - >>> WordSet.parse("myABCClass").underscore_separated() - 'my_ABC_Class' - - >>> WordSet.parse('a-command').camel_case() - 'ACommand' - - >>> WordSet.parse('someIdentifier').lowered().space_separated() - 'some identifier' - - Slices of the result should return another WordSet. 
- - >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated() - 'out_of_context' - - >>> WordSet.from_class_name(WordSet()).lowered().space_separated() - 'word set' - - >>> example = WordSet.parse('figured it out') - >>> example.headless_camel_case() - 'figuredItOut' - >>> example.dash_separated() - 'figured-it-out' - - """ - - _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))') - - def capitalized(self): - return WordSet(word.capitalize() for word in self) - - def lowered(self): - return WordSet(word.lower() for word in self) - - def camel_case(self): - return ''.join(self.capitalized()) - - def headless_camel_case(self): - words = iter(self) - first = next(words).lower() - new_words = itertools.chain((first,), WordSet(words).camel_case()) - return ''.join(new_words) - - def underscore_separated(self): - return '_'.join(self) - - def dash_separated(self): - return '-'.join(self) - - def space_separated(self): - return ' '.join(self) - - def trim_right(self, item): - """ - Remove the item from the end of the set. - - >>> WordSet.parse('foo bar').trim_right('foo') - ('foo', 'bar') - >>> WordSet.parse('foo bar').trim_right('bar') - ('foo',) - >>> WordSet.parse('').trim_right('bar') - () - """ - return self[:-1] if self and self[-1] == item else self - - def trim_left(self, item): - """ - Remove the item from the beginning of the set. - - >>> WordSet.parse('foo bar').trim_left('foo') - ('bar',) - >>> WordSet.parse('foo bar').trim_left('bar') - ('foo', 'bar') - >>> WordSet.parse('').trim_left('bar') - () - """ - return self[1:] if self and self[0] == item else self - - def trim(self, item): - """ - >>> WordSet.parse('foo bar').trim('foo') - ('bar',) - """ - return self.trim_left(item).trim_right(item) - - def __getitem__(self, item): - result = super(WordSet, self).__getitem__(item) - if isinstance(item, slice): - result = WordSet(result) - return result - - @classmethod - def parse(cls, identifier): - matches = cls._pattern.finditer(identifier) - return WordSet(match.group(0) for match in matches) - - @classmethod - def from_class_name(cls, subject): - return cls.parse(subject.__class__.__name__) - - -# for backward compatibility -words = WordSet.parse - - -def simple_html_strip(s): - r""" - Remove HTML from the string `s`. - - >>> str(simple_html_strip('')) - '' - - >>> print(simple_html_strip('A stormy day in paradise')) - A stormy day in paradise - - >>> print(simple_html_strip('Somebody tell the truth.')) - Somebody tell the truth. - - >>> print(simple_html_strip('What about
      \nmultiple lines?')) - What about - multiple lines? - """ - html_stripper = re.compile('()|(<[^>]*>)|([^<]+)', re.DOTALL) - texts = (match.group(3) or '' for match in html_stripper.finditer(s)) - return ''.join(texts) - - -class SeparatedValues(str): - """ - A string separated by a separator. Overrides __iter__ for getting - the values. - - >>> list(SeparatedValues('a,b,c')) - ['a', 'b', 'c'] - - Whitespace is stripped and empty values are discarded. - - >>> list(SeparatedValues(' a, b , c, ')) - ['a', 'b', 'c'] - """ - - separator = ',' - - def __iter__(self): - parts = self.split(self.separator) - return filter(None, (part.strip() for part in parts)) - - -class Stripper: - r""" - Given a series of lines, find the common prefix and strip it from them. - - >>> lines = [ - ... 'abcdefg\n', - ... 'abc\n', - ... 'abcde\n', - ... ] - >>> res = Stripper.strip_prefix(lines) - >>> res.prefix - 'abc' - >>> list(res.lines) - ['defg\n', '\n', 'de\n'] - - If no prefix is common, nothing should be stripped. - - >>> lines = [ - ... 'abcd\n', - ... '1234\n', - ... ] - >>> res = Stripper.strip_prefix(lines) - >>> res.prefix = '' - >>> list(res.lines) - ['abcd\n', '1234\n'] - """ - - def __init__(self, prefix, lines): - self.prefix = prefix - self.lines = map(self, lines) - - @classmethod - def strip_prefix(cls, lines): - prefix_lines, lines = itertools.tee(lines) - prefix = functools.reduce(cls.common_prefix, prefix_lines) - return cls(prefix, lines) - - def __call__(self, line): - if not self.prefix: - return line - null, prefix, rest = line.partition(self.prefix) - return rest - - @staticmethod - def common_prefix(s1, s2): - """ - Return the common prefix of two lines. - """ - index = min(len(s1), len(s2)) - while s1[:index] != s2[:index]: - index -= 1 - return s1[:index] - - -def remove_prefix(text, prefix): - """ - Remove the prefix from the text if it exists. - - >>> remove_prefix('underwhelming performance', 'underwhelming ') - 'performance' - - >>> remove_prefix('something special', 'sample') - 'something special' - """ - null, prefix, rest = text.rpartition(prefix) - return rest - - -def remove_suffix(text, suffix): - """ - Remove the suffix from the text if it exists. - - >>> remove_suffix('name.git', '.git') - 'name' - - >>> remove_suffix('something special', 'sample') - 'something special' - """ - rest, suffix, null = text.partition(suffix) - return rest - - -def normalize_newlines(text): - r""" - Replace alternate newlines with the canonical newline. - - >>> normalize_newlines('Lorem Ipsum\u2029') - 'Lorem Ipsum\n' - >>> normalize_newlines('Lorem Ipsum\r\n') - 'Lorem Ipsum\n' - >>> normalize_newlines('Lorem Ipsum\x85') - 'Lorem Ipsum\n' - """ - newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029'] - pattern = '|'.join(newlines) - return re.sub(pattern, '\n', text) - - -def _nonblank(str): - return str and not str.startswith('#') - - -@functools.singledispatch -def yield_lines(iterable): - r""" - Yield valid lines of a string or iterable. - - >>> list(yield_lines('')) - [] - >>> list(yield_lines(['foo', 'bar'])) - ['foo', 'bar'] - >>> list(yield_lines('foo\nbar')) - ['foo', 'bar'] - >>> list(yield_lines('\nfoo\n#bar\nbaz #comment')) - ['foo', 'baz #comment'] - >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n'])) - ['foo', 'bar', 'baz', 'bing'] - """ - return itertools.chain.from_iterable(map(yield_lines, iterable)) - - -@yield_lines.register(str) -def _(text): - return filter(_nonblank, map(str.strip, text.splitlines())) - - -def drop_comment(line): - """ - Drop comments. 
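- Only a hash that is preceded by a space starts a comment.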
- - >>> drop_comment('foo # bar') - 'foo' - - A hash without a space may be in a URL. - - >>> drop_comment('http://example.com/foo#bar') - 'http://example.com/foo#bar' - """ - return line.partition(' #')[0] - - -def join_continuation(lines): - r""" - Join lines continued by a trailing backslash. - - >>> list(join_continuation(['foo \\', 'bar', 'baz'])) - ['foobar', 'baz'] - >>> list(join_continuation(['foo \\', 'bar', 'baz'])) - ['foobar', 'baz'] - >>> list(join_continuation(['foo \\', 'bar \\', 'baz'])) - ['foobarbaz'] - - Not sure why, but... - The character preceeding the backslash is also elided. - - >>> list(join_continuation(['goo\\', 'dly'])) - ['godly'] - - A terrible idea, but... - If no line is available to continue, suppress the lines. - - >>> list(join_continuation(['foo', 'bar\\', 'baz\\'])) - ['foo'] - """ - lines = iter(lines) - for item in lines: - while item.endswith('\\'): - try: - item = item[:-2].strip() + next(lines) - except StopIteration: - return - yield item diff --git a/spaces/BwayKC/prompthero-openjourney-v2/app.py b/spaces/BwayKC/prompthero-openjourney-v2/app.py deleted file mode 100644 index 4fa45eda1d4a0af263ec59b35e375b837fe1ecf1..0000000000000000000000000000000000000000 --- a/spaces/BwayKC/prompthero-openjourney-v2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/prompthero/openjourney-v2").launch() \ No newline at end of file diff --git a/spaces/CK42/sentiment-model-comparison/README.md b/spaces/CK42/sentiment-model-comparison/README.md deleted file mode 100644 index 21c3c8b5a586ea57279af0c3cc384947a3a4b5d9..0000000000000000000000000000000000000000 --- a/spaces/CK42/sentiment-model-comparison/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Sentiment Model Comparison -emoji: 📚 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.1.4 -python_version: 3.9.13 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVPR/DualStyleGAN/dualstylegan.py b/spaces/CVPR/DualStyleGAN/dualstylegan.py deleted file mode 100644 index 5141ffc07c259cdcc8bcc3ebfddbaa63b3299500..0000000000000000000000000000000000000000 --- a/spaces/CVPR/DualStyleGAN/dualstylegan.py +++ /dev/null @@ -1,206 +0,0 @@ -from __future__ import annotations - -import argparse -import os -import pathlib -import subprocess -import sys -from typing import Callable - -import dlib -import huggingface_hub -import numpy as np -import PIL.Image -import torch -import torch.nn as nn -import torchvision.transforms as T - -if os.getenv('SYSTEM') == 'spaces': - os.system("sed -i '10,17d' DualStyleGAN/model/stylegan/op/fused_act.py") - os.system("sed -i '10,17d' DualStyleGAN/model/stylegan/op/upfirdn2d.py") - -app_dir = pathlib.Path(__file__).parent -submodule_dir = app_dir / 'DualStyleGAN' -sys.path.insert(0, submodule_dir.as_posix()) - -from model.dualstylegan import DualStyleGAN -from model.encoder.align_all_parallel import align_face -from model.encoder.psp import pSp - -MODEL_REPO = 'CVPR/DualStyleGAN' - - -class Model: - def __init__(self): - self.device = torch.device( - 'cuda:0' if torch.cuda.is_available() else 'cpu') - self.landmark_model = self._create_dlib_landmark_model() - self.encoder_dict = self._load_encoder() - self.transform = self._create_transform() - self.encoder_type = 'z+' - - self.style_types = [ - 'cartoon', - 'caricature', - 'anime', - 'arcane', - 'comic', - 'pixar', - 'slamdunk', - ] - self.generator_dict = { - style_type: 
self._load_generator(style_type) - for style_type in self.style_types - } - self.exstyle_dict = { - style_type: self._load_exstylecode(style_type) - for style_type in self.style_types - } - - @staticmethod - def _create_dlib_landmark_model(): - url = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2' - path = pathlib.Path('shape_predictor_68_face_landmarks.dat') - if not path.exists(): - bz2_path = 'shape_predictor_68_face_landmarks.dat.bz2' - torch.hub.download_url_to_file(url, bz2_path) - subprocess.run(f'bunzip2 -d {bz2_path}'.split()) - return dlib.shape_predictor(path.as_posix()) - - def _load_encoder(self) -> nn.Module: - ckpt_path = huggingface_hub.hf_hub_download(MODEL_REPO, - 'models/encoder.pt') - ckpt = torch.load(ckpt_path, map_location='cpu') - opts = ckpt['opts'] - opts['device'] = self.device.type - opts['checkpoint_path'] = ckpt_path - opts = argparse.Namespace(**opts) - model = pSp(opts) - model.to(self.device) - model.eval() - - ckpt_path = huggingface_hub.hf_hub_download(MODEL_REPO, - 'models/encoder_wplus.pt') - ckpt = torch.load(ckpt_path, map_location='cpu') - opts = ckpt['opts'] - opts['device'] = self.device.type - opts['checkpoint_path'] = ckpt_path - opts['output_size'] = 1024 - opts = argparse.Namespace(**opts) - model2 = pSp(opts) - model2.to(self.device) - model2.eval() - - return {'z+': model, 'w+': model2} - - @staticmethod - def _create_transform() -> Callable: - transform = T.Compose([ - T.Resize(256), - T.CenterCrop(256), - T.ToTensor(), - T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ]) - return transform - - def _load_generator(self, style_type: str) -> nn.Module: - model = DualStyleGAN(1024, 512, 8, 2, res_index=6) - ckpt_path = huggingface_hub.hf_hub_download( - MODEL_REPO, f'models/{style_type}/generator.pt') - ckpt = torch.load(ckpt_path, map_location='cpu') - model.load_state_dict(ckpt['g_ema']) - model.to(self.device) - model.eval() - return model - - @staticmethod - def _load_exstylecode(style_type: str) -> dict[str, np.ndarray]: - if style_type in ['cartoon', 'caricature', 'anime']: - filename = 'refined_exstyle_code.npy' - else: - filename = 'exstyle_code.npy' - path = huggingface_hub.hf_hub_download( - MODEL_REPO, f'models/{style_type}/{filename}') - exstyles = np.load(path, allow_pickle=True).item() - return exstyles - - def detect_and_align_face(self, image_path) -> np.ndarray: - image = align_face(filepath=image_path, predictor=self.landmark_model) - x, y = np.random.randint(255), np.random.randint(255) - r, g, b = image.getpixel((x, y)) - image.putpixel( - (x, y), (r, g + 1, b) - ) # trick to make sure run reconstruct_face() once any input setting changes - return image - - @staticmethod - def denormalize(tensor: torch.Tensor) -> torch.Tensor: - return torch.clamp((tensor + 1) / 2 * 255, 0, 255).to(torch.uint8) - - def postprocess(self, tensor: torch.Tensor) -> np.ndarray: - tensor = self.denormalize(tensor) - return tensor.cpu().numpy().transpose(1, 2, 0) - - @torch.inference_mode() - def reconstruct_face(self, image: np.ndarray, - encoder_type: str) -> tuple[np.ndarray, torch.Tensor]: - if encoder_type == 'Z+ encoder (better stylization)': - self.encoder_type = 'z+' - z_plus_latent = True - return_z_plus_latent = True - else: - self.encoder_type = 'w+' - z_plus_latent = False - return_z_plus_latent = False - image = PIL.Image.fromarray(image) - input_data = self.transform(image).unsqueeze(0).to(self.device) - img_rec, instyle = self.encoder_dict[self.encoder_type]( - input_data, - randomize_noise=False, - 
return_latents=True, - z_plus_latent=z_plus_latent, - return_z_plus_latent=return_z_plus_latent, - resize=False) - img_rec = torch.clamp(img_rec.detach(), -1, 1) - img_rec = self.postprocess(img_rec[0]) - return img_rec, instyle - - @torch.inference_mode() - def generate(self, style_type: str, style_id: int, structure_weight: float, - color_weight: float, structure_only: bool, - instyle: torch.Tensor) -> np.ndarray: - - if self.encoder_type == 'z+': - z_plus_latent = True - input_is_latent = False - else: - z_plus_latent = False - input_is_latent = True - - generator = self.generator_dict[style_type] - exstyles = self.exstyle_dict[style_type] - - style_id = int(style_id) - stylename = list(exstyles.keys())[style_id] - - latent = torch.tensor(exstyles[stylename]).to(self.device) - if structure_only and self.encoder_type == 'z+': - latent[0, 7:18] = instyle[0, 7:18] - exstyle = generator.generator.style( - latent.reshape(latent.shape[0] * latent.shape[1], - latent.shape[2])).reshape(latent.shape) - if structure_only and self.encoder_type == 'w+': - exstyle[:, 7:18] = instyle[:, 7:18] - - img_gen, _ = generator([instyle], - exstyle, - input_is_latent=input_is_latent, - z_plus_latent=z_plus_latent, - truncation=0.7, - truncation_latent=0, - use_res=True, - interp_weights=[structure_weight] * 7 + - [color_weight] * 11) - img_gen = torch.clamp(img_gen.detach(), -1, 1) - img_gen = self.postprocess(img_gen[0]) - return img_gen diff --git a/spaces/CVPR/LIVE/cmake/FindThrust.cmake b/spaces/CVPR/LIVE/cmake/FindThrust.cmake deleted file mode 100644 index 61eef297b996496f4222d6afb570fb5aa960781d..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/cmake/FindThrust.cmake +++ /dev/null @@ -1,40 +0,0 @@ -##============================================================================= -## -## Copyright (c) Kitware, Inc. -## All rights reserved. -## See LICENSE.txt for details. -## -## This software is distributed WITHOUT ANY WARRANTY; without even -## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -## PURPOSE. See the above copyright notice for more information. -## -## Copyright 2012 Sandia Corporation. -## Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -## the U.S. Government retains certain rights in this software. -## -##============================================================================= - -# -# FindThrust -# -# This module finds the Thrust header files and extrats their version. It -# sets the following variables. -# -# THRUST_INCLUDE_DIR - Include directory for thrust header files. (All header -# files will actually be in the thrust subdirectory.) -# THRUST_VERSION - Version of thrust in the form "major.minor.patch". -# - -find_path(THRUST_INCLUDE_DIR - HINTS /usr/include/cuda - /usr/local/include - /usr/local/cuda/include - ${CUDA_INCLUDE_DIRS} - ./thrust - ../thrust - NAMES thrust/version.h -) - -if (THRUST_INCLUDE_DIR) - set(THRUST_FOUND TRUE) -endif () \ No newline at end of file diff --git a/spaces/CVPR/LIVE/thrust/thrust/async/reduce.h b/spaces/CVPR/LIVE/thrust/thrust/async/reduce.h deleted file mode 100644 index da2b1195d0acbb2d50fff2054e82ae4a7ae03f58..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/async/reduce.h +++ /dev/null @@ -1,441 +0,0 @@ -/* - * Copyright 2008-2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*! \file async/reduce.h - * \brief Functions for asynchronously reducing a range to a single value. - */ - -#pragma once - -#include -#include - -#if THRUST_CPP_DIALECT >= 2014 - -#include -#include -#include -#include -#include -#include - -#include - -namespace thrust -{ - -namespace async -{ - -namespace unimplemented -{ - -template < - typename DerivedPolicy -, typename ForwardIt, typename Sentinel, typename T, typename BinaryOp -> -__host__ -future -async_reduce( - thrust::execution_policy&, ForwardIt, Sentinel, T, BinaryOp -) -{ - THRUST_STATIC_ASSERT_MSG( - (thrust::detail::depend_on_instantiation::value) - , "this algorithm is not implemented for the specified system" - ); - return {}; -} - -} // namespace unimplemented - -namespace reduce_detail -{ - -using thrust::async::unimplemented::async_reduce; - -struct reduce_fn final -{ - template < - typename DerivedPolicy - , typename ForwardIt, typename Sentinel, typename T, typename BinaryOp - > - __host__ - static auto call( - thrust::detail::execution_policy_base const& exec - , ForwardIt&& first, Sentinel&& last - , T&& init - , BinaryOp&& op - ) - // ADL dispatch. - THRUST_RETURNS( - async_reduce( - thrust::detail::derived_cast(thrust::detail::strip_const(exec)) - , THRUST_FWD(first), THRUST_FWD(last) - , THRUST_FWD(init) - , THRUST_FWD(op) - ) - ) - - template < - typename DerivedPolicy - , typename ForwardIt, typename Sentinel, typename T - > - __host__ - static auto call4( - thrust::detail::execution_policy_base const& exec - , ForwardIt&& first, Sentinel&& last - , T&& init - , thrust::true_type - ) - // ADL dispatch. - THRUST_RETURNS( - async_reduce( - thrust::detail::derived_cast(thrust::detail::strip_const(exec)) - , THRUST_FWD(first), THRUST_FWD(last) - , THRUST_FWD(init) - , thrust::plus>{} - ) - ) - - template < - typename DerivedPolicy - , typename ForwardIt, typename Sentinel - > - __host__ - static auto - call3( - thrust::detail::execution_policy_base const& exec - , ForwardIt&& first, Sentinel&& last - , thrust::true_type - ) - // ADL dispatch. - THRUST_RETURNS( - async_reduce( - thrust::detail::derived_cast(thrust::detail::strip_const(exec)) - , THRUST_FWD(first), THRUST_FWD(last) - , typename iterator_traits>::value_type{} - , thrust::plus< - remove_cvref_t< - typename iterator_traits>::value_type - > - >{} - ) - ) - - template - __host__ - static auto call4(ForwardIt&& first, Sentinel&& last, - T&& init, - BinaryOp&& op, - thrust::false_type) - THRUST_RETURNS( - reduce_fn::call( - thrust::detail::select_system( - typename iterator_system>::type{} - ) - , THRUST_FWD(first), THRUST_FWD(last) - , THRUST_FWD(init) - , THRUST_FWD(op) - ) - ) - - template - __host__ - static auto call3(ForwardIt&& first, Sentinel&& last, - T&& init, - thrust::false_type) - THRUST_RETURNS( - reduce_fn::call( - thrust::detail::select_system( - typename iterator_system>::type{} - ) - , THRUST_FWD(first), THRUST_FWD(last) - , THRUST_FWD(init) - , thrust::plus>{} - ) - ) - - // MSVC WAR: MSVC gets angsty and eats all available RAM when we try to detect - // if T1 is an execution_policy by using SFINAE. 
Switching to a static - // dispatch pattern to prevent this. - template - __host__ - static auto call(T1&& t1, T2&& t2, T3&& t3) - THRUST_RETURNS( - reduce_fn::call3(THRUST_FWD(t1), THRUST_FWD(t2), THRUST_FWD(t3), - thrust::is_execution_policy>{}) - ) - - template - __host__ - static auto call(T1&& t1, T2&& t2, T3&& t3, T4&& t4) - THRUST_RETURNS( - reduce_fn::call4(THRUST_FWD(t1), THRUST_FWD(t2), THRUST_FWD(t3), THRUST_FWD(t4), - thrust::is_execution_policy>{}) - ) - - template - __host__ - static auto call(ForwardIt&& first, Sentinel&& last) - THRUST_RETURNS( - reduce_fn::call( - thrust::detail::select_system( - typename iterator_system>::type{} - ) - , THRUST_FWD(first), THRUST_FWD(last) - , typename iterator_traits>::value_type{} - , thrust::plus< - remove_cvref_t< - typename iterator_traits>::value_type - > - >{} - ) - ) - - template - THRUST_NODISCARD __host__ - auto operator()(Args&&... args) const - THRUST_RETURNS( - call(THRUST_FWD(args)...) - ) -}; - -} // namespace reduce_detail - -THRUST_INLINE_CONSTANT reduce_detail::reduce_fn reduce{}; - -/////////////////////////////////////////////////////////////////////////////// - -namespace unimplemented -{ - -template < - typename DerivedPolicy -, typename ForwardIt, typename Sentinel, typename OutputIt -, typename T, typename BinaryOp -> -__host__ -event -async_reduce_into( - thrust::execution_policy& -, ForwardIt, Sentinel, OutputIt, T, BinaryOp -) -{ - THRUST_STATIC_ASSERT_MSG( - (thrust::detail::depend_on_instantiation::value) - , "this algorithm is not implemented for the specified system" - ); - return {}; -} - -} // namespace unimplemented - -namespace reduce_into_detail -{ - -using thrust::async::unimplemented::async_reduce_into; - -struct reduce_into_fn final -{ - template < - typename DerivedPolicy - , typename ForwardIt, typename Sentinel, typename OutputIt - , typename T, typename BinaryOp - > - __host__ - static auto call( - thrust::detail::execution_policy_base const& exec - , ForwardIt&& first, Sentinel&& last - , OutputIt&& output - , T&& init - , BinaryOp&& op - ) - // ADL dispatch. - THRUST_RETURNS( - async_reduce_into( - thrust::detail::derived_cast(thrust::detail::strip_const(exec)) - , THRUST_FWD(first), THRUST_FWD(last) - , THRUST_FWD(output) - , THRUST_FWD(init) - , THRUST_FWD(op) - ) - ) - - template < - typename DerivedPolicy - , typename ForwardIt, typename Sentinel, typename OutputIt - , typename T - > - __host__ - static auto call5( - thrust::detail::execution_policy_base const& exec - , ForwardIt&& first, Sentinel&& last - , OutputIt&& output - , T&& init - , thrust::true_type - ) - // ADL dispatch. - THRUST_RETURNS( - async_reduce_into( - thrust::detail::derived_cast(thrust::detail::strip_const(exec)) - , THRUST_FWD(first), THRUST_FWD(last) - , THRUST_FWD(output) - , THRUST_FWD(init) - , thrust::plus>{} - ) - ) - - template < - typename DerivedPolicy - , typename ForwardIt, typename Sentinel, typename OutputIt - > - __host__ - static auto - call4( - thrust::detail::execution_policy_base const& exec - , ForwardIt&& first, Sentinel&& last - , OutputIt&& output - , thrust::true_type - ) - // ADL dispatch. 
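- // The unqualified async_reduce_into call below is resolved via
- // argument-dependent lookup: a backend that declares async_reduce_into
- // for its execution_policy type (in that policy's namespace) is found
- // automatically; otherwise the unimplemented:: overload pulled in by the
- // using-declaration above is selected and its static_assert fires.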
- THRUST_RETURNS( - async_reduce_into( - thrust::detail::derived_cast(thrust::detail::strip_const(exec)) - , THRUST_FWD(first), THRUST_FWD(last) - , THRUST_FWD(output) - , typename iterator_traits>::value_type{} - , thrust::plus< - remove_cvref_t< - typename iterator_traits>::value_type - > - >{} - ) - ) - - template < - typename ForwardIt, typename Sentinel, typename OutputIt - , typename T, typename BinaryOp - > - __host__ - static auto call5( - ForwardIt&& first, Sentinel&& last - , OutputIt&& output - , T&& init - , BinaryOp&& op - , thrust::false_type - ) - THRUST_RETURNS( - reduce_into_fn::call( - thrust::detail::select_system( - typename iterator_system>::type{} - , typename iterator_system>::type{} - ) - , THRUST_FWD(first), THRUST_FWD(last) - , THRUST_FWD(output) - , THRUST_FWD(init) - , THRUST_FWD(op) - ) - ) - - template < - typename ForwardIt, typename Sentinel, typename OutputIt - , typename T - > - __host__ - static auto call4( - ForwardIt&& first, Sentinel&& last - , OutputIt&& output - , T&& init - , thrust::false_type - ) - THRUST_RETURNS( - reduce_into_fn::call( - thrust::detail::select_system( - typename iterator_system>::type{} - , typename iterator_system>::type{} - ) - , THRUST_FWD(first), THRUST_FWD(last) - , THRUST_FWD(output) - , THRUST_FWD(init) - , thrust::plus>{} - ) - ) - - template < - typename ForwardIt, typename Sentinel, typename OutputIt - > - __host__ - static auto call( - ForwardIt&& first, Sentinel&& last - , OutputIt&& output - ) - THRUST_RETURNS( - reduce_into_fn::call( - thrust::detail::select_system( - typename iterator_system>::type{} - , typename iterator_system>::type{} - ) - , THRUST_FWD(first), THRUST_FWD(last) - , THRUST_FWD(output) - , typename iterator_traits>::value_type{} - , thrust::plus< - remove_cvref_t< - typename iterator_traits>::value_type - > - >{} - ) - ) - - // MSVC WAR: MSVC gets angsty and eats all available RAM when we try to detect - // if T1 is an execution_policy by using SFINAE. Switching to a static - // dispatch pattern to prevent this. - template - __host__ - static auto call(T1&& t1, T2&& t2, T3&& t3, T4&& t4) - THRUST_RETURNS( - reduce_into_fn::call4( - THRUST_FWD(t1), THRUST_FWD(t2), THRUST_FWD(t3), THRUST_FWD(t4), - thrust::is_execution_policy>{}) - ) - - template - __host__ - static auto call(T1&& t1, T2&& t2, T3&& t3, T4&& t4, T5&& t5) - THRUST_RETURNS( - reduce_into_fn::call5( - THRUST_FWD(t1), THRUST_FWD(t2), THRUST_FWD(t3), THRUST_FWD(t4), - THRUST_FWD(t5), thrust::is_execution_policy>{}) - ) - - template - THRUST_NODISCARD __host__ - auto operator()(Args&&... args) const - THRUST_RETURNS( - call(THRUST_FWD(args)...) - ) -}; - -} // namespace reduce_into_detail - -THRUST_INLINE_CONSTANT reduce_into_detail::reduce_into_fn reduce_into{}; - -} // namespace async - -} // end namespace thrust - -#endif - diff --git a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_category_with_system_and_traversal.h b/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_category_with_system_and_traversal.h deleted file mode 100644 index 8f5374b165d06a582ded7fe7cffebc70822dcf2f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_category_with_system_and_traversal.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -namespace thrust -{ -namespace detail -{ - - -template - struct iterator_category_with_system_and_traversal - : Category -{ -}; // end iterator_category_with_system_and_traversal - - -// specialize iterator_category_to_system for iterator_category_with_system_and_traversal -template struct iterator_category_to_system; - -template - struct iterator_category_to_system > -{ - typedef System type; -}; // end iterator_category_to_system - - -// specialize iterator_category_to_traversal for iterator_category_with_system_and_traversal -template struct iterator_category_to_traversal; - -template - struct iterator_category_to_traversal > -{ - typedef Traversal type; -}; // end iterator_category_to_traversal - - - -} // end detail -} // end thrust - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/copy_if.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/copy_if.h deleted file mode 100644 index d441862ab6cec2ef6ed87e21f5f926e81c32a5fd..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/copy_if.h +++ /dev/null @@ -1,857 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ -#pragma once - - -#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace thrust -{ -// XXX declare generic copy_if interface -// to avoid circulular dependency from thrust/copy.h -template -__host__ __device__ - OutputIterator - copy_if(const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result, - Predicate pred); - -template -__host__ __device__ - OutputIterator - copy_if(const thrust::detail::execution_policy_base &exec, - InputIterator1 first, - InputIterator1 last, - InputIterator2 stencil, - OutputIterator result, - Predicate pred); - -namespace cuda_cub { - -namespace __copy_if { - - template - struct PtxPolicy - { - enum - { - BLOCK_THREADS = _BLOCK_THREADS, - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, - ITEMS_PER_TILE = _BLOCK_THREADS * _ITEMS_PER_THREAD, - }; - static const cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; - static const cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; - static const cub::BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; - }; // struct PtxPolicy - - template - struct Tuning; - - template - struct Tuning - { - const static int INPUT_SIZE = sizeof(T); - - enum - { - NOMINAL_4B_ITEMS_PER_THREAD = 9, - ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))), - }; - - typedef PtxPolicy<128, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_LDG, - cub::BLOCK_SCAN_WARP_SCANS> - type; - }; // Tuning<350> - - - template - struct Tuning - { - const static int INPUT_SIZE = sizeof(T); - - enum - { - NOMINAL_4B_ITEMS_PER_THREAD = 10, - ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))), - }; - - typedef PtxPolicy<128, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_LDG, - cub::BLOCK_SCAN_WARP_SCANS> - type; - }; // Tuning<350> - - template - struct Tuning - { - const static int INPUT_SIZE = sizeof(T); - - enum - { - NOMINAL_4B_ITEMS_PER_THREAD = 7, - ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(3, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))), - }; - - typedef PtxPolicy<128, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_DEFAULT, - cub::BLOCK_SCAN_WARP_SCANS> - type; - }; // Tuning<300> - - struct no_stencil_tag_ {}; - typedef no_stencil_tag_* no_stencil_tag; - template - struct CopyIfAgent - { - typedef typename iterator_traits::value_type item_type; - typedef typename iterator_traits::value_type stencil_type; - - typedef cub::ScanTileState ScanTileState; - - template - struct PtxPlan : Tuning::type - { - typedef Tuning tuning; - - typedef typename core::LoadIterator::type ItemsLoadIt; - typedef typename core::LoadIterator::type StencilLoadIt; - - typedef typename core::BlockLoad::type BlockLoadItems; - typedef typename core::BlockLoad::type BlockLoadStencil; - - typedef cub::TilePrefixCallbackOp - TilePrefixCallback; - - typedef cub::BlockScan - BlockScan; - - - union TempStorage - { - struct - { - typename BlockScan::TempStorage scan; - typename TilePrefixCallback::TempStorage prefix; - }; - - typename BlockLoadItems::TempStorage load_items; - typename BlockLoadStencil::TempStorage load_stencil; - - core::uninitialized_array raw_exchange; - }; // union TempStorage - }; // struct 
PtxPlan - - typedef typename core::specialize_plan_msvc10_war::type::type ptx_plan; - - typedef typename ptx_plan::ItemsLoadIt ItemsLoadIt; - typedef typename ptx_plan::StencilLoadIt StencilLoadIt; - typedef typename ptx_plan::BlockLoadItems BlockLoadItems; - typedef typename ptx_plan::BlockLoadStencil BlockLoadStencil; - typedef typename ptx_plan::TilePrefixCallback TilePrefixCallback; - typedef typename ptx_plan::BlockScan BlockScan; - typedef typename ptx_plan::TempStorage TempStorage; - - enum - { - USE_STENCIL = !thrust::detail::is_same::value, - BLOCK_THREADS = ptx_plan::BLOCK_THREADS, - ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD, - ITEMS_PER_TILE = ptx_plan::ITEMS_PER_TILE - }; - - struct impl - { - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - TempStorage & storage; - ScanTileState &tile_state; - ItemsLoadIt items_load_it; - StencilLoadIt stencil_load_it; - OutputIt output_it; - Predicate predicate; - Size num_items; - - //------------------------------------------ - // scatter results to memory - //------------------------------------------ - - THRUST_DEVICE_FUNCTION void - scatter(item_type (&items)[ITEMS_PER_THREAD], - Size (&selection_flags)[ITEMS_PER_THREAD], - Size (&selection_indices)[ITEMS_PER_THREAD], - int num_tile_selections, - Size num_selections_prefix) - { - using core::sync_threadblock; - -#pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int local_scatter_offset = selection_indices[ITEM] - - num_selections_prefix; - if (selection_flags[ITEM]) - { - new (&storage.raw_exchange[local_scatter_offset]) item_type(items[ITEM]); - } - } - - sync_threadblock(); - - for (int item = threadIdx.x; - item < num_tile_selections; - item += BLOCK_THREADS) - { - output_it[num_selections_prefix + item] = storage.raw_exchange[item]; - } - } // func scatter - - //------------------------------------------ - // specialize predicate on different types - //------------------------------------------ - - template - struct __tag {}; - - enum ItemStencil - { - ITEM, - STENCIL - }; - - template - struct wrap_value - { - T const & x; - THRUST_DEVICE_FUNCTION wrap_value(T const &x) : x(x) {} - - THRUST_DEVICE_FUNCTION T const &operator()() const { return x; }; - }; // struct wrap_type - - //------- item - - THRUST_DEVICE_FUNCTION bool - predicate_wrapper(wrap_value const &x, - __tag) - { - return predicate(x()); - } - - THRUST_DEVICE_FUNCTION bool - predicate_wrapper(wrap_value const &, - __tag) - { - return false; - } - - //-------- stencil - - template - THRUST_DEVICE_FUNCTION bool - predicate_wrapper(wrap_value const &x, - __tag) - { - return predicate(x()); - } - - THRUST_DEVICE_FUNCTION bool - predicate_wrapper(wrap_value const &, - __tag) - { - return false; - } - - - THRUST_DEVICE_FUNCTION bool - predicate_wrapper(wrap_value const &, - __tag) - { - return false; - } - - template - THRUST_DEVICE_FUNCTION void - compute_selection_flags(int num_tile_items, - T (&values)[ITEMS_PER_THREAD], - Size (&selection_flags)[ITEMS_PER_THREAD]) - { -#pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - // Out-of-bounds items are selection_flags - selection_flags[ITEM] = 1; - - if (!IS_LAST_TILE || - (Size(threadIdx.x * ITEMS_PER_THREAD) + ITEM < num_tile_items)) - { - selection_flags[ITEM] = - predicate_wrapper(wrap_value(values[ITEM]), - __tag()); - } - } - } - - //------------------------------------------ - // consume tiles - 
//------------------------------------------ - - template - Size THRUST_DEVICE_FUNCTION - consume_tile_impl(int num_tile_items, - int tile_idx, - Size tile_base) - { - item_type items_loc[ITEMS_PER_THREAD]; - Size selection_flags[ITEMS_PER_THREAD]; - Size selection_idx[ITEMS_PER_THREAD]; - - if (IS_LAST_TILE) { - BlockLoadItems(storage.load_items) - .Load(items_load_it + tile_base, - items_loc, - num_tile_items); - } - else - { - BlockLoadItems(storage.load_items) - .Load(items_load_it + tile_base, - items_loc); - } - - core::sync_threadblock(); - - if (USE_STENCIL) - { - stencil_type stencil_loc[ITEMS_PER_THREAD]; - - if (IS_LAST_TILE) - { - BlockLoadStencil(storage.load_stencil) - .Load(stencil_load_it + tile_base, - stencil_loc, - num_tile_items); - } - else - { - BlockLoadStencil(storage.load_stencil) - .Load(stencil_load_it + tile_base, - stencil_loc); - } - - compute_selection_flags(num_tile_items, - stencil_loc, - selection_flags); - } - else /* Use predicate on items rather then stencil */ - { - compute_selection_flags(num_tile_items, - items_loc, - selection_flags); - } - - core::sync_threadblock(); - - Size num_tile_selections = 0; - Size num_selections = 0; - Size num_selections_prefix = 0; - if (IS_FIRST_TILE) - { - BlockScan(storage.scan) - .ExclusiveSum(selection_flags, - selection_idx, - num_tile_selections); - - if (threadIdx.x == 0) - { - // Update tile status if this is not the last tile - if (!IS_LAST_TILE) - tile_state.SetInclusive(0, num_tile_selections); - } - - // Do not count any out-of-bounds selections - if (IS_LAST_TILE) - { - int num_discount = ITEMS_PER_TILE - num_tile_items; - num_tile_selections -= num_discount; - } - num_selections = num_tile_selections; - } - else - { - TilePrefixCallback prefix_cb(tile_state, - storage.prefix, - cub::Sum(), - tile_idx); - BlockScan(storage.scan) - .ExclusiveSum(selection_flags, - selection_idx, - prefix_cb); - - num_selections = prefix_cb.GetInclusivePrefix(); - num_tile_selections = prefix_cb.GetBlockAggregate(); - num_selections_prefix = prefix_cb.GetExclusivePrefix(); - - if (IS_LAST_TILE) - { - int num_discount = ITEMS_PER_TILE - num_tile_items; - num_tile_selections -= num_discount; - num_selections -= num_discount; - } - } - - core::sync_threadblock(); - - scatter(items_loc, - selection_flags, - selection_idx, - num_tile_selections, - num_selections_prefix); - - - return num_selections; - } // func consume_tile_impl - - template - THRUST_DEVICE_FUNCTION Size - consume_tile(int num_tile_items, - int tile_idx, - Size tile_base) - { - if (tile_idx == 0) - { - return consume_tile_impl(num_tile_items, - tile_idx, - tile_base); - } - else - { - return consume_tile_impl(num_tile_items, - tile_idx, - tile_base); - } - } // func consume_tile - - //--------------------------------------------------------------------- - // Constructor - //--------------------------------------------------------------------- - - THRUST_DEVICE_FUNCTION impl(TempStorage & storage_, - ScanTileState & tile_state_, - ItemsIt items_it, - StencilIt stencil_it, - OutputIt output_it_, - Predicate predicate_, - Size num_items_, - int num_tiles, - NumSelectedOutputIt num_selected_out) - : storage(storage_), - tile_state(tile_state_), - items_load_it(core::make_load_iterator(ptx_plan(), items_it)), - stencil_load_it(core::make_load_iterator(ptx_plan(), stencil_it)), - output_it(output_it_), - predicate(predicate_), - num_items(num_items_) - { - int tile_idx = blockIdx.x; - Size tile_base = tile_idx * ITEMS_PER_TILE; - - if (tile_idx < num_tiles - 1) - { - 
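-        // Interior tiles are always full, so they take the IS_LAST_TILE = false
-        // specialization and skip the per-item bounds checks entirely.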
consume_tile(ITEMS_PER_TILE, - tile_idx, - tile_base); - } - else - { - int num_remaining = static_cast(num_items - tile_base); - Size num_selections = consume_tile(num_remaining, - tile_idx, - tile_base); - if (threadIdx.x == 0) - { - *num_selected_out = num_selections; - } - } - } // ctor impl - }; - - //--------------------------------------------------------------------- - // Agent entry point - //--------------------------------------------------------------------- - - THRUST_AGENT_ENTRY(ItemsIt items_it, - StencilIt stencil_it, - OutputIt output_it, - Predicate predicate, - Size num_items, - NumSelectedOutputIt num_selected_out, - ScanTileState tile_state, - int num_tiles, - char * shmem) - { - TempStorage &storage = *reinterpret_cast(shmem); - - impl(storage, - tile_state, - items_it, - stencil_it, - output_it, - predicate, - num_items, - num_tiles, - num_selected_out); - } - }; // struct CopyIfAgent - - template - struct InitAgent - { - template - struct PtxPlan : PtxPolicy<128> {}; - typedef core::specialize_plan ptx_plan; - - //--------------------------------------------------------------------- - // Agent entry point - //--------------------------------------------------------------------- - - THRUST_AGENT_ENTRY(ScanTileState tile_state, - Size num_tiles, - NumSelectedIt num_selected_out, - char * /*shmem*/) - { - tile_state.InitializeStatus(num_tiles); - if (blockIdx.x == 0 && threadIdx.x == 0) - *num_selected_out = 0; - } - }; // struct InitAgent - - template - static cudaError_t THRUST_RUNTIME_FUNCTION - doit_step(void * d_temp_storage, - size_t & temp_storage_bytes, - ItemsIt items, - StencilIt stencil, - OutputIt output_it, - Predicate predicate, - NumSelectedOutIt num_selected_out, - Size num_items, - cudaStream_t stream, - bool debug_sync) - { - if (num_items == 0) - return cudaSuccess; - - using core::AgentLauncher; - using core::AgentPlan; - using core::get_agent_plan; - - typedef AgentLauncher< - CopyIfAgent > - copy_if_agent; - - typedef typename copy_if_agent::ScanTileState ScanTileState; - - typedef AgentLauncher< - InitAgent > - init_agent; - - - using core::get_plan; - typename get_plan::type init_plan = init_agent::get_plan(); - typename get_plan::type copy_if_plan = copy_if_agent::get_plan(stream); - - int tile_size = copy_if_plan.items_per_tile; - size_t num_tiles = (num_items + tile_size - 1) / tile_size; - - size_t vshmem_size = core::vshmem_size(copy_if_plan.shared_memory_size, - num_tiles); - - cudaError_t status = cudaSuccess; - if (num_items == 0) - return status; - - size_t allocation_sizes[2] = {0, vshmem_size}; - status = ScanTileState::AllocationSize(static_cast(num_tiles), allocation_sizes[0]); - CUDA_CUB_RET_IF_FAIL(status); - - - void* allocations[2] = {NULL, NULL}; - status = cub::AliasTemporaries(d_temp_storage, - temp_storage_bytes, - allocations, - allocation_sizes); - CUDA_CUB_RET_IF_FAIL(status); - - - if (d_temp_storage == NULL) - { - return status; - } - - ScanTileState tile_status; - status = tile_status.Init(static_cast(num_tiles), allocations[0], allocation_sizes[0]); - CUDA_CUB_RET_IF_FAIL(status); - - init_agent ia(init_plan, num_tiles, stream, "copy_if::init_agent", debug_sync); - - char *vshmem_ptr = vshmem_size > 0 ? 
(char*)allocations[1] : NULL; - - copy_if_agent pa(copy_if_plan, num_items, stream, vshmem_ptr, "copy_if::partition_agent", debug_sync); - - ia.launch(tile_status, num_tiles, num_selected_out); - CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError()); - - pa.launch(items, - stencil, - output_it, - predicate, - num_items, - num_selected_out, - tile_status, - num_tiles); - CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError()); - return status; - } - - template - THRUST_RUNTIME_FUNCTION - OutputIt copy_if(execution_policy& policy, - InputIt first, - InputIt last, - StencilIt stencil, - OutputIt output, - Predicate predicate) - { - typedef int size_type; - - size_type num_items = static_cast(thrust::distance(first, last)); - size_t temp_storage_bytes = 0; - cudaStream_t stream = cuda_cub::stream(policy); - bool debug_sync = THRUST_DEBUG_SYNC_FLAG; - - if (num_items == 0) - return output; - - cudaError_t status; - status = doit_step(NULL, - temp_storage_bytes, - first, - stencil, - output, - predicate, - reinterpret_cast(NULL), - num_items, - stream, - debug_sync); - cuda_cub::throw_on_error(status, "copy_if failed on 1st step"); - - size_t allocation_sizes[2] = {sizeof(size_type), temp_storage_bytes}; - void * allocations[2] = {NULL, NULL}; - - size_t storage_size = 0; - - status = core::alias_storage(NULL, - storage_size, - allocations, - allocation_sizes); - cuda_cub::throw_on_error(status, "copy_if failed on 1st alias_storage"); - - // Allocate temporary storage. - thrust::detail::temporary_array - tmp(policy, storage_size); - void *ptr = static_cast(tmp.data().get()); - - status = core::alias_storage(ptr, - storage_size, - allocations, - allocation_sizes); - cuda_cub::throw_on_error(status, "copy_if failed on 2nd alias_storage"); - - size_type* d_num_selected_out - = thrust::detail::aligned_reinterpret_cast(allocations[0]); - - status = doit_step(allocations[1], - temp_storage_bytes, - first, - stencil, - output, - predicate, - d_num_selected_out, - num_items, - stream, - debug_sync); - cuda_cub::throw_on_error(status, "copy_if failed on 2nd step"); - - status = cuda_cub::synchronize(policy); - cuda_cub::throw_on_error(status, "copy_if failed to synchronize"); - - size_type num_selected = get_value(policy, d_num_selected_out); - - return output + num_selected; - } - -} // namespace __copy_if - -//------------------------- -// Thrust API entry points -//------------------------- - -__thrust_exec_check_disable__ -template -OutputIterator __host__ __device__ -copy_if(execution_policy &policy, - InputIterator first, - InputIterator last, - OutputIterator result, - Predicate pred) -{ - OutputIterator ret = result; - - if (__THRUST_HAS_CUDART__) - { - ret = __copy_if::copy_if(policy, - first, - last, - __copy_if::no_stencil_tag(), - result, - pred); - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::copy_if(cvt_to_seq(derived_cast(policy)), - first, - last, - result, - pred); -#endif - } - return ret; -} // func copy_if - -__thrust_exec_check_disable__ -template -OutputIterator __host__ __device__ -copy_if(execution_policy &policy, - InputIterator first, - InputIterator last, - StencilIterator stencil, - OutputIterator result, - Predicate pred) -{ - OutputIterator ret = result; - - if (__THRUST_HAS_CUDART__) - { - ret = __copy_if::copy_if(policy, - first, - last, - stencil, - result, - pred); - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::copy_if(cvt_to_seq(derived_cast(policy)), - first, - last, - stencil, - result, - pred); -#endif - } - return ret; -} // func copy_if - -} // namespace cuda_cub 
-} // end namespace thrust - -#include -#endif diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/set_operations.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/set_operations.h deleted file mode 100644 index 4dbee0ae40102a62e78dd804b683daa35cb15e7a..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/set_operations.h +++ /dev/null @@ -1,319 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include -#include - -namespace thrust -{ -namespace system -{ -namespace detail -{ -namespace generic -{ - - -template -__host__ __device__ -OutputIterator set_difference(thrust::execution_policy &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result); - - -// XXX it is an error to call this function; it has no implementation -template -__host__ __device__ -OutputIterator set_difference(thrust::execution_policy &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result, - StrictWeakOrdering comp); - - -template -__host__ __device__ -thrust::pair - set_difference_by_key(thrust::execution_policy &exec, - InputIterator1 keys_first1, - InputIterator1 keys_last1, - InputIterator2 keys_first2, - InputIterator2 keys_last2, - InputIterator3 values_first1, - InputIterator4 values_first2, - OutputIterator1 keys_result, - OutputIterator2 values_result); - - -template -__host__ __device__ -thrust::pair - set_difference_by_key(thrust::execution_policy &exec, - InputIterator1 keys_first1, - InputIterator1 keys_last1, - InputIterator2 keys_first2, - InputIterator2 keys_last2, - InputIterator3 values_first1, - InputIterator4 values_first2, - OutputIterator1 keys_result, - OutputIterator2 values_result, - StrictWeakOrdering comp); - - -template -__host__ __device__ -OutputIterator set_intersection(thrust::execution_policy &system, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result); - - -// XXX it is an error to call this function; it has no implementation -template -__host__ __device__ -OutputIterator set_intersection(thrust::execution_policy &system, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result, - StrictWeakOrdering comp); - - -template -__host__ __device__ -thrust::pair - set_intersection_by_key(thrust::execution_policy &system, - InputIterator1 keys_first1, - InputIterator1 keys_last1, - InputIterator2 keys_first2, - InputIterator2 keys_last2, - InputIterator3 values_first1, - OutputIterator1 keys_result, - OutputIterator2 values_result); - - -template -__host__ __device__ -thrust::pair - set_intersection_by_key(thrust::execution_policy &system, - InputIterator1 keys_first1, - InputIterator1 keys_last1, - InputIterator2 keys_first2, - InputIterator2 keys_last2, - 
InputIterator3 values_first1, - OutputIterator1 keys_result, - OutputIterator2 values_result, - StrictWeakOrdering comp); - - -template -__host__ __device__ -OutputIterator set_symmetric_difference(thrust::execution_policy &system, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result); - - -// XXX it is an error to call this function; it has no implementation -template -__host__ __device__ -OutputIterator set_symmetric_difference(thrust::execution_policy &system, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result, - StrictWeakOrdering comp); - - -template -__host__ __device__ -thrust::pair - set_symmetric_difference_by_key(thrust::execution_policy &system, - InputIterator1 keys_first1, - InputIterator1 keys_last1, - InputIterator2 keys_first2, - InputIterator2 keys_last2, - InputIterator3 values_first1, - InputIterator4 values_first2, - OutputIterator1 keys_result, - OutputIterator2 values_result); - - -template -__host__ __device__ -thrust::pair - set_symmetric_difference_by_key(thrust::execution_policy &system, - InputIterator1 keys_first1, - InputIterator1 keys_last1, - InputIterator2 keys_first2, - InputIterator2 keys_last2, - InputIterator3 values_first1, - InputIterator4 values_first2, - OutputIterator1 keys_result, - OutputIterator2 values_result, - StrictWeakOrdering comp); - - -template -__host__ __device__ -OutputIterator set_union(thrust::execution_policy &system, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result); - - -// XXX it is an error to call this function; it has no implementation -template -__host__ __device__ -OutputIterator set_union(thrust::execution_policy &system, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result, - StrictWeakOrdering comp); - - -template -__host__ __device__ -thrust::pair - set_union_by_key(thrust::execution_policy &system, - InputIterator1 keys_first1, - InputIterator1 keys_last1, - InputIterator2 keys_first2, - InputIterator2 keys_last2, - InputIterator3 values_first1, - InputIterator4 values_first2, - OutputIterator1 keys_result, - OutputIterator2 values_result); - - -template -__host__ __device__ -thrust::pair - set_union_by_key(thrust::execution_policy &system, - InputIterator1 keys_first1, - InputIterator1 keys_last1, - InputIterator2 keys_first2, - InputIterator2 keys_last2, - InputIterator3 values_first1, - InputIterator4 values_first2, - OutputIterator1 keys_result, - OutputIterator2 values_result, - StrictWeakOrdering comp); - - -} // end namespace generic -} // end namespace detail -} // end namespace system -} // end namespace thrust - -#include - diff --git a/spaces/Cboudreau/AI_ZeroToHero/app.py b/spaces/Cboudreau/AI_ZeroToHero/app.py deleted file mode 100644 index 05adfa181088800fc3ff4f4847de72688e4fe5a5..0000000000000000000000000000000000000000 --- a/spaces/Cboudreau/AI_ZeroToHero/app.py +++ /dev/null @@ -1,58 +0,0 @@ -import streamlit as st -import graphviz as gv -from graphviz import Graph -import folium -from streamlit_folium import folium_static - -# Define the cluster relations graph using gvmap -g = Graph(format='svg') -g.graph_attr['bgcolor'] = '#FFFFFF' -g.graph_attr['outputorder'] = 'edgesfirst' -g.graph_attr['size'] = '10,10' -g.node_attr['style'] = 'filled' -g.node_attr['shape'] = 'box' -g.node_attr['fillcolor'] = '#FFDAB9' - -with 
g.subgraph(name='cluster_NJ') as c: - c.graph_attr['bgcolor'] = '#ADD8E6' - c.node_attr['color'] = '#000000' - c.node_attr['fontcolor'] = '#000000' - c.attr(label='New Jersey', fontsize='24') - c.node('Hackensack Meridian Health', URL='https://www.hackensackmeridianhealth.org/', target='_blank', tooltip='Hackensack Meridian Health: Hackensack University Medical Center') - c.node('RWJBarnabas Health', URL='https://www.rwjbh.org/', target='_blank', tooltip='RWJBarnabas Health: Robert Wood Johnson University Hospital') - c.node('Atlantic Health System', URL='https://www.atlantichealth.org/', target='_blank', tooltip='Atlantic Health System: Morristown Medical Center') - c.node('Virtua Health', URL='https://www.virtua.org/', target='_blank', tooltip='Virtua Health: Virtua Memorial Hospital') - c.node('Inspira Health', URL='https://www.inspirahealthnetwork.org/', target='_blank', tooltip='Inspira Health: Inspira Medical Center Vineland') - c.node('Cooper University Health Care', URL='https://www.cooperhealth.org/', target='_blank', tooltip='Cooper University Health Care: Cooper University Hospital') - c.node('University Hospital', URL='https://www.uhnj.org/', target='_blank', tooltip='University Hospital: University Hospital') - c.node('Robert Wood Johnson University Hospital Hamilton', URL='https://www.rwjbh.org/robert-wood-johnson-university-hospital-hamilton/', target='_blank', tooltip='Robert Wood Johnson University Hospital Hamilton: Robert Wood Johnson University Hospital Hamilton') - c.node('Trinitas Regional Medical Center', URL='https://www.trinitasrmc.org/', target='_blank', tooltip='Trinitas Regional Medical Center: Trinitas Regional Medical Center') - c.node('Capital Health Regional Medical Center', URL='https://www.capitalhealth.org/', target='_blank', tooltip='Capital Health Regional Medical Center: Capital Health Regional Medical Center') - -# Render the graph using streamlit -st.graphviz_chart(g) - -# Define hospitals data -hospitals = [('Hackensack Meridian Health', 'Hackensack University Medical Center', 40.899886, -74.039179), - ('RWJBarnabas Health', 'Robert Wood Johnson University Hospital', 40.491301, -74.450611), - ('Atlantic Health System', 'Morristown Medical Center', 40.787231, -74.473851), - ('Virtua Health', 'Virtua Memorial Hospital', 39.931229, -75.025831), - ('Inspira Health', 'Inspira Medical Center Vineland', 39.460225, -75.035542), - ('Cooper University Health Care', 'Cooper University Hospital', 39.942743, -75.119090), - ('University Hospital', 'University Hospital', 40.742310, -74.177609), - ('Robert Wood Johnson University Hospital Hamilton', 'Robert Wood Johnson University Hospital Hamilton', 40.214008, -74.679619), - ('Trinitas Regional Medical Center', 'Trinitas Regional Medical Center', 40.661474, -74.215013), - ('Capital Health Regional Medical Center', 'Capital Health Regional Medical Center', 40.266778, -74.796452)] - -#Create a map centered on New Jersey -m = folium.Map(location=[40.0583, -74.4057], zoom_start=8) - -#Add markers for each hospital -for hospital in hospitals: - folium.Marker( - location=[hospital[2], hospital[3]], - popup=f'{hospital[1]}
      {hospital[2]},{hospital[3]}' - ).add_to(m) - -#Display the map in Streamlit -folium_static(m) diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/mihoyo/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/mihoyo/__init__.py deleted file mode 100644 index 13d31a63c982377c571678c0aba342c7d1123833..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/mihoyo/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from pathlib import Path -from typing import List - -from pil_utils import BuildImage - -from meme_generator import add_meme -from meme_generator.utils import make_png_or_gif - -img_dir = Path(__file__).parent / "images" - - -def mihoyo(images: List[BuildImage], texts, args): - mask = BuildImage.new("RGBA", (500, 60), (53, 49, 65, 230)) - logo = BuildImage.open(img_dir / "logo.png").resize_height(50) - - def make(img: BuildImage) -> BuildImage: - img = img.convert("RGBA").resize((500, 500), keep_ratio=True) - img.paste(mask, (0, 440), alpha=True) - img.paste(logo, ((img.width - logo.width) // 2, 445), alpha=True) - return img.circle_corner(100) - - return make_png_or_gif(images[0], make) - - -add_meme("mihoyo", mihoyo, min_images=1, max_images=1, keywords=["米哈游"]) diff --git a/spaces/CofAI/sd-2.1/README.md b/spaces/CofAI/sd-2.1/README.md deleted file mode 100644 index a6eb8b4d8aec2f17721c763ac3877a075efbe56e..0000000000000000000000000000000000000000 --- a/spaces/CofAI/sd-2.1/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stable Diffusion 2.1 -emoji: 🔥 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: sd-2-1.py -pinned: true -license: openrail ---- - -This is the space for image generation! \ No newline at end of file diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py deleted file mode 100644 index 482081b8de7431282c8a017cd34d965c8f355bb0..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import torch -from torch import nn - -from .roi_box_feature_extractors import make_roi_box_feature_extractor -from .roi_box_predictors import make_roi_box_predictor -from .inference import make_roi_box_post_processor -from .loss import make_roi_box_loss_evaluator - - -class ROIBoxHead(torch.nn.Module): - """ - Generic Box Head class. - """ - - def __init__(self, cfg, in_channels): - super(ROIBoxHead, self).__init__() - self.feature_extractor = make_roi_box_feature_extractor(cfg, in_channels) - self.predictor = make_roi_box_predictor( - cfg, self.feature_extractor.out_channels) - self.post_processor = make_roi_box_post_processor(cfg) - self.loss_evaluator = make_roi_box_loss_evaluator(cfg) - - def forward(self, features, proposals, targets=None): - """ - Arguments: - features (list[Tensor]): feature-maps from possibly several levels - proposals (list[BoxList]): proposal boxes - targets (list[BoxList], optional): the ground-truth targets. - - Returns: - x (Tensor): the result of the feature extractor - proposals (list[BoxList]): during training, the subsampled proposals - are returned. During testing, the predicted boxlists are returned - losses (dict[Tensor]): During training, returns the losses for the - head. During testing, returns an empty dict. 
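-
-        A minimal call sketch (``cfg``, ``in_channels``, ``features``,
-        ``proposals`` and ``targets`` are assumed to come from the
-        surrounding detector's build and forward steps):
-
-            head = build_roi_box_head(cfg, in_channels)
-            x, result, losses = head(features, proposals, targets)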
- """ - - if self.training: - # Faster R-CNN subsamples during training the proposals with a fixed - # positive / negative ratio - with torch.no_grad(): - proposals = self.loss_evaluator.subsample(proposals, targets) - - # extract features that will be fed to the final classifier. The - # feature_extractor generally corresponds to the pooler + heads - x = self.feature_extractor(features, proposals) - # final classifier that converts the features into predictions - class_logits, box_regression = self.predictor(x) - - if not self.training: - result = self.post_processor((class_logits, box_regression), proposals) - return x, result, {} - - loss_classifier, loss_box_reg = self.loss_evaluator( - [class_logits], [box_regression] - ) - return ( - x, - proposals, - dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg), - ) - - -def build_roi_box_head(cfg, in_channels): - """ - Constructs a new box head. - By default, uses ROIBoxHead, but if it turns out not to be enough, just register a new class - and make it a parameter in the config - """ - return ROIBoxHead(cfg, in_channels) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_n_a_m_e.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_n_a_m_e.py deleted file mode 100644 index 152e4f268dca40bf2596833154b64e114289c78a..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_n_a_m_e.py +++ /dev/null @@ -1,1213 +0,0 @@ -# -*- coding: utf-8 -*- -from fontTools.misc import sstruct -from fontTools.misc.textTools import ( - bytechr, - byteord, - bytesjoin, - strjoin, - tobytes, - tostr, - safeEval, -) -from fontTools.misc.encodingTools import getEncoding -from fontTools.ttLib import newTable -from fontTools.ttLib.ttVisitor import TTVisitor -from fontTools import ttLib -import fontTools.ttLib.tables.otTables as otTables -from fontTools.ttLib.tables import C_P_A_L_ -from . import DefaultTable -import struct -import logging - - -log = logging.getLogger(__name__) - -nameRecordFormat = """ - > # big endian - platformID: H - platEncID: H - langID: H - nameID: H - length: H - offset: H -""" - -nameRecordSize = sstruct.calcsize(nameRecordFormat) - - -class table__n_a_m_e(DefaultTable.DefaultTable): - dependencies = ["ltag"] - - def decompile(self, data, ttFont): - format, n, stringOffset = struct.unpack(b">HHH", data[:6]) - expectedStringOffset = 6 + n * nameRecordSize - if stringOffset != expectedStringOffset: - log.error( - "'name' table stringOffset incorrect. Expected: %s; Actual: %s", - expectedStringOffset, - stringOffset, - ) - stringData = data[stringOffset:] - data = data[6:] - self.names = [] - for i in range(n): - if len(data) < 12: - log.error("skipping malformed name record #%d", i) - continue - name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord()) - name.string = stringData[name.offset : name.offset + name.length] - if name.offset + name.length > len(stringData): - log.error("skipping malformed name record #%d", i) - continue - assert len(name.string) == name.length - # if (name.platEncID, name.platformID) in ((0, 0), (1, 3)): - # if len(name.string) % 2: - # print "2-byte string doesn't have even length!" 
- # print name.__dict__ - del name.offset, name.length - self.names.append(name) - - def compile(self, ttFont): - if not hasattr(self, "names"): - # only happens when there are NO name table entries read - # from the TTX file - self.names = [] - names = self.names - names.sort() # sort according to the spec; see NameRecord.__lt__() - stringData = b"" - format = 0 - n = len(names) - stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat) - data = struct.pack(b">HHH", format, n, stringOffset) - lastoffset = 0 - done = {} # remember the data so we can reuse the "pointers" - for name in names: - string = name.toBytes() - if string in done: - name.offset, name.length = done[string] - else: - name.offset, name.length = done[string] = len(stringData), len(string) - stringData = bytesjoin([stringData, string]) - data = data + sstruct.pack(nameRecordFormat, name) - return data + stringData - - def toXML(self, writer, ttFont): - for name in self.names: - name.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name != "namerecord": - return # ignore unknown tags - if not hasattr(self, "names"): - self.names = [] - name = NameRecord() - self.names.append(name) - name.fromXML(name, attrs, content, ttFont) - - def getName(self, nameID, platformID, platEncID, langID=None): - for namerecord in self.names: - if ( - namerecord.nameID == nameID - and namerecord.platformID == platformID - and namerecord.platEncID == platEncID - ): - if langID is None or namerecord.langID == langID: - return namerecord - return None # not found - - def getDebugName(self, nameID): - englishName = someName = None - for name in self.names: - if name.nameID != nameID: - continue - try: - unistr = name.toUnicode() - except UnicodeDecodeError: - continue - - someName = unistr - if (name.platformID, name.langID) in ((1, 0), (3, 0x409)): - englishName = unistr - break - if englishName: - return englishName - elif someName: - return someName - else: - return None - - def getFirstDebugName(self, nameIDs): - for nameID in nameIDs: - name = self.getDebugName(nameID) - if name is not None: - return name - return None - - def getBestFamilyName(self): - # 21 = WWS Family Name - # 16 = Typographic Family Name - # 1 = Family Name - return self.getFirstDebugName((21, 16, 1)) - - def getBestSubFamilyName(self): - # 22 = WWS SubFamily Name - # 17 = Typographic SubFamily Name - # 2 = SubFamily Name - return self.getFirstDebugName((22, 17, 2)) - - def getBestFullName(self): - # 4 = Full Name - # 6 = PostScript Name - for nameIDs in ((21, 22), (16, 17), (1, 2), (4,), (6,)): - if len(nameIDs) == 2: - name_fam = self.getDebugName(nameIDs[0]) - name_subfam = self.getDebugName(nameIDs[1]) - if None in [name_fam, name_subfam]: - continue # if any is None, skip - name = f"{name_fam} {name_subfam}" - if name_subfam.lower() == "regular": - name = f"{name_fam}" - return name - else: - name = self.getDebugName(nameIDs[0]) - if name is not None: - return name - return None - - def setName(self, string, nameID, platformID, platEncID, langID): - """Set the 'string' for the name record identified by 'nameID', 'platformID', - 'platEncID' and 'langID'. If a record with that nameID doesn't exist, create it - and append to the name table. - - 'string' can be of type `str` (`unicode` in PY2) or `bytes`. In the latter case, - it is assumed to be already encoded with the correct plaform-specific encoding - identified by the (platformID, platEncID, langID) triplet. A warning is issued - to prevent unexpected results. 
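-
-        A minimal sketch (``font`` is assumed to be an already opened TTFont;
-        the name string itself is a placeholder):
-
-            # Full font name (nameID 4) for Windows, Unicode BMP, US English.
-            font["name"].setName("Foo Regular", 4, 3, 1, 0x409)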
- """ - if not hasattr(self, "names"): - self.names = [] - if not isinstance(string, str): - if isinstance(string, bytes): - log.warning( - "name string is bytes, ensure it's correctly encoded: %r", string - ) - else: - raise TypeError( - "expected unicode or bytes, found %s: %r" - % (type(string).__name__, string) - ) - namerecord = self.getName(nameID, platformID, platEncID, langID) - if namerecord: - namerecord.string = string - else: - self.names.append(makeName(string, nameID, platformID, platEncID, langID)) - - def removeNames(self, nameID=None, platformID=None, platEncID=None, langID=None): - """Remove any name records identified by the given combination of 'nameID', - 'platformID', 'platEncID' and 'langID'. - """ - args = { - argName: argValue - for argName, argValue in ( - ("nameID", nameID), - ("platformID", platformID), - ("platEncID", platEncID), - ("langID", langID), - ) - if argValue is not None - } - if not args: - # no arguments, nothing to do - return - self.names = [ - rec - for rec in self.names - if any( - argValue != getattr(rec, argName) for argName, argValue in args.items() - ) - ] - - @staticmethod - def removeUnusedNames(ttFont): - """Remove any name records which are not in NameID range 0-255 and not utilized - within the font itself.""" - visitor = NameRecordVisitor() - visitor.visit(ttFont) - toDelete = set() - for record in ttFont["name"].names: - # Name IDs 26 to 255, inclusive, are reserved for future standard names. - # https://learn.microsoft.com/en-us/typography/opentype/spec/name#name-ids - if record.nameID < 256: - continue - if record.nameID not in visitor.seen: - toDelete.add(record.nameID) - - if not toDelete: - return - log.info(f"Deleting name records with NameIDs {toDelete}") - for nameID in toDelete: - ttFont["name"].removeNames(nameID) - return toDelete - - def _findUnusedNameID(self, minNameID=256): - """Finds an unused name id. - - The nameID is assigned in the range between 'minNameID' and 32767 (inclusive), - following the last nameID in the name table. - """ - names = getattr(self, "names", []) - nameID = 1 + max([n.nameID for n in names] + [minNameID - 1]) - if nameID > 32767: - raise ValueError("nameID must be less than 32768") - return nameID - - def findMultilingualName(self, names, windows=True, mac=True, minNameID=0): - """Return the name ID of an existing multilingual name that - matches the 'names' dictionary, or None if not found. - - 'names' is a dictionary with the name in multiple languages, - such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}. - The keys can be arbitrary IETF BCP 47 language codes; - the values are Unicode strings. - - If 'windows' is True, the returned name ID is guaranteed - exist for all requested languages for platformID=3 and - platEncID=1. - If 'mac' is True, the returned name ID is guaranteed to exist - for all requested languages for platformID=1 and platEncID=0. - - The returned name ID will not be less than the 'minNameID' - argument. 
- """ - # Gather the set of requested - # (string, platformID, platEncID, langID) - # tuples - reqNameSet = set() - for lang, name in sorted(names.items()): - if windows: - windowsName = _makeWindowsName(name, None, lang) - if windowsName is not None: - reqNameSet.add( - ( - windowsName.string, - windowsName.platformID, - windowsName.platEncID, - windowsName.langID, - ) - ) - if mac: - macName = _makeMacName(name, None, lang) - if macName is not None: - reqNameSet.add( - ( - macName.string, - macName.platformID, - macName.platEncID, - macName.langID, - ) - ) - - # Collect matching name IDs - matchingNames = dict() - for name in self.names: - try: - key = (name.toUnicode(), name.platformID, name.platEncID, name.langID) - except UnicodeDecodeError: - continue - if key in reqNameSet and name.nameID >= minNameID: - nameSet = matchingNames.setdefault(name.nameID, set()) - nameSet.add(key) - - # Return the first name ID that defines all requested strings - for nameID, nameSet in sorted(matchingNames.items()): - if nameSet == reqNameSet: - return nameID - - return None # not found - - def addMultilingualName( - self, names, ttFont=None, nameID=None, windows=True, mac=True, minNameID=0 - ): - """Add a multilingual name, returning its name ID - - 'names' is a dictionary with the name in multiple languages, - such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}. - The keys can be arbitrary IETF BCP 47 language codes; - the values are Unicode strings. - - 'ttFont' is the TTFont to which the names are added, or None. - If present, the font's 'ltag' table can get populated - to store exotic language codes, which allows encoding - names that otherwise cannot get encoded at all. - - 'nameID' is the name ID to be used, or None to let the library - find an existing set of name records that match, or pick an - unused name ID. - - If 'windows' is True, a platformID=3 name record will be added. - If 'mac' is True, a platformID=1 name record will be added. - - If the 'nameID' argument is None, the created nameID will not - be less than the 'minNameID' argument. - """ - if not hasattr(self, "names"): - self.names = [] - if nameID is None: - # Reuse nameID if possible - nameID = self.findMultilingualName( - names, windows=windows, mac=mac, minNameID=minNameID - ) - if nameID is not None: - return nameID - nameID = self._findUnusedNameID() - # TODO: Should minimize BCP 47 language codes. - # https://github.com/fonttools/fonttools/issues/930 - for lang, name in sorted(names.items()): - if windows: - windowsName = _makeWindowsName(name, nameID, lang) - if windowsName is not None: - self.names.append(windowsName) - else: - # We cannot not make a Windows name: make sure we add a - # Mac name as a fallback. This can happen for exotic - # BCP47 language tags that have no Windows language code. - mac = True - if mac: - macName = _makeMacName(name, nameID, lang, ttFont) - if macName is not None: - self.names.append(macName) - return nameID - - def addName(self, string, platforms=((1, 0, 0), (3, 1, 0x409)), minNameID=255): - """Add a new name record containing 'string' for each (platformID, platEncID, - langID) tuple specified in the 'platforms' list. - - The nameID is assigned in the range between 'minNameID'+1 and 32767 (inclusive), - following the last nameID in the name table. - If no 'platforms' are specified, two English name records are added, one for the - Macintosh (platformID=0), and one for the Windows platform (3). 
- - The 'string' must be a Unicode string, so it can be encoded with different, - platform-specific encodings. - - Return the new nameID. - """ - assert ( - len(platforms) > 0 - ), "'platforms' must contain at least one (platformID, platEncID, langID) tuple" - if not hasattr(self, "names"): - self.names = [] - if not isinstance(string, str): - raise TypeError( - "expected str, found %s: %r" % (type(string).__name__, string) - ) - nameID = self._findUnusedNameID(minNameID + 1) - for platformID, platEncID, langID in platforms: - self.names.append(makeName(string, nameID, platformID, platEncID, langID)) - return nameID - - -def makeName(string, nameID, platformID, platEncID, langID): - name = NameRecord() - name.string, name.nameID, name.platformID, name.platEncID, name.langID = ( - string, - nameID, - platformID, - platEncID, - langID, - ) - return name - - -def _makeWindowsName(name, nameID, language): - """Create a NameRecord for the Microsoft Windows platform - - 'language' is an arbitrary IETF BCP 47 language identifier such - as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. If Microsoft Windows - does not support the desired language, the result will be None. - Future versions of fonttools might return a NameRecord for the - OpenType 'name' table format 1, but this is not implemented yet. - """ - langID = _WINDOWS_LANGUAGE_CODES.get(language.lower()) - if langID is not None: - return makeName(name, nameID, 3, 1, langID) - else: - log.warning( - "cannot add Windows name in language %s " - "because fonttools does not yet support " - "name table format 1" % language - ) - return None - - -def _makeMacName(name, nameID, language, font=None): - """Create a NameRecord for Apple platforms - - 'language' is an arbitrary IETF BCP 47 language identifier such - as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we - create a Macintosh NameRecord that is understood by old applications - (platform ID 1 and an old-style Macintosh language enum). If this - is not possible, we create a Unicode NameRecord (platform ID 0) - whose language points to the font’s 'ltag' table. The latter - can encode any string in any language, but legacy applications - might not recognize the format (in which case they will ignore - those names). - - 'font' should be the TTFont for which you want to create a name. - If 'font' is None, we only return NameRecords for legacy Macintosh; - in that case, the result will be None for names that need to - be encoded with an 'ltag' table. - - See the section “The language identifier” in Apple’s specification: - https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html - """ - macLang = _MAC_LANGUAGE_CODES.get(language.lower()) - macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang) - if macLang is not None and macScript is not None: - encoding = getEncoding(1, macScript, macLang, default="ascii") - # Check if we can actually encode this name. If we can't, - # for example because we have no support for the legacy - # encoding, or because the name string contains Unicode - # characters that the legacy encoding cannot represent, - # we fall back to encoding the name in Unicode and put - # the language tag into the ltag table. 
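-        # The strict probe below raises UnicodeEncodeError when the legacy
-        # codec cannot represent every character in the name, which triggers
-        # the Unicode + 'ltag' fallback further down.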
- try: - _ = tobytes(name, encoding, errors="strict") - return makeName(name, nameID, 1, macScript, macLang) - except UnicodeEncodeError: - pass - if font is not None: - ltag = font.tables.get("ltag") - if ltag is None: - ltag = font["ltag"] = newTable("ltag") - # 0 = Unicode; 4 = “Unicode 2.0 or later semantics (non-BMP characters allowed)” - # “The preferred platform-specific code for Unicode would be 3 or 4.” - # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html - return makeName(name, nameID, 0, 4, ltag.addTag(language)) - else: - log.warning( - "cannot store language %s into 'ltag' table " - "without having access to the TTFont object" % language - ) - return None - - -class NameRecord(object): - def getEncoding(self, default="ascii"): - """Returns the Python encoding name for this name entry based on its platformID, - platEncID, and langID. If encoding for these values is not known, by default - 'ascii' is returned. That can be overriden by passing a value to the default - argument. - """ - return getEncoding(self.platformID, self.platEncID, self.langID, default) - - def encodingIsUnicodeCompatible(self): - return self.getEncoding(None) in ["utf_16_be", "ucs2be", "ascii", "latin1"] - - def __str__(self): - return self.toStr(errors="backslashreplace") - - def isUnicode(self): - return self.platformID == 0 or ( - self.platformID == 3 and self.platEncID in [0, 1, 10] - ) - - def toUnicode(self, errors="strict"): - """ - If self.string is a Unicode string, return it; otherwise try decoding the - bytes in self.string to a Unicode string using the encoding of this - entry as returned by self.getEncoding(); Note that self.getEncoding() - returns 'ascii' if the encoding is unknown to the library. - - Certain heuristics are performed to recover data from bytes that are - ill-formed in the chosen encoding, or that otherwise look misencoded - (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE - but marked otherwise). If the bytes are ill-formed and the heuristics fail, - the error is handled according to the errors parameter to this function, which is - passed to the underlying decode() function; by default it throws a - UnicodeDecodeError exception. - - Note: The mentioned heuristics mean that roundtripping a font to XML and back - to binary might recover some misencoded data whereas just loading the font - and saving it back will not change them. - """ - - def isascii(b): - return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D] - - encoding = self.getEncoding() - string = self.string - - if ( - isinstance(string, bytes) - and encoding == "utf_16_be" - and len(string) % 2 == 1 - ): - # Recover badly encoded UTF-16 strings that have an odd number of bytes: - # - If the last byte is zero, drop it. Otherwise, - # - If all the odd bytes are zero and all the even bytes are ASCII, - # prepend one zero byte. Otherwise, - # - If first byte is zero and all other bytes are ASCII, insert zero - # bytes between consecutive ASCII bytes. - # - # (Yes, I've seen all of these in the wild... 
sigh) - if byteord(string[-1]) == 0: - string = string[:-1] - elif all( - byteord(b) == 0 if i % 2 else isascii(byteord(b)) - for i, b in enumerate(string) - ): - string = b"\0" + string - elif byteord(string[0]) == 0 and all( - isascii(byteord(b)) for b in string[1:] - ): - string = bytesjoin(b"\0" + bytechr(byteord(b)) for b in string[1:]) - - string = tostr(string, encoding=encoding, errors=errors) - - # If decoded strings still looks like UTF-16BE, it suggests a double-encoding. - # Fix it up. - if all( - ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i, c in enumerate(string) - ): - # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text, - # narrow it down. - string = "".join(c for c in string[1::2]) - - return string - - def toBytes(self, errors="strict"): - """If self.string is a bytes object, return it; otherwise try encoding - the Unicode string in self.string to bytes using the encoding of this - entry as returned by self.getEncoding(); Note that self.getEncoding() - returns 'ascii' if the encoding is unknown to the library. - - If the Unicode string cannot be encoded to bytes in the chosen encoding, - the error is handled according to the errors parameter to this function, - which is passed to the underlying encode() function; by default it throws a - UnicodeEncodeError exception. - """ - return tobytes(self.string, encoding=self.getEncoding(), errors=errors) - - toStr = toUnicode - - def toXML(self, writer, ttFont): - try: - unistr = self.toUnicode() - except UnicodeDecodeError: - unistr = None - attrs = [ - ("nameID", self.nameID), - ("platformID", self.platformID), - ("platEncID", self.platEncID), - ("langID", hex(self.langID)), - ] - - if unistr is None or not self.encodingIsUnicodeCompatible(): - attrs.append(("unicode", unistr is not None)) - - writer.begintag("namerecord", attrs) - writer.newline() - if unistr is not None: - writer.write(unistr) - else: - writer.write8bit(self.string) - writer.newline() - writer.endtag("namerecord") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.nameID = safeEval(attrs["nameID"]) - self.platformID = safeEval(attrs["platformID"]) - self.platEncID = safeEval(attrs["platEncID"]) - self.langID = safeEval(attrs["langID"]) - s = strjoin(content).strip() - encoding = self.getEncoding() - if self.encodingIsUnicodeCompatible() or safeEval( - attrs.get("unicode", "False") - ): - self.string = s.encode(encoding) - else: - # This is the inverse of write8bit... 
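-            # write8bit dumped the raw bytes one-for-one as Latin-1 text, so
-            # encoding the parsed text back as Latin-1 restores the original
-            # byte string.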
- self.string = s.encode("latin1") - - def __lt__(self, other): - if type(self) != type(other): - return NotImplemented - - try: - selfTuple = ( - self.platformID, - self.platEncID, - self.langID, - self.nameID, - ) - otherTuple = ( - other.platformID, - other.platEncID, - other.langID, - other.nameID, - ) - except AttributeError: - # This can only happen for - # 1) an object that is not a NameRecord, or - # 2) an unlikely incomplete NameRecord object which has not been - # fully populated - return NotImplemented - - try: - # Include the actual NameRecord string in the comparison tuples - selfTuple = selfTuple + (self.toBytes(),) - otherTuple = otherTuple + (other.toBytes(),) - except UnicodeEncodeError as e: - # toBytes caused an encoding error in either of the two, so content - # to sorting based on IDs only - log.error("NameRecord sorting failed to encode: %s" % e) - - # Implemented so that list.sort() sorts according to the spec by using - # the order of the tuple items and their comparison - return selfTuple < otherTuple - - def __repr__(self): - return "" % ( - self.nameID, - self.platformID, - self.langID, - ) - - -# Windows language ID → IETF BCP-47 language tag -# -# While Microsoft indicates a region/country for all its language -# IDs, we follow Unicode practice by omitting “most likely subtags” -# as per Unicode CLDR. For example, English is simply “en” and not -# “en-Latn” because according to Unicode, the default script -# for English is Latin. -# -# http://www.unicode.org/cldr/charts/latest/supplemental/likely_subtags.html -# http://www.iana.org/assignments/language-subtag-registry/language-subtag-registry -_WINDOWS_LANGUAGES = { - 0x0436: "af", - 0x041C: "sq", - 0x0484: "gsw", - 0x045E: "am", - 0x1401: "ar-DZ", - 0x3C01: "ar-BH", - 0x0C01: "ar", - 0x0801: "ar-IQ", - 0x2C01: "ar-JO", - 0x3401: "ar-KW", - 0x3001: "ar-LB", - 0x1001: "ar-LY", - 0x1801: "ary", - 0x2001: "ar-OM", - 0x4001: "ar-QA", - 0x0401: "ar-SA", - 0x2801: "ar-SY", - 0x1C01: "aeb", - 0x3801: "ar-AE", - 0x2401: "ar-YE", - 0x042B: "hy", - 0x044D: "as", - 0x082C: "az-Cyrl", - 0x042C: "az", - 0x046D: "ba", - 0x042D: "eu", - 0x0423: "be", - 0x0845: "bn", - 0x0445: "bn-IN", - 0x201A: "bs-Cyrl", - 0x141A: "bs", - 0x047E: "br", - 0x0402: "bg", - 0x0403: "ca", - 0x0C04: "zh-HK", - 0x1404: "zh-MO", - 0x0804: "zh", - 0x1004: "zh-SG", - 0x0404: "zh-TW", - 0x0483: "co", - 0x041A: "hr", - 0x101A: "hr-BA", - 0x0405: "cs", - 0x0406: "da", - 0x048C: "prs", - 0x0465: "dv", - 0x0813: "nl-BE", - 0x0413: "nl", - 0x0C09: "en-AU", - 0x2809: "en-BZ", - 0x1009: "en-CA", - 0x2409: "en-029", - 0x4009: "en-IN", - 0x1809: "en-IE", - 0x2009: "en-JM", - 0x4409: "en-MY", - 0x1409: "en-NZ", - 0x3409: "en-PH", - 0x4809: "en-SG", - 0x1C09: "en-ZA", - 0x2C09: "en-TT", - 0x0809: "en-GB", - 0x0409: "en", - 0x3009: "en-ZW", - 0x0425: "et", - 0x0438: "fo", - 0x0464: "fil", - 0x040B: "fi", - 0x080C: "fr-BE", - 0x0C0C: "fr-CA", - 0x040C: "fr", - 0x140C: "fr-LU", - 0x180C: "fr-MC", - 0x100C: "fr-CH", - 0x0462: "fy", - 0x0456: "gl", - 0x0437: "ka", - 0x0C07: "de-AT", - 0x0407: "de", - 0x1407: "de-LI", - 0x1007: "de-LU", - 0x0807: "de-CH", - 0x0408: "el", - 0x046F: "kl", - 0x0447: "gu", - 0x0468: "ha", - 0x040D: "he", - 0x0439: "hi", - 0x040E: "hu", - 0x040F: "is", - 0x0470: "ig", - 0x0421: "id", - 0x045D: "iu", - 0x085D: "iu-Latn", - 0x083C: "ga", - 0x0434: "xh", - 0x0435: "zu", - 0x0410: "it", - 0x0810: "it-CH", - 0x0411: "ja", - 0x044B: "kn", - 0x043F: "kk", - 0x0453: "km", - 0x0486: "quc", - 0x0487: "rw", - 0x0441: "sw", - 0x0457: "kok", - 0x0412: 
"ko", - 0x0440: "ky", - 0x0454: "lo", - 0x0426: "lv", - 0x0427: "lt", - 0x082E: "dsb", - 0x046E: "lb", - 0x042F: "mk", - 0x083E: "ms-BN", - 0x043E: "ms", - 0x044C: "ml", - 0x043A: "mt", - 0x0481: "mi", - 0x047A: "arn", - 0x044E: "mr", - 0x047C: "moh", - 0x0450: "mn", - 0x0850: "mn-CN", - 0x0461: "ne", - 0x0414: "nb", - 0x0814: "nn", - 0x0482: "oc", - 0x0448: "or", - 0x0463: "ps", - 0x0415: "pl", - 0x0416: "pt", - 0x0816: "pt-PT", - 0x0446: "pa", - 0x046B: "qu-BO", - 0x086B: "qu-EC", - 0x0C6B: "qu", - 0x0418: "ro", - 0x0417: "rm", - 0x0419: "ru", - 0x243B: "smn", - 0x103B: "smj-NO", - 0x143B: "smj", - 0x0C3B: "se-FI", - 0x043B: "se", - 0x083B: "se-SE", - 0x203B: "sms", - 0x183B: "sma-NO", - 0x1C3B: "sms", - 0x044F: "sa", - 0x1C1A: "sr-Cyrl-BA", - 0x0C1A: "sr", - 0x181A: "sr-Latn-BA", - 0x081A: "sr-Latn", - 0x046C: "nso", - 0x0432: "tn", - 0x045B: "si", - 0x041B: "sk", - 0x0424: "sl", - 0x2C0A: "es-AR", - 0x400A: "es-BO", - 0x340A: "es-CL", - 0x240A: "es-CO", - 0x140A: "es-CR", - 0x1C0A: "es-DO", - 0x300A: "es-EC", - 0x440A: "es-SV", - 0x100A: "es-GT", - 0x480A: "es-HN", - 0x080A: "es-MX", - 0x4C0A: "es-NI", - 0x180A: "es-PA", - 0x3C0A: "es-PY", - 0x280A: "es-PE", - 0x500A: "es-PR", - # Microsoft has defined two different language codes for - # “Spanish with modern sorting” and “Spanish with traditional - # sorting”. This makes sense for collation APIs, and it would be - # possible to express this in BCP 47 language tags via Unicode - # extensions (eg., “es-u-co-trad” is “Spanish with traditional - # sorting”). However, for storing names in fonts, this distinction - # does not make sense, so we use “es” in both cases. - 0x0C0A: "es", - 0x040A: "es", - 0x540A: "es-US", - 0x380A: "es-UY", - 0x200A: "es-VE", - 0x081D: "sv-FI", - 0x041D: "sv", - 0x045A: "syr", - 0x0428: "tg", - 0x085F: "tzm", - 0x0449: "ta", - 0x0444: "tt", - 0x044A: "te", - 0x041E: "th", - 0x0451: "bo", - 0x041F: "tr", - 0x0442: "tk", - 0x0480: "ug", - 0x0422: "uk", - 0x042E: "hsb", - 0x0420: "ur", - 0x0843: "uz-Cyrl", - 0x0443: "uz", - 0x042A: "vi", - 0x0452: "cy", - 0x0488: "wo", - 0x0485: "sah", - 0x0478: "ii", - 0x046A: "yo", -} - - -_MAC_LANGUAGES = { - 0: "en", - 1: "fr", - 2: "de", - 3: "it", - 4: "nl", - 5: "sv", - 6: "es", - 7: "da", - 8: "pt", - 9: "no", - 10: "he", - 11: "ja", - 12: "ar", - 13: "fi", - 14: "el", - 15: "is", - 16: "mt", - 17: "tr", - 18: "hr", - 19: "zh-Hant", - 20: "ur", - 21: "hi", - 22: "th", - 23: "ko", - 24: "lt", - 25: "pl", - 26: "hu", - 27: "es", - 28: "lv", - 29: "se", - 30: "fo", - 31: "fa", - 32: "ru", - 33: "zh", - 34: "nl-BE", - 35: "ga", - 36: "sq", - 37: "ro", - 38: "cz", - 39: "sk", - 40: "sl", - 41: "yi", - 42: "sr", - 43: "mk", - 44: "bg", - 45: "uk", - 46: "be", - 47: "uz", - 48: "kk", - 49: "az-Cyrl", - 50: "az-Arab", - 51: "hy", - 52: "ka", - 53: "mo", - 54: "ky", - 55: "tg", - 56: "tk", - 57: "mn-CN", - 58: "mn", - 59: "ps", - 60: "ks", - 61: "ku", - 62: "sd", - 63: "bo", - 64: "ne", - 65: "sa", - 66: "mr", - 67: "bn", - 68: "as", - 69: "gu", - 70: "pa", - 71: "or", - 72: "ml", - 73: "kn", - 74: "ta", - 75: "te", - 76: "si", - 77: "my", - 78: "km", - 79: "lo", - 80: "vi", - 81: "id", - 82: "tl", - 83: "ms", - 84: "ms-Arab", - 85: "am", - 86: "ti", - 87: "om", - 88: "so", - 89: "sw", - 90: "rw", - 91: "rn", - 92: "ny", - 93: "mg", - 94: "eo", - 128: "cy", - 129: "eu", - 130: "ca", - 131: "la", - 132: "qu", - 133: "gn", - 134: "ay", - 135: "tt", - 136: "ug", - 137: "dz", - 138: "jv", - 139: "su", - 140: "gl", - 141: "af", - 142: "br", - 143: "iu", - 144: "gd", - 145: "gv", - 146: 
"ga", - 147: "to", - 148: "el-polyton", - 149: "kl", - 150: "az", - 151: "nn", -} - - -_WINDOWS_LANGUAGE_CODES = { - lang.lower(): code for code, lang in _WINDOWS_LANGUAGES.items() -} -_MAC_LANGUAGE_CODES = {lang.lower(): code for code, lang in _MAC_LANGUAGES.items()} - - -# MacOS language ID → MacOS script ID -# -# Note that the script ID is not sufficient to determine what encoding -# to use in TrueType files. For some languages, MacOS used a modification -# of a mainstream script. For example, an Icelandic name would be stored -# with smRoman in the TrueType naming table, but the actual encoding -# is a special Icelandic version of the normal Macintosh Roman encoding. -# As another example, Inuktitut uses an 8-bit encoding for Canadian Aboriginal -# Syllables but MacOS had run out of available script codes, so this was -# done as a (pretty radical) “modification” of Ethiopic. -# -# http://unicode.org/Public/MAPPINGS/VENDORS/APPLE/Readme.txt -_MAC_LANGUAGE_TO_SCRIPT = { - 0: 0, # langEnglish → smRoman - 1: 0, # langFrench → smRoman - 2: 0, # langGerman → smRoman - 3: 0, # langItalian → smRoman - 4: 0, # langDutch → smRoman - 5: 0, # langSwedish → smRoman - 6: 0, # langSpanish → smRoman - 7: 0, # langDanish → smRoman - 8: 0, # langPortuguese → smRoman - 9: 0, # langNorwegian → smRoman - 10: 5, # langHebrew → smHebrew - 11: 1, # langJapanese → smJapanese - 12: 4, # langArabic → smArabic - 13: 0, # langFinnish → smRoman - 14: 6, # langGreek → smGreek - 15: 0, # langIcelandic → smRoman (modified) - 16: 0, # langMaltese → smRoman - 17: 0, # langTurkish → smRoman (modified) - 18: 0, # langCroatian → smRoman (modified) - 19: 2, # langTradChinese → smTradChinese - 20: 4, # langUrdu → smArabic - 21: 9, # langHindi → smDevanagari - 22: 21, # langThai → smThai - 23: 3, # langKorean → smKorean - 24: 29, # langLithuanian → smCentralEuroRoman - 25: 29, # langPolish → smCentralEuroRoman - 26: 29, # langHungarian → smCentralEuroRoman - 27: 29, # langEstonian → smCentralEuroRoman - 28: 29, # langLatvian → smCentralEuroRoman - 29: 0, # langSami → smRoman - 30: 0, # langFaroese → smRoman (modified) - 31: 4, # langFarsi → smArabic (modified) - 32: 7, # langRussian → smCyrillic - 33: 25, # langSimpChinese → smSimpChinese - 34: 0, # langFlemish → smRoman - 35: 0, # langIrishGaelic → smRoman (modified) - 36: 0, # langAlbanian → smRoman - 37: 0, # langRomanian → smRoman (modified) - 38: 29, # langCzech → smCentralEuroRoman - 39: 29, # langSlovak → smCentralEuroRoman - 40: 0, # langSlovenian → smRoman (modified) - 41: 5, # langYiddish → smHebrew - 42: 7, # langSerbian → smCyrillic - 43: 7, # langMacedonian → smCyrillic - 44: 7, # langBulgarian → smCyrillic - 45: 7, # langUkrainian → smCyrillic (modified) - 46: 7, # langByelorussian → smCyrillic - 47: 7, # langUzbek → smCyrillic - 48: 7, # langKazakh → smCyrillic - 49: 7, # langAzerbaijani → smCyrillic - 50: 4, # langAzerbaijanAr → smArabic - 51: 24, # langArmenian → smArmenian - 52: 23, # langGeorgian → smGeorgian - 53: 7, # langMoldavian → smCyrillic - 54: 7, # langKirghiz → smCyrillic - 55: 7, # langTajiki → smCyrillic - 56: 7, # langTurkmen → smCyrillic - 57: 27, # langMongolian → smMongolian - 58: 7, # langMongolianCyr → smCyrillic - 59: 4, # langPashto → smArabic - 60: 4, # langKurdish → smArabic - 61: 4, # langKashmiri → smArabic - 62: 4, # langSindhi → smArabic - 63: 26, # langTibetan → smTibetan - 64: 9, # langNepali → smDevanagari - 65: 9, # langSanskrit → smDevanagari - 66: 9, # langMarathi → smDevanagari - 67: 13, # langBengali → smBengali - 68: 13, # 
langAssamese → smBengali - 69: 11, # langGujarati → smGujarati - 70: 10, # langPunjabi → smGurmukhi - 71: 12, # langOriya → smOriya - 72: 17, # langMalayalam → smMalayalam - 73: 16, # langKannada → smKannada - 74: 14, # langTamil → smTamil - 75: 15, # langTelugu → smTelugu - 76: 18, # langSinhalese → smSinhalese - 77: 19, # langBurmese → smBurmese - 78: 20, # langKhmer → smKhmer - 79: 22, # langLao → smLao - 80: 30, # langVietnamese → smVietnamese - 81: 0, # langIndonesian → smRoman - 82: 0, # langTagalog → smRoman - 83: 0, # langMalayRoman → smRoman - 84: 4, # langMalayArabic → smArabic - 85: 28, # langAmharic → smEthiopic - 86: 28, # langTigrinya → smEthiopic - 87: 28, # langOromo → smEthiopic - 88: 0, # langSomali → smRoman - 89: 0, # langSwahili → smRoman - 90: 0, # langKinyarwanda → smRoman - 91: 0, # langRundi → smRoman - 92: 0, # langNyanja → smRoman - 93: 0, # langMalagasy → smRoman - 94: 0, # langEsperanto → smRoman - 128: 0, # langWelsh → smRoman (modified) - 129: 0, # langBasque → smRoman - 130: 0, # langCatalan → smRoman - 131: 0, # langLatin → smRoman - 132: 0, # langQuechua → smRoman - 133: 0, # langGuarani → smRoman - 134: 0, # langAymara → smRoman - 135: 7, # langTatar → smCyrillic - 136: 4, # langUighur → smArabic - 137: 26, # langDzongkha → smTibetan - 138: 0, # langJavaneseRom → smRoman - 139: 0, # langSundaneseRom → smRoman - 140: 0, # langGalician → smRoman - 141: 0, # langAfrikaans → smRoman - 142: 0, # langBreton → smRoman (modified) - 143: 28, # langInuktitut → smEthiopic (modified) - 144: 0, # langScottishGaelic → smRoman (modified) - 145: 0, # langManxGaelic → smRoman (modified) - 146: 0, # langIrishGaelicScript → smRoman (modified) - 147: 0, # langTongan → smRoman - 148: 6, # langGreekAncient → smRoman - 149: 0, # langGreenlandic → smRoman - 150: 0, # langAzerbaijanRoman → smRoman - 151: 0, # langNynorsk → smRoman -} - - -class NameRecordVisitor(TTVisitor): - def __init__(self): - self.seen = set() - - -@NameRecordVisitor.register_attrs( - ( - (otTables.FeatureParamsSize, ("SubfamilyID", "SubfamilyNameID")), - (otTables.FeatureParamsStylisticSet, ("UINameID",)), - ( - otTables.FeatureParamsCharacterVariants, - ( - "FeatUILabelNameID", - "FeatUITooltipTextNameID", - "SampleTextNameID", - "FirstParamUILabelNameID", - ), - ), - (otTables.STAT, ("ElidedFallbackNameID",)), - (otTables.AxisRecord, ("AxisNameID",)), - (otTables.AxisValue, ("ValueNameID",)), - (otTables.FeatureName, ("FeatureNameID",)), - (otTables.Setting, ("SettingNameID",)), - ) -) -def visit(visitor, obj, attr, value): - visitor.seen.add(value) - - -@NameRecordVisitor.register(ttLib.getTableClass("fvar")) -def visit(visitor, obj): - for inst in obj.instances: - if inst.postscriptNameID != 0xFFFF: - visitor.seen.add(inst.postscriptNameID) - visitor.seen.add(inst.subfamilyNameID) - - for axis in obj.axes: - visitor.seen.add(axis.axisNameID) - - -@NameRecordVisitor.register(ttLib.getTableClass("CPAL")) -def visit(visitor, obj): - if obj.version == 1: - visitor.seen.update(obj.paletteLabels) - visitor.seen.update(obj.paletteEntryLabels) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/markdown.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/markdown.py deleted file mode 100644 index 3ef36b1884bfbbb642d14f6851989bd47621cb35..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/markdown.py +++ /dev/null @@ -1,95 +0,0 @@ -"""gr.Markdown() component.""" - -from 
__future__ import annotations - -import inspect -from typing import Any, Callable, Literal - -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import StringSerializable - -from gradio import utils -from gradio.components.base import Component, IOComponent, _Keywords -from gradio.events import ( - Changeable, -) - -set_documentation_group("component") - - -@document() -class Markdown(IOComponent, Changeable, StringSerializable): - """ - Used to render arbitrary Markdown output. Can also render latex enclosed by dollar signs. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a valid {str} that can be rendered as Markdown. - - Demos: blocks_hello, blocks_kinematics - Guides: key-features - """ - - def __init__( - self, - value: str | Callable = "", - *, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - rtl: bool = False, - **kwargs, - ): - """ - Parameters: - value: Value to show in Markdown component. If callable, the function will be called whenever the app loads to set the initial value of the component. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - rtl: If True, sets the direction of the rendered text to right-to-left. Default is False, which renders text left-to-right. - """ - self.md = utils.get_markdown_parser() - self.rtl = rtl - IOComponent.__init__( - self, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - value=value, - **kwargs, - ) - - def postprocess(self, y: str | None) -> str | None: - """ - Parameters: - y: markdown representation - Returns: - HTML rendering of markdown - """ - if y is None: - return None - unindented_y = inspect.cleandoc(y) - return self.md.render(unindented_y) - - def get_config(self): - return { - "value": self.value, - "rtl": self.rtl, - **Component.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - visible: bool | None = None, - rtl: bool | None = None, - ): - updated_config = { - "visible": visible, - "value": value, - "rtl": rtl, - "__type__": "update", - } - return updated_config - - def as_example(self, input_data: str | None) -> str: - postprocessed = self.postprocess(input_data) - return postprocessed if postprocessed else "" diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/state.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/state.py deleted file mode 100644 index 9722fa31e5240b8975af313d58bbcd83bb235fcd..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/state.py +++ /dev/null @@ -1,50 +0,0 @@ -"""gr.State() component.""" - -from __future__ import annotations - -from copy import deepcopy -from typing import Any - -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import SimpleSerializable - -from gradio.components.base import IOComponent - -set_documentation_group("component") - - -@document() -class State(IOComponent, SimpleSerializable): - """ - Special hidden component that stores session state across runs of the demo by the - same 
user. The value of the State variable is cleared when the user refreshes the page. - - Preprocessing: No preprocessing is performed - Postprocessing: No postprocessing is performed - Demos: blocks_simple_squares - Guides: real-time-speech-recognition - """ - - allow_string_shortcut = False - - def __init__( - self, - value: Any = None, - **kwargs, - ): - """ - Parameters: - value: the initial value (of arbitrary type) of the state. The provided argument is deepcopied. If a callable is provided, the function will be called whenever the app loads to set the initial value of the state. - """ - self.stateful = True - IOComponent.__init__(self, value=deepcopy(value), **kwargs) - - -class Variable(State): - """Variable was renamed to State. This class is kept for backwards compatibility.""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def get_block_name(self): - return "state" diff --git a/spaces/DaFujaTyping/hf-Chat-ui/src/lib/utils/share.ts b/spaces/DaFujaTyping/hf-Chat-ui/src/lib/utils/share.ts deleted file mode 100644 index 4587669a10164aa7c961429fbddec9cf438c0eca..0000000000000000000000000000000000000000 --- a/spaces/DaFujaTyping/hf-Chat-ui/src/lib/utils/share.ts +++ /dev/null @@ -1,7 +0,0 @@ -export function share(url: string, title: string) { - if (navigator.share) { - navigator.share({ url, title }); - } else { - prompt("Copy this public url to share:", url); - } -} diff --git a/spaces/Demosthene-OR/avr23-cds-translation/tabs/id_lang_tab.py b/spaces/Demosthene-OR/avr23-cds-translation/tabs/id_lang_tab.py deleted file mode 100644 index aea5733ec3cfa639380cc732fa59eeb12cf167f1..0000000000000000000000000000000000000000 --- a/spaces/Demosthene-OR/avr23-cds-translation/tabs/id_lang_tab.py +++ /dev/null @@ -1,449 +0,0 @@ -import streamlit as st -import pandas as pd -import numpy as np -import os -import matplotlib.pyplot as plt -import tiktoken -import random -import joblib -import json -import csv -from transformers import pipeline -import keras -from tensorflow.keras.preprocessing.sequence import pad_sequences -from sklearn.preprocessing import LabelEncoder -from sklearn.feature_extraction.text import CountVectorizer -from tensorflow.keras.utils import plot_model -from filesplit.merge import Merge -from extra_streamlit_components import tab_bar, TabBarItemData -from sklearn.decomposition import PCA -import matplotlib.pyplot as plt -import seaborn as sns -from sklearn import naive_bayes - -title = "Identification de langue" -sidebar_name = "Identification de langue" - -# CountVectorizer a une liste de phrase en entrée. 
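Aside: the `gr.State` component in the file deleted above is easiest to understand from a usage sketch. The following is a minimal, hypothetical Blocks demo (not part of this repo) illustrating why the initial value is deepcopied: every browser session starts from its own independent copy of the value.

```python
import gradio as gr

def increment(n):
    # The session's current State value arrives as a plain Python object.
    n = n + 1
    return n, f"{n} clicks this session"

with gr.Blocks() as demo:
    clicks = gr.State(value=0)  # initial value is deepcopied, one copy per session
    label = gr.Textbox(label="Count")
    gr.Button("Click me").click(increment, inputs=clicks, outputs=[clicks, label])

demo.launch()
```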
-# Cette fonction met les données d'entrée dans le bon format -def format_to_vectorize(data): - X_tok = [] - if "DataFrame" in str(type(data)):sentences = data.tolist() - elif "str" in str(type(data)): - sentences =[data] - else: sentences = data - - for sentence in sentences: - X_tok.append(sentence) - return X_tok - -def create_BOW(data): - global vectorizer - - X_tok = format_to_vectorize(data) - X = vectorizer.transform(X_tok) - return X - -def load_vectorizer(tokenizer): - global dict_token, dict_ids, nb_token - - path = 'data/vectorizer_tiktoken_big.pkl' - vectorizer = joblib.load(path) - dict_token = {tokenizer.decode([cle]): cle for cle, valeur in vectorizer.vocabulary_.items()} - dict_ids = {cle: tokenizer.decode([cle]) for cle, valeur in vectorizer.vocabulary_.items()} #dict_ids.items()} - nb_token = len(vectorizer.vocabulary_) - return vectorizer - -def lang_id_nb(sentences): - global lan_to_language - - if "str" in str(type(sentences)): - return lan_to_language[clf_nb.predict(create_BOW(sentences))[0]] - else: return [lan_to_language[l] for l in clf_nb.predict(create_BOW(sentences))] - -@st.cache_resource -def init_nb_identifier(): - - tokenizer = tiktoken.get_encoding("cl100k_base") - - # Chargement du classificateur sauvegardé - clf_nb = joblib.load("data/id_lang_tiktoken_nb_sparse_big.pkl") - vectorizer = load_vectorizer(tokenizer) - - # Lisez le contenu du fichier JSON - with open('data/multilingue/lan_to_language.json', 'r') as fichier: - lan_to_language = json.load(fichier) - return tokenizer, dict_token, dict_ids, nb_token, lan_to_language, clf_nb, vectorizer - -def encode_text(textes): - global tokenizer - - max_length=250 - sequences = tokenizer.encode_batch(textes) - return pad_sequences(sequences, maxlen=max_length, padding='post') - -def read_list_lan(): - - with open('data/multilingue/lan_code.csv', 'r') as fichier_csv: - reader = csv.reader(fichier_csv) - lan_code = next(reader) - return lan_code - -@st.cache_resource -def init_dl_identifier(): - - label_encoder = LabelEncoder() - list_lan = read_list_lan() - lan_identified = [lan_to_language[l] for l in list_lan] - label_encoder.fit(list_lan) - merge = Merge("data/dl_id_lang_split", "data", "dl_tiktoken_id_language_model.h5").merge(cleanup=False) - dl_model = keras.models.load_model("data/dl_tiktoken_id_language_model.h5") - return dl_model, label_encoder, list_lan, lan_identified - -def lang_id_dl(sentences): - global dl_model, label_encoder - - if "str" in str(type(sentences)): predictions = dl_model.predict(encode_text([sentences])) - else: predictions = dl_model.predict(encode_text(sentences)) - # Décodage des prédictions en langues - predicted_labels_encoded = np.argmax(predictions, axis=1) - predicted_languages = label_encoder.classes_[predicted_labels_encoded] - if "str" in str(type(sentences)): return lan_to_language[predicted_languages[0]] - else: return [l for l in predicted_languages] - -@st.cache_resource -def init_lang_id_external(): - - lang_id_model_ext = pipeline('text-classification',model="papluca/xlm-roberta-base-language-detection") - dict_xlmr = {"ar":"ara", "bg":"bul", "de":"deu", "el": "ell", "en":"eng", "es":"spa", "fr":"fra", "hi": "hin","it":"ita","ja":"jpn", \ - "nl":"nld", "pl":"pol", "pt":"por", "ru":"rus", "sw":"swh", "th":"tha", "tr":"tur", "ur": "urd", "vi":"vie", "zh":"cmn"} - sentence_test = pd.read_csv('data//multilingue/sentence_test_extract.csv') - sentence_test = sentence_test[:4750] - # Instanciation d'un exemple - exemples = ["Er weiß überhaupt nichts über dieses Buch", # 
Phrase 0 - "Umbrellas sell well", # Phrase 1 - "elle adore les voitures très luxueuses, et toi ?", # Phrase 2 - "she loves very luxurious cars, don't you?", # Phrase 3 - "Vogliamo visitare il Colosseo e nuotare nel Tevere", # Phrase 4 - "vamos a la playa", # Phrase 5 - "Te propongo un trato", # Phrase 6 - "she loves you much, mais elle te hait aussi and das ist traurig", # Phrase 7 # Attention à cette phrase trilingue - "Elle a de belles loches" # Phrase 8 - ] - - lang_exemples = ['deu','eng','fra','eng','ita','spa','spa','fra','fra'] - return lang_id_model_ext, dict_xlmr, sentence_test, lang_exemples, exemples - -@st.cache_data -def display_acp(): - data = np.load('data/data_lang_id_acp.npz') - X_train_scaled = data['X_train_scaled'] - y_train_pred = data['y_train_pred'] - label_arrow = ['.', ',', '?', ' a', ' de', ' la', ' que', 'Tom', ' un', ' the', ' in', \ - ' to', 'I', "'", 'i', ' le', ' en', ' es', 'é', ' l', '!', 'o', ' ist', \ - ' pas', ' Tom', ' me', ' di', 'Ich', ' is', 'Je', ' nicht', ' you', \ - ' die', ' à', ' el', ' est', 'a', 'en', ' d', ' è', ' ne', ' se', ' no', \ - ' una', ' zu', 'Il', '¿', ' of', ' du', "'t", 'ato', ' der', ' il', \ - ' n', 'El', ' non', ' che', 'are', ' con', 'ó', ' was', 'La', 'No', \ - ' ?', 'es', 'le', 'L', ' and', ' des', ' s', ' ich', 'as', 'S', ' per', \ - ' das', ' und', ' ein', 'e', "'s", 'u', ' y', 'He', 'z', 'er', ' m', \ - 'st', ' les', 'Le', ' I', 'ar', 'te', 'Non', 'The', ' er', 'ie', ' v', \ - ' c', "'est", ' ha', ' den'] - - pca = PCA(n_components=2) - - X_new = pca.fit_transform(X_train_scaled) - coeff = pca.components_.transpose() - xs = X_new[:, 0] - ys = X_new[:, 1] - scalex = 1.0/(xs.max() - xs.min()) - scaley = 1.0/(ys.max() - ys.min()) - principalDf = pd.DataFrame({'PC1': xs*scalex, 'PC2': ys * scaley}) - finalDF = pd.concat([principalDf, pd.Series(y_train_pred, name='Langue')], axis=1) - - sns.set_context("poster") # Valeur possible:"notebook", "talk", "poster", ou "paper" - plt.rc("axes", titlesize=32,titleweight='bold') # Taille du titre de l'axe - plt.rc("axes", labelsize=18,labelweight='bold') # Taille des étiquettes de l'axe - plt.rc("xtick", labelsize=14) # Taille des étiquettes de l'axe des x - plt.rc("ytick", labelsize=14) # Taille des étiquettes de l'axe des y - - st.write("Affichage de 10 000 phrases (points) et des 50 tokens les + utilisés (flèches)") - st.write("") - fig = plt.figure(figsize=(20, 15)) - sns.scatterplot(x='PC1', y='PC2', hue='Langue', data=finalDF, alpha=0.5) - for i in range(50): - plt.arrow(0, 0, coeff[i, 0]*1.5, coeff[i, 1]*0.8,color='k', alpha=0.08, head_width=0.01, ) - plt.text(coeff[i, 0]*1.5, coeff[i, 1] * 0.8, label_arrow[i], color='k', weight='bold') - - plt.title("Importance des principaux tokens dans\nl'identification de langue par l'algorithme Naive Bayes") - plt.xlim(-0.4, 0.45) - plt.ylim(-0.15, 0.28); - st.pyplot(fig) - return - -@st.cache_data -def read_BOW_examples(): - return pd.read_csv('data/lang_id_small_BOW.csv') - -def analyse_nb(sel_phrase): - global lang_exemples,exemples - - def create_small_BOW(s): - encodage = tokenizer.encode(s) - sb = [0] * (df_BOW.shape[1]-1) - nb_unique_token = 0 - for i in range(df_BOW.shape[1]-1): - for t in encodage: - if df_BOW.columns[i]==str(t): - sb[i] += 1 - if sb[i] > 0: nb_unique_token +=1 - return sb, nb_unique_token - - st.write("#### **Probabilité d'appartenance de la phrase à une langue :**") - st.image("./assets/formule_proba_naive_bayes.png") - st.write("où **C** est la classe (lan_code), **Fi** est la caractéristique i du BOW, **Z** est 
l'\"evidence\" servant à regulariser la proba") - st.write("") - nb_lang = 5 - lan_code = ['deu','eng','fra','spa','ita'] - lan_color = {'deu':'violet','eng':'green','fra':'red','spa':'blue','ita':'orange'} - df_BOW = read_BOW_examples() - - clf_nb2 = naive_bayes.MultinomialNB() - clf_nb2.fit(df_BOW.drop(columns='lan_code').values.tolist(), df_BOW['lan_code'].values.tolist()) - - nb_phrases_lang =[] - for l in lan_code: - nb_phrases_lang.append(sum(df_BOW['lan_code']==l)) - st.write("Phrase à analyser :",'**:'+lan_color[lang_exemples[sel_phrase]]+'['+lang_exemples[sel_phrase],']** - **"'+exemples[sel_phrase]+'"**') - - # Tokenisation et encodage de la phrase - encodage = tokenizer.encode(exemples[sel_phrase]) - - # Création du vecteur BOW de la phrase - bow_exemple, nb_unique_token = create_small_BOW(exemples[sel_phrase]) - st.write("Nombre de tokens retenus dans le BOW: "+ str(nb_unique_token)) - masque_tokens_retenus = [(1 if token in list(dict_ids.keys()) else 0) for token in encodage] - str_token = " " - for i in range(len(encodage)): - if masque_tokens_retenus[i]==1: - if (i%2) ==0: - str_token += "**:red["+tokenizer.decode([encodage[i]])+"]** " - else: - str_token += "**:violet["+tokenizer.decode([encodage[i]])+"]** " - else: str_token += ":green["+tokenizer.decode([encodage[i]])+"] " - - st.write("Tokens se trouvant dans le modèle (en :red[rouge] ou :violet[violet]) :"+str_token+" ") - - st.write("") - # Afin de continuer l'analyse on ne garde que les token de la phrase disponibles dans le BOW - token_used = [str(encodage[i]) for i in range(len(encodage)) if (masque_tokens_retenus[i]==1)] - - - # Calcul du nombre d'apparition de ces tokens dans le BOW pour chaque langue, et stockage dans un DataFrame df_count - def compter_non_zero(colonne): - return (colonne != 0).sum() - - votes = [] - for i in range(nb_lang): - #votes.append(list(df_BOW[token_used].loc[df_BOW['lan_code']==lan_code[i]].sum(axis=0))) - votes.append(list(df_BOW[token_used].loc[df_BOW['lan_code']==lan_code[i]].apply(compter_non_zero))) - - col_name = [str(i+1)+'-'+tokenizer.decode([int(token_used[i])]) for i in range(len(token_used))] - df_count = pd.DataFrame(data=votes,columns=token_used, index=lan_code) - df_count.columns = col_name - st.write("\n**Nombre d'apparitions des tokens, dans chaque langue**") - - # Lissage de Laplace n°1 (Laplace smoothing ) - # df_count = df_count+1 - - st.dataframe(df_count) - - ######### - ######### 3. Calcul de la probabilité d'apparition de chaque token dans chaque langue - df_proba = df_count.div(nb_phrases_lang, axis = 0) - - # Lissage de Laplace n°2 (Laplace smoothing ) - df_proba = df_proba.replace(0.0,0.0010) - - # Initialisation de df_proba: Calcul de la probabilité conditionnelle d'appartenance de la phrase à une langue - df_proba['Proba'] = 1 - # Itérer sur les colonnes et effectuez la multiplication pour chaque ligne - for col in df_count.columns: - df_proba['Proba'] *= df_proba[col] - - ######### - ######### 4. 
Calcul (par multiplication) de la probabilité d'appartenance de la phrase à une langue - - # Multiplication par la probabilité de la classe - p_classe = [(nb_phrases_lang[i]/df_BOW.shape[0]) for i in range(len(nb_phrases_lang))] - df_proba['Proba'] *= p_classe - - # Diviser par l'evidence - evidence = df_proba['Proba'].sum(axis=0) - df_proba['Proba'] *= 1/evidence - df_proba['Proba'] = df_proba['Proba'].round(3) - - # Affichage de la matrice des probabilités - st.write("**Probabilités conditionnelles d'apparition des tokens retenus, dans chaque langue:**") - st.dataframe(df_proba) - str_token = "Lang proba max: "# "*20 - for i,token in enumerate(df_proba.columns[:-1]): - str_token += '*'+token+'*:**:'+lan_color[df_proba[token].idxmax()]+'['+df_proba[token].idxmax()+']**'+" "*2 #8 - st.write(str_token) - st.write("") - - st.write("Langue réelle de la phrase"+" "*35+": **:"+lan_color[lang_exemples[sel_phrase]]+'['+lang_exemples[sel_phrase]+']**') - st.write("Langue dont la probabilité est la plus forte "+": **:"+lan_color[df_proba['Proba'].idxmax()]+'['+df_proba['Proba'].idxmax(),"]** (proba={:.2f}".format(max(df_proba['Proba']))+")") - prediction = clf_nb2.predict([bow_exemple]) - st.write("Langue prédite par Naiva Bayes"+" "*23+": **:"+lan_color[prediction[0]]+'['+prediction[0]+"]** (proba={:.2f}".format(max(clf_nb2.predict_proba([bow_exemple])[0]))+")") - st.write("") - - fig, axs = plt.subplots(1, 2, figsize=(10, 6)) - df_proba_sorted =df_proba.sort_index(ascending=True) - axs[0].set_title("Probabilités calculée manuellement", fontsize=12) - axs[0].barh(df_proba_sorted.index, df_proba_sorted['Proba']) - axs[1].set_title("Probabilités du classifieur Naive Bayes", fontsize=12) - axs[1].barh(df_proba_sorted.index, clf_nb2.predict_proba([bow_exemple])[0]); - st.pyplot(fig) - return - -#@st.cache_data -def find_exemple(lang_sel): - global exemples - return exemples[lang_sel] - -def display_shapley(lang_sel): - st.write("**Analyse de l'importance de chaque token dans l'identification de la langue**") - st.image('assets/fig_schapley'+str(lang_sel)+'.png') - st.write("**Recapitulatif de l'influence des tokens sur la selection de la langue**") - st.image('assets/fig_schapley_recap'+str(lang_sel)+'.png') - return - -def run(): - global tokenizer, vectorizer, dict_token, dict_ids, nb_token, lan_to_language, clf_nb - global dl_model, label_encoder, toggle_val, custom_sentence, list_lan, lan_identified - global lang_exemples, exemples - - - tokenizer, dict_token, dict_ids, nb_token, lan_to_language, clf_nb, vectorizer = init_nb_identifier() - dl_model, label_encoder, list_lan, lan_identified = init_dl_identifier() - lang_id_model_ext, dict_xlmr, sentence_test, lang_exemples, exemples= init_lang_id_external() - - st.write("") - st.title(title) - st.write("## **Explications :**\n") - st.markdown( - """ - Afin de mettre en oeuvre cette fonctionnalité nous avons utilisé un jeu d'entrainement multilinge de **9.757.778 phrases** dans **95 langues**. - Les 95 langues identifiées sont: - """ - ) - st.selectbox(label="",options=sorted(lan_identified)) - st.markdown(""" - Nous avons utilisé 2 méthodes pour identifier la langue d'un texte: - 1. un classificateur **Naïve Bayes** - 2. un modèle de **Deep Learning** - - Les 2 modèles ont un accuracy similaire sur le jeu de test: **:red[96% pour NB et 97,5% pour DL]** -
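To make the probability computation in `analyse_nb` concrete, here is a toy numeric recreation with invented counts (two languages, three tokens). The deleted code runs the same steps over a pandas DataFrame: conditional token probabilities per language, ad-hoc smoothing of zeros to 0.001, multiplication by the class prior, and division by the evidence Z.

```python
import numpy as np

lang = ["fra", "eng"]
token_counts = np.array([[40.0, 0.0, 12.0],   # occurrences of 3 tokens in "fra" sentences
                         [3.0, 50.0, 20.0]])  # occurrences in "eng" sentences
n_sentences = np.array([100.0, 100.0])        # sentences per language in the BOW

cond = token_counts / n_sentences[:, None]    # P(token_i | lang)
cond[cond == 0.0] = 0.001                     # same smoothing trick as analyse_nb
prior = n_sentences / n_sentences.sum()       # P(lang)

joint = prior * cond.prod(axis=1)             # P(lang) * prod_i P(token_i | lang)
posterior = joint / joint.sum()               # divide by the evidence Z
print(dict(zip(lang, posterior.round(3))))    # {'fra': 0.016, 'eng': 0.984}
```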
      - """ - , unsafe_allow_html=True) - - chosen_id = tab_bar(data=[ - TabBarItemData(id="tab1", title="Id. Naïve Bayes", description="avec le Bag Of Words"), - TabBarItemData(id="tab2", title="Id. Deep Learning", description=" avec Keras"), - TabBarItemData(id="tab3", title="Interpretabilité", description="du modèle Naïve Bayes ")], - default="tab1") - - if (chosen_id == "tab1") or (chosen_id == "tab2"): - st.write("## **Paramètres :**\n") - - toggle_val = st.toggle('Phrase à saisir/Phrase test', value=True, help="Off = phrase à saisir, On = selection d'une phrase test parmi 9500 phraseq") - if toggle_val: - custom_sentence= st.selectbox("Selectionnez une phrases test à identifier:", sentence_test['sentence'] ) - else: - custom_sentence = st.text_area(label="Saisir le texte dont vous souhaitez identifier la langue:") - st.button(label="Valider", type="primary") - - if custom_sentence!='': - st.write("## **Résultats :**\n") - md = """ - |Identifieur |Langue détectée| - |-------------------------------------|---------------|""" - md1 = "" - if toggle_val: - lan_reelle = sentence_test['lan_code'].loc[sentence_test['sentence']==custom_sentence].tolist()[0] - md1 = """ - |Langue réelle |**:blue["""+lan_to_language[lan_reelle]+"""]**|""" - md2 = """ - |Classificateur Naïve Bayes |**:red["""+lang_id_nb(custom_sentence)+"""]**| - |Modèle de Deep Learning |**:red["""+lang_id_dl(custom_sentence)+"""]**|""" - md3 = """ - |XLM-RoBERTa (Hugging Face) |**:red["""+lan_to_language[dict_xlmr[lang_id_model_ext(custom_sentence)[0]['label']]]+"""]**|""" - if toggle_val: - if not (lan_reelle in list(dict_xlmr.values())): - md3="" - - st.markdown(md+md1+md2+md3, unsafe_allow_html=True) - - st.write("## **Details sur la méthode :**\n") - if (chosen_id == "tab1"): - st.markdown( - """ - Afin d'utiliser le classificateur Naïve Bayes, il nous a fallu: - - Créer un Bag of Words de token.. - - ..Tokeniser le texte d'entrainement avec CountVectorizer et un tokenizer 'custom', **Tiktoken** d'OpenAI. - - Utiliser des matrices creuses (Sparse Matrix), car notre BOW contenait 10 M de lignes x 59122 tokens. - - Sauvegarder le vectorizer (non serialisable) et le classificateur entrainé. - - L'execution de toutes ces étapes est assez rapide: une dizaine de minutes -
      - Le résultat est très bon: L'Accuracy sur le jeu de test est = - **:red[96%]** sur les 95 langues, et **:red[99,1%]** sur les 5 langues d'Europe de l'Ouest (en,fr,de,it,sp) -
      - **Note 1:** Les 2 modèles ont un accuracy similaire sur le jeu de test: **:red[96% pour NB et 97,5% pour DL]** - **Note 2:** Le modèle *XLM-RoBERTa* de Hugging Face (qui identifie 20 langues seulement) a une accuracy, sur notre jeu de test = **97,8%**, - versus **99,3% pour NB** et **99,2% pour DL** sur ces 20 langues. - """ - , unsafe_allow_html=True) - else: - st.markdown( - """ - Nous avons mis en oeuvre un modèle Keras avec une couche d'embedding et 4 couches denses *(Voir architecture ci-dessous)*. - Nous avons utilisé le tokeniser **Tiktoken** d'OpenAI. - La couche d'embedding accepte 250 tokens, ce qui signifie que la détection de langue s'effectue sur approximativement les 200 premiers mots. -
      - L'entrainement a duré plus de 10 heures.. - Finalement, le résultat est très bon: L'Accuracy sur le jeu de test est = - **:red[97,5%]** sur les 95 langues, et **:red[99,1%]** sur les 5 langues d'Europe de l'Ouest (en,fr,de,it,sp). - Néanmoins, la durée pour une prédiction est relativement longue: approximativement 5/100 de seconde -
      - **Note 1:** Les 2 modèles ont un accuracy similaire sur le jeu de test: **:red[96% pour NB et 97,5% pour DL]** - **Note 2:** Le modèle *XLM-RoBERTa* de Hugging Face (qui identifie 20 langues seulement) a une accuracy, sur notre jeu de test = **97,8%**, - versus **99,3% pour NB** et **99,2% pour DL** sur ces 20 langues. -
      - """ - , unsafe_allow_html=True) - st.write("
      Architecture du modèle utilisé:
      ", unsafe_allow_html=True) - plot_model(dl_model, show_shapes=True, show_layer_names=True, show_layer_activations=True,rankdir='TB',to_file='./assets/model_plot.png') - col1, col2, col3 = st.columns([0.15,0.7,0.15]) - with col2: - st.image('./assets/model_plot.png',use_column_width="auto") - elif (chosen_id == "tab3"): - st.write("### **Interpretabilité du classifieur Naïve Bayes sur 5 langues**") - st.write("##### ..et un Training set réduit (15000 phrases et 94 tokens)") - st.write("") - - chosen_id2 = tab_bar(data=[ - TabBarItemData(id="tab1", title="Analyse en Compos. Princ.", description=""), - TabBarItemData(id="tab2", title="Simul. calcul NB", description=""), - TabBarItemData(id="tab3", title="Shapley", description="")], - default="tab1") - if (chosen_id2 == "tab1"): - display_acp() - if (chosen_id2 == "tab2") or (chosen_id2 == "tab3"): - sel_phrase = st.selectbox('Selectionnez une phrase à "interpréter":', range(9), format_func=find_exemple) - if (chosen_id2 == "tab2"): - analyse_nb(sel_phrase) - if (chosen_id2 == "tab3"): - display_shapley(sel_phrase) diff --git a/spaces/Djacon/emotion_detection/static/404.html b/spaces/Djacon/emotion_detection/static/404.html deleted file mode 100644 index a0d3b56fb75d55f4594a3e4d6b9a29c4b2563da1..0000000000000000000000000000000000000000 --- a/spaces/Djacon/emotion_detection/static/404.html +++ /dev/null @@ -1,261 +0,0 @@ - - - - - - - - Text2Feature | 404 Page - - - - - - - - - - -
- [stripped markup: the deleted 404.html was a styled error page titled "Text2Feature | 404 Page"; only the text fragments "404", "Page not found", and a "Go Back" link survived extraction]
      - - - - \ No newline at end of file diff --git a/spaces/ECCV2022/storydalle/dalle/trainer_prefix.py b/spaces/ECCV2022/storydalle/dalle/trainer_prefix.py deleted file mode 100644 index 42c696b7ef76f25bc698da4d19b7236cd3e04228..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/storydalle/dalle/trainer_prefix.py +++ /dev/null @@ -1,1629 +0,0 @@ -import inspect -import json -import math -import os -import re -import shutil -import warnings -from contextlib import contextmanager -from pathlib import Path -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -from nltk import word_tokenize -import numpy as np -import torch -from packaging import version -from torch import nn -from torch.utils.data.dataloader import DataLoader -from torch.utils.data.dataset import Dataset -from torch.utils.data.distributed import DistributedSampler -from torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler -from tqdm.auto import tqdm, trange -from torch.nn.utils.rnn import pad_sequence -import random - -from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator -from transformers.file_utils import is_datasets_available, is_torch_tpu_available -from transformers.integrations import ( - default_hp_search_backend, - is_comet_available, - is_optuna_available, - is_ray_available, - is_tensorboard_available, - is_wandb_available, - run_hp_search_optuna, - run_hp_search_ray, -) - -from transformers.modeling_utils import PreTrainedModel -from transformers.optimization import AdamW, get_linear_schedule_with_warmup, get_constant_schedule_with_warmup -from transformers.tokenization_utils_base import PreTrainedTokenizerBase -from transformers.trainer_utils import ( - PREFIX_CHECKPOINT_DIR, - BestRun, - EvalPrediction, - EvaluationStrategy, - HPSearchBackend, - PredictionOutput, - TrainOutput, - default_compute_objective, - default_hp_space, - set_seed, -) -from transformers.training_args import TrainingArguments -from transformers.utils import logging - - -_use_native_amp = False -_use_apex = False -EPS = 1e-12 -INIT_GUMBEL_TEMP = 5.0 - -control_lst = ['positive', 'negative', 'neutral'] -Control_Temp = {'positive': 3967, 'negative':4633, 'neutral':8500} -control_Map = [torch.LongTensor([3967]), torch.LongTensor([4633]), torch.LongTensor([8500])] -sst_lst = [(0, 2), (1, 3), (4,)] -sst_standard = ["positive", "negative", "very positive", "very negative", "neutral"] -# Control_?Map = {j:i for i, j in enumerate(control_lst)} - -# Check if Pytorch version >= 1.6 to switch between Native AMP and Apex -if version.parse(torch.__version__) < version.parse("1.6"): - from transformers.file_utils import is_apex_available - - if is_apex_available(): - from apex import amp - _use_apex = True -else: - _use_native_amp = True - from torch.cuda.amp import autocast - -if is_datasets_available(): - import datasets - -if is_torch_tpu_available(): - import torch_xla.core.xla_model as xm - import torch_xla.debug.metrics as met - import torch_xla.distributed.parallel_loader as pl - -if is_tensorboard_available(): - try: - from torch.utils.tensorboard import SummaryWriter - except ImportError: - from tensorboardX import SummaryWriter - -if is_wandb_available(): - import wandb - -if is_comet_available(): - import comet_ml - -if is_optuna_available(): - import optuna - -if is_ray_available(): - from ray import tune - - -logger = logging.get_logger(__name__) - - -@contextmanager -def torch_distributed_zero_first(local_rank: int): - """ - Decorator to 
make all processes in distributed training wait for each local_master to do something. - - Args: - local_rank (:obj:`int`): The rank of the local process. - """ - if local_rank not in [-1, 0]: - torch.distributed.barrier() - yield - if local_rank == 0: - torch.distributed.barrier() - -def helper_token2bpe(offsets): - full_lst = [] - for example_offset in offsets: - bpe2token = [] - token2bpe = [] - token_idx = -1 - # print(example_offset) - for bpe_idx, (a,b) in enumerate(example_offset): - # print(token2bpe, a, b, bpe_idx) - if b - a > 0: - if a == 0: - # new token - token_idx += 1 - bpe2token.append(token_idx) - token2bpe.append([]) - token2bpe[-1].append(bpe_idx) - else: - # prev token. - bpe2token.append(token_idx) - token2bpe[-1].append(bpe_idx) - else: - bpe2token.append(None) - full_lst.append((bpe2token, token2bpe)) - return full_lst - -class SequentialDistributedSampler(Sampler): - """ - Distributed Sampler that subsamples indicies sequentially, - making it easier to collate all results at the end. - - Even though we only use this sampler for eval and predict (no training), - which means that the model params won't have to be synced (i.e. will not hang - for synchronization even if varied number of forward passes), we still add extra - samples to the sampler to make it evenly divisible (like in `DistributedSampler`) - to make it easy to `gather` or `reduce` resulting tensors at the end of the loop. - """ - - def __init__(self, dataset, num_replicas=None, rank=None): - if num_replicas is None: - if not torch.distributed.is_available(): - raise RuntimeError("Requires distributed package to be available") - num_replicas = torch.distributed.get_world_size() - if rank is None: - if not torch.distributed.is_available(): - raise RuntimeError("Requires distributed package to be available") - rank = torch.distributed.get_rank() - self.dataset = dataset - self.num_replicas = num_replicas - self.rank = rank - self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) - self.total_size = self.num_samples * self.num_replicas - - def __iter__(self): - indices = list(range(len(self.dataset))) - - # add extra samples to make it evenly divisible - indices += indices[: (self.total_size - len(indices))] - assert ( - len(indices) == self.total_size - ), f"Indices length {len(indices)} and total size {self.total_size} mismatched" - - # subsample - indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples] - assert ( - len(indices) == self.num_samples - ), f"Indices length {len(indices)} and sample number {self.num_samples} mismatched" - - return iter(indices) - - def __len__(self): - return self.num_samples - - -def get_tpu_sampler(dataset: Dataset): - if xm.xrt_world_size() <= 1: - return RandomSampler(dataset) - return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()) - - -class Trainer_Prefix: - """ - Trainer is a simple but feature-complete training and eval loop for PyTorch, - optimized for 🤗 Transformers. - - Args: - model (:class:`~transformers.PreTrainedModel`, `optional`): - The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed. - args (:class:`~transformers.TrainingArguments`, `optional`): - The arguments to tweak for training. Will default to a basic instance of :class:`~transformers.TrainingArguments` - with the ``output_dir`` set to a directory named `tmp_trainer` in the current directory if not provided. 
- data_collator (:obj:`DataCollator`, `optional`): - The function to use to form a batch from a list of elements of :obj:`train_dataset` or - :obj:`eval_dataset`. Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is - provided, an instance of :func:`~transformers.DataCollatorWithPadding` otherwise. - train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): - The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the - ``model.forward()`` method are automatically removed. - eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): - The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the - ``model.forward()`` method are automatically removed. - tokenizer (:class:`PreTrainedTokenizerBase`, `optional`): - The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the - maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an - interrupted training or reuse the fine-tuned model. - model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`): - A function that instantiates the model to be used. If provided, each call to - :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function. - compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`): - The function that will be used to compute metrics at evaluation. Must take a - :class:`~transformers.EvalPrediction` and return a dictionary string to metric values. - tb_writer (:obj:`SummaryWriter`, `optional`): - Object to write to TensorBoard. - optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): - A tuple containing the optimizer and the scheduler to use. Will default to an instance of - :class:`~transformers.AdamW` on your model and a scheduler given by - :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`. - kwargs: - Deprecated keyword arguments. - """ - - def __init__( - self, - model: Optional[PreTrainedModel] = None, - model_gpt2 : Optional[PreTrainedModel] = None, - args: TrainingArguments = None, - data_collator: Optional[DataCollator] = None, - train_dataset: Optional[Dataset] = None, - eval_dataset: Optional[Dataset] = None, - tokenizer: Optional["PreTrainedTokenizerBase"] = None, - model_init: Callable[[], PreTrainedModel] = None, - compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, - tb_writer: Optional["SummaryWriter"] = None, - optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), - task_mode: Optional[str] = None, - use_dropout: Optional[bool] = False, - distill: Optional[bool] = False, - matching_objective:Optional[str]= None, - finetuned_gpt2: Optional[PreTrainedModel] = None, - **kwargs, - ): - if args is None: - logger.info("No `TrainingArguments` passed, using the current path as `output_dir`.") - args = TrainingArguments("tmp_trainer") - self.args = args - # Seed must be set before instantiating the model when using model - set_seed(self.args.seed) - assert ( - model is not None or model_init is not None - ), "You must provide a model to use `Trainer`, either by using the `model` argument or the `model_init` argument." 
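Returning to the `SequentialDistributedSampler` defined above: its padding arithmetic is easiest to see with small numbers. A self-contained illustration with hypothetical sizes (10 samples, 4 replicas), mirroring the slicing done in its `__iter__`:

```python
import math

dataset_len, num_replicas = 10, 4
num_samples = math.ceil(dataset_len / num_replicas)   # 3 indices per replica
total_size = num_samples * num_replicas               # 12, evenly divisible

indices = list(range(dataset_len))
indices += indices[: total_size - len(indices)]       # wrap around with [0, 1]

# Each rank r takes a contiguous slice, exactly as in __iter__ above.
shards = [indices[r * num_samples:(r + 1) * num_samples] for r in range(num_replicas)]
print(shards)  # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]]
```

The duplicated trailing indices are the price of an even split; callers that gather predictions are expected to truncate back to the true dataset length.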
- assert model_init is None - self.model = model.to(args.device) if model is not None else None - self.gpt2 = model_gpt2.to(args.device) if model_gpt2 is not None else None - default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer) - self.data_collator = data_collator if data_collator is not None else default_collator - self.train_dataset = train_dataset - self.eval_dataset = eval_dataset - self.tokenizer = tokenizer - self.model_init = model_init - self.compute_metrics = compute_metrics - self.optimizer, self.lr_scheduler = optimizers - self.task_mode = task_mode - self.use_dropout = use_dropout - - self.curr_best_eval = 10000000. - - self.distill = distill - if self.distill: - self.matching_objective = matching_objective - self.finetuned_gpt2 = finetuned_gpt2 - - if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): - raise RuntimeError( - "Passing a `model_init` is incompatible with providing the `optimizers` argument." - "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." - ) - self.tb_writer = tb_writer - self.log_history = [] - if "prediction_loss_only" in kwargs: - warnings.warn( - "Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.", - FutureWarning, - ) - self.args.prediction_loss_only = kwargs.pop("prediction_loss_only") - assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}." - - if tb_writer is None and is_tensorboard_available() and self.is_world_process_zero(): - self.tb_writer = SummaryWriter(log_dir=self.args.logging_dir) - if not is_tensorboard_available(): - logger.warning( - "You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it." - ) - - # Will be set to True by `self._setup_loggers()` on first call to `self.log()`. - self._loggers_initialized = False - - # Create output directory if needed - if self.is_world_process_zero(): - os.makedirs(self.args.output_dir, exist_ok=True) - if is_torch_tpu_available(): - # Set an xla_device flag on the model's config. - # We'll find a more elegant and not need to do this in the future. - self.model.config.xla_device = True - if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)): - self.data_collator = self.data_collator.collate_batch - warnings.warn( - ( - "The `data_collator` should now be a simple callable (function, class with `__call__`), classes " - + "with a `collate_batch` are deprecated and won't be supported in a future version." - ), - FutureWarning, - ) - - if is_datasets_available(): - if isinstance(train_dataset, datasets.Dataset): - self._remove_unused_columns(self.train_dataset, description="training") - if isinstance(eval_dataset, datasets.Dataset): - self._remove_unused_columns(self.eval_dataset, description="evaluation") - - self.global_step = None - self.epoch = None - self.total_flos = None - if self.args.fp16 and _use_native_amp: - self.scaler = torch.cuda.amp.GradScaler() - self.hp_search_backend = None - self.use_tune_checkpoints = False - if self.args.label_names is None: - self.args.label_names = (["labels"] - ) - - def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None): - if not self.args.remove_unused_columns: - return - # Inspect model forward signature to keep only the arguments it accepts. 
- signature = inspect.signature(self.model.forward) - signature_columns = list(signature.parameters.keys()) - # Labels may be named label or label_ids, the default data collator handles that. - signature_columns += ["label", "label_ids"] - columns = [k for k in signature_columns if k in dataset.column_names] - ignored_columns = list(set(dataset.column_names) - set(signature_columns)) - dset_description = "" if description is None else f"in the {description} set " - logger.info( - f"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}." - ) - dataset.set_format(type=dataset.format["type"], columns=columns) - - def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: - if isinstance(self.train_dataset, torch.utils.data.IterableDataset): - return None - elif is_torch_tpu_available(): - return get_tpu_sampler(self.train_dataset) - else: - return ( - RandomSampler(self.train_dataset) - if self.args.local_rank == -1 - else DistributedSampler(self.train_dataset) - ) - - def get_train_dataloader(self) -> DataLoader: - """ - Returns the training :class:`~torch.utils.data.DataLoader`. - - Will use no sampler if :obj:`self.train_dataset` is a :obj:`torch.utils.data.IterableDataset`, a random sampler - (adapted to distributed training if necessary) otherwise. - - Subclass and override this method if you want to inject some custom behavior. - """ - if self.train_dataset is None: - raise ValueError("Trainer: training requires a train_dataset.") - train_sampler = self._get_train_sampler() - - return DataLoader( - self.train_dataset, - batch_size=self.args.train_batch_size, - sampler=train_sampler, - collate_fn=self.data_collator, - drop_last=self.args.dataloader_drop_last, - num_workers=self.args.dataloader_num_workers, - worker_init_fn=np.random.seed(self.args.seed) - ) - - def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]: - if isinstance(eval_dataset, torch.utils.data.IterableDataset): - return None - elif is_torch_tpu_available(): - return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()) - elif self.args.local_rank != -1: - return SequentialDistributedSampler(eval_dataset) - else: - return SequentialSampler(eval_dataset) - - def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: - """ - Returns the evaluation :class:`~torch.utils.data.DataLoader`. - - Will use no sampler if :obj:`self.eval_dataset` is a :obj:`torch.utils.data.IterableDataset`, a sequential - sampler (adapted to distributed training if necessary) otherwise. - - Subclass and override this method if you want to inject some custom behavior. - - Args: - eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): - If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not - accepted by the ``model.forward()`` method are automatically removed. 
- """ - if eval_dataset is None and self.eval_dataset is None: - raise ValueError("Trainer: evaluation requires an eval_dataset.") - elif eval_dataset is not None and is_datasets_available() and isinstance(eval_dataset, datasets.Dataset): - self._remove_unused_columns(eval_dataset, description="evaluation") - eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset - eval_sampler = self._get_eval_sampler(eval_dataset) - - return DataLoader( - eval_dataset, - sampler=eval_sampler, - batch_size=self.args.eval_batch_size, - collate_fn=self.data_collator, - drop_last=self.args.dataloader_drop_last, - num_workers=self.args.dataloader_num_workers, - worker_init_fn=np.random.seed(self.args.seed) - ) - - def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: - """ - Returns the test :class:`~torch.utils.data.DataLoader`. - - Will use no sampler if :obj:`test_dataset` is a :obj:`torch.utils.data.IterableDataset`, a sequential - sampler (adapted to distributed training if necessary) otherwise. - - Subclass and override this method if you want to inject some custom behavior. - - Args: - eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): - The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the - ``model.forward()`` method are automatically removed. - """ - if is_datasets_available() and isinstance(test_dataset, datasets.Dataset): - self._remove_unused_columns(test_dataset, description="test") - test_sampler = self._get_eval_sampler(test_dataset) - - # We use the same batch_size as for eval. - return DataLoader( - test_dataset, - sampler=test_sampler, - batch_size=self.args.eval_batch_size, - collate_fn=self.data_collator, - drop_last=self.args.dataloader_drop_last, - worker_init_fn=np.random.seed(self.args.seed) - ) - - def create_optimizer_and_scheduler(self, num_training_steps: int): - """ - Setup the optimizer and the learning rate scheduler. - - We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the - Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass. - """ - if self.optimizer is None: - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in self.model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad], - "weight_decay": self.args.weight_decay, - }, - { - "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad], - "weight_decay": 0.0, - }, - ] - - self.optimizer = AdamW( - optimizer_grouped_parameters, - lr=self.args.learning_rate, - betas=(self.args.adam_beta1, self.args.adam_beta2), - eps=self.args.adam_epsilon, - ) - - - # for n, p in self.model.named_parameters(): - # print(n,p.requires_grad) - print(self.optimizer.state_dict()) - if self.lr_scheduler is None: - self.lr_scheduler = get_linear_schedule_with_warmup( - self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps - ) - - - def setup_wandb(self): - """ - Setup the optional Weights & Biases (`wandb`) integration. - - One can subclass and override this method to customize the setup if needed. Find more information - `here `__. 
You can also override the following environment variables: - - Environment: - WANDB_WATCH: - (Optional, ["gradients", "all", "false"]) "gradients" by default, set to "false" to disable gradient logging - or "all" to log gradients and parameters - WANDB_PROJECT: - (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project - WANDB_DISABLED: - (Optional): boolean - defaults to false, set to "true" to disable wandb entirely - """ - if hasattr(self, "_setup_wandb"): - warnings.warn( - "The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.", - FutureWarning, - ) - return self._setup_wandb() - - if self.is_world_process_zero(): - logger.info( - 'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"' - ) - try: - combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()} - except AttributeError: - # in case the model has no config - combined_dict = {**self.args.to_sanitized_dict()} - wandb.init( - project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name - ) - # keep track of model topology and gradients, unsupported on TPU - if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false": - wandb.watch( - self.model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, self.args.logging_steps) - ) - - def setup_comet(self): - """ - Setup the optional Comet.ml integration. - - Environment: - COMET_MODE: - (Optional): str - "OFFLINE", "ONLINE", or "DISABLED" - COMET_PROJECT_NAME: - (Optional): str - Comet.ml project name for experiments - COMET_OFFLINE_DIRECTORY: - (Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE" - - For a number of configurable items in the environment, - see `here `__ - """ - if self.is_world_master(): - comet_mode = os.getenv("COMET_MODE", "ONLINE").upper() - args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")} - experiment = None - if comet_mode == "ONLINE": - experiment = comet_ml.Experiment(**args) - logger.info("Automatic Comet.ml online logging enabled") - elif comet_mode == "OFFLINE": - args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./") - experiment = comet_ml.OfflineExperiment(**args) - logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished") - if experiment is not None: - experiment._set_model_graph(self.model, framework="transformers") - experiment._log_parameters(self.args, prefix="args/", framework="transformers") - experiment._log_parameters(self.model.config, prefix="config/", framework="transformers") - - def num_examples(self, dataloader: DataLoader) -> int: - """ - Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset. - """ - return len(dataloader.dataset) - - def _setup_loggers(self): - if self._loggers_initialized: - return - if is_wandb_available(): - self.setup_wandb() - elif os.environ.get("WANDB_DISABLED") != "true": - logger.info( - "You are instantiating a Trainer but W&B is not installed. To use wandb logging, " - "run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface." 
- ) - if is_comet_available(): - self.setup_comet() - elif os.environ.get("COMET_MODE") != "DISABLED": - logger.info( - "To use comet_ml logging, run `pip/conda install comet_ml` " - "see https://www.comet.ml/docs/python-sdk/huggingface/" - ) - self._loggers_initialized = True - - def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): - """ HP search setup code """ - if self.hp_search_backend is None or trial is None: - return - params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial - for key, value in params.items(): - if not hasattr(self.args, key): - raise AttributeError( - f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`." - ) - old_attr = getattr(self.args, key, None) - # Casting value to the proper type - if old_attr is not None: - value = type(old_attr)(value) - setattr(self.args, key, value) - if self.hp_search_backend == HPSearchBackend.OPTUNA: - logger.info("Trial:", trial.params) - - def _report_to_hp_search( - self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float] - ): - if self.hp_search_backend is None or trial is None: - return - self.objective = self.compute_objective(metrics) - if self.hp_search_backend == HPSearchBackend.OPTUNA: - trial.report(self.objective, epoch) - if trial.should_prune(): - raise optuna.TrialPruned() - elif self.hp_search_backend == HPSearchBackend.RAY: - if self.global_step % self.args.save_steps == 0: - self._tune_save_checkpoint() - tune.report(objective=self.objective, **metrics) - - def _tune_save_checkpoint(self): - if not self.use_tune_checkpoints: - return - with tune.checkpoint_dir(step=self.global_step) as checkpoint_dir: - self.args.output_dir = checkpoint_dir - output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.global_step}") - self.save_model(output_dir) - if self.is_world_master(): - torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) - torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) - - - def train(self, model_path: Optional[str] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None): - """ - Main training entry point. - - Args: - model_path (:obj:`str`, `optional`): - Local path to the model if the model to train has been instantiated from a local path. If present, - training will resume from the optimizer/scheduler states loaded here. - trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`): - The trial run or the hyperparameter dictionary for hyperparameter search. - """ - # This might change the seed so needs to run first. - self._hp_search_setup(trial) - - # Model re-init - if self.model_init is not None: - # Seed must be set before instantiating the model when using model_init. 
- set_seed(self.args.seed) - model = self.model_init() - self.model = model.to(self.args.device) - - # Reinitializes optimizer and scheduler - self.optimizer, self.lr_scheduler = None, None - - # Data loader and number of training steps - train_dataloader = self.get_train_dataloader() - num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps - num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) - if self.args.max_steps > 0: - t_total = self.args.max_steps - num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int( - self.args.max_steps % num_update_steps_per_epoch > 0 - ) - else: - t_total = int(num_update_steps_per_epoch * self.args.num_train_epochs) - num_train_epochs = self.args.num_train_epochs - self.args.max_steps = t_total - - self.create_optimizer_and_scheduler(num_training_steps=t_total) - - # Check if saved optimizer or scheduler states exist - if ( - model_path is not None - and os.path.isfile(os.path.join(model_path, "optimizer.pt")) - and os.path.isfile(os.path.join(model_path, "scheduler.pt")) - ): - # Load in optimizer and scheduler states - self.optimizer.load_state_dict( - torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device) - ) - self.lr_scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt"))) - - model = self.model - if self.args.fp16 and _use_apex: - if not is_apex_available(): - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level) - - # multi-gpu training (should be after apex fp16 initialization) - if self.args.n_gpu > 1: - model = torch.nn.DataParallel(model) - - # Distributed training (should be after apex fp16 initialization) - if self.args.local_rank != -1: - model = torch.nn.parallel.DistributedDataParallel( - model, - device_ids=[self.args.local_rank], - output_device=self.args.local_rank, - find_unused_parameters=True, - ) - - if self.tb_writer is not None: - self.tb_writer.add_text("args", self.args.to_json_string()) - self.tb_writer.add_hparams(self.args.to_sanitized_dict(), metric_dict={}) - - # Train! - if is_torch_tpu_available(): - total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size() - else: - total_train_batch_size = ( - self.args.train_batch_size - * self.args.gradient_accumulation_steps - * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1) - ) - logger.info("***** Running training *****") - logger.info(" Num examples = %d", self.num_examples(train_dataloader)) - logger.info(" Num Epochs = %d", num_train_epochs) - logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size) - logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", total_train_batch_size) - logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps) - logger.info(" Total optimization steps = %d", t_total) - - self.global_step = 0 - self.epoch = 0 - self.total_flos = 0 - epochs_trained = 0 - steps_trained_in_current_epoch = 0 - # Check if continuing training from a checkpoint - if model_path is not None: - # set global_step to global_step of last saved checkpoint from model path - try: - self.global_step = int(model_path.split("-")[-1].split(os.path.sep)[0]) - # print(model, model.module) - if self.args.n_gpu > 1: - self.total_flos = getattr(model.module.config, "total_flos", 0) - else: - self.total_flos = getattr(model.config, "total_flos", 0) - - epochs_trained = self.global_step // num_update_steps_per_epoch - steps_trained_in_current_epoch = self.global_step % (num_update_steps_per_epoch) - - logger.info(" Continuing training from checkpoint, will skip to saved global_step") - logger.info(" Continuing training from epoch %d", epochs_trained) - logger.info(" Continuing training from global step %d", self.global_step) - logger.info(" Continuing training from %d non-embedding floating-point operations", self.total_flos) - logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) - except ValueError: - self.global_step = 0 - self.total_flos = 0 - logger.info(" Starting fine-tuning.") - - tr_loss = torch.tensor(0.0).to(self.args.device) - logging_loss_scalar = 0.0 - model.zero_grad() - disable_tqdm = self.args.disable_tqdm or not self.is_local_process_zero() - train_pbar = trange(epochs_trained, int(np.ceil(num_train_epochs)), desc="Epoch", disable=disable_tqdm) - for epoch in range(epochs_trained, int(np.ceil(num_train_epochs))): - if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler): - train_dataloader.sampler.set_epoch(epoch) - - if is_torch_tpu_available(): - parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader( - self.args.device - ) - epoch_iterator = parallel_loader - else: - epoch_iterator = train_dataloader - - # Reset the past mems state at the beginning of each epoch if necessary. 
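The resume bookkeeping above derives everything from the checkpoint directory name: the global step is parsed from the trailing number, and the epoch and in-epoch offsets follow by integer division. A minimal sketch of that arithmetic, using a hypothetical path and step count:

import os

model_path = "output/checkpoint-1500"  # hypothetical checkpoint directory
global_step = int(model_path.split("-")[-1].split(os.path.sep)[0])  # -> 1500
num_update_steps_per_epoch = 100  # assumed for illustration
epochs_trained = global_step // num_update_steps_per_epoch  # -> 15
steps_trained_in_current_epoch = global_step % num_update_steps_per_epoch  # -> 0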
- if self.args.past_index >= 0: - self._past = None - - epoch_pbar = tqdm(epoch_iterator, desc="Iteration", disable=disable_tqdm) - for step, inputs in enumerate(epoch_iterator): - - # Skip past any already trained steps if resuming training - if steps_trained_in_current_epoch > 0: - steps_trained_in_current_epoch -= 1 - epoch_pbar.update(1) - continue - - tr_loss += self.training_step(model, inputs) - - self.total_flos += self.floating_point_ops(inputs) - - if (step + 1) % self.args.gradient_accumulation_steps == 0 or ( - # last step in epoch but step is always smaller than gradient_accumulation_steps - len(epoch_iterator) <= self.args.gradient_accumulation_steps - and (step + 1) == len(epoch_iterator) - ): - if self.args.fp16 and _use_native_amp: - self.scaler.unscale_(self.optimizer) - torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm) - elif self.args.fp16 and _use_apex: - torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.max_grad_norm) - else: - torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm) - - if is_torch_tpu_available(): - xm.optimizer_step(self.optimizer) - elif self.args.fp16 and _use_native_amp: - self.scaler.step(self.optimizer) - self.scaler.update() - else: - self.optimizer.step() - - self.lr_scheduler.step() - model.zero_grad() - self.global_step += 1 - self.epoch = epoch + (step + 1) / len(epoch_iterator) - - if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or ( - self.global_step == 1 and self.args.logging_first_step - ): - logs: Dict[str, float] = {} - tr_loss_scalar = tr_loss.item() - logs["loss"] = (tr_loss_scalar - logging_loss_scalar) / self.args.logging_steps - # backward compatibility for pytorch schedulers - logs["learning_rate"] = ( - self.lr_scheduler.get_last_lr()[0] - if version.parse(torch.__version__) >= version.parse("1.4") - else self.lr_scheduler.get_lr()[0] - ) - logging_loss_scalar = tr_loss_scalar - - self.log(logs) - - if ( - self.args.evaluation_strategy == EvaluationStrategy.STEPS - and self.global_step % self.args.eval_steps == 0 - ): - metrics = self.evaluate() - self._report_to_hp_search(trial, epoch, metrics) - - #############################EARLY STOPPING######################## - if 'lowdata' in self.args.output_dir or 'earlystop' in self.args.output_dir: - self.save_based_on_eval = True - else: - self.save_based_on_eval = False - print('if no "lowdata:" line appears below, low-data mode was not entered.
') - if self.save_based_on_eval and metrics["eval_loss"] < self.curr_best_eval: - print('lowdata:', self.global_step, self.curr_best_eval, metrics["eval_loss"], - 'perplexity={}'.format(math.exp(metrics["eval_loss"]))) - self.curr_best_eval = metrics["eval_loss"] - if hasattr(model, "module"): - assert ( - model.module is self.model - ), f"Module {model.module} should be a reference to self.model" - else: - assert model is self.model, f"Model {model} should be a reference to self.model" - # Save model checkpoint - output_dir_name = os.path.basename(self.args.output_dir) - checkpoint_folder = f"{output_dir_name}-{PREFIX_CHECKPOINT_DIR}-{self.global_step}" - if self.hp_search_backend is not None and trial is not None: - run_id = ( - trial.number - if self.hp_search_backend == HPSearchBackend.OPTUNA - else tune.get_trial_id() - ) - checkpoint_folder += f"-run-{run_id}" - output_dir = os.path.join(self.args.output_dir, checkpoint_folder) - - self.store_flos() - print('saving to output_dir', output_dir) - self.save_model(output_dir) - - if self.is_world_process_zero(): - self._rotate_checkpoints(use_mtime=True) - ##################################################### - - if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0: - print('saving model at a checkpoint!!') - # In all cases (even distributed/parallel), self.model is always a reference - # to the model we want to save. - if hasattr(model, "module"): - assert ( - model.module is self.model - ), f"Module {model.module} should be a reference to self.model" - else: - assert model is self.model, f"Model {model} should be a reference to self.model" - # Save model checkpoint - checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.global_step}" - if self.hp_search_backend is not None and trial is not None: - run_id = ( - trial.number - if self.hp_search_backend == HPSearchBackend.OPTUNA - else tune.get_trial_id() - ) - checkpoint_folder += f"-run-{run_id}" - output_dir = os.path.join(self.args.output_dir, checkpoint_folder) - - self.store_flos() - - self.save_model(output_dir) - - if self.is_world_process_zero(): - self._rotate_checkpoints(use_mtime=True) - - if is_torch_tpu_available(): - xm.rendezvous("saving_optimizer_states") - xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) - xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) - elif self.is_world_process_zero(): - torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) - torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) - - epoch_pbar.update(1) - if self.args.max_steps > 0 and self.global_step >= self.args.max_steps: - break - epoch_pbar.close() - train_pbar.update(1) - - if self.args.evaluation_strategy == EvaluationStrategy.EPOCH: - metrics = self.evaluate() - self._report_to_hp_search(trial, epoch, metrics) - - if self.args.tpu_metrics_debug or self.args.debug: - if is_torch_tpu_available(): - # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) - xm.master_print(met.metrics_report()) - else: - logger.warning( - "You enabled PyTorch/XLA debug metrics but you don't have a TPU " - "configured. Check your training configuration if this is unexpected." 
- ) - if self.args.max_steps > 0 and self.global_step >= self.args.max_steps: - break - - train_pbar.close() - if self.tb_writer: - self.tb_writer.close() - if self.args.past_index and hasattr(self, "_past"): - # Clean the state at the end of training - delattr(self, "_past") - - logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") - return TrainOutput(self.global_step, tr_loss.item() / self.global_step) - - def hyperparameter_search( - self, - hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None, - compute_objective: Optional[Callable[[Dict[str, float]], float]] = None, - n_trials: int = 20, - direction: str = "minimize", - backend: Optional[Union[str, HPSearchBackend]] = None, - **kwargs - ) -> BestRun: - """ - Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by - :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided, - the sum of all metrics otherwise. - - .. warning:: - - To use this method, you need to have provided a ``model_init`` when initializing your - :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible - with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the - method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler. - - Args: - hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`): - A function that defines the hyperparameter search space. Will default to - :func:`~transformers.trainer_utils.default_hp_space_optuna` or - :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend. - compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`): - A function computing the objective to minimize or maximize from the metrics returned by the - :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`. - n_trials (:obj:`int`, `optional`, defaults to 20): - The number of trial runs to test. - direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`): - Whether to optimize greater or lower objectives. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should - pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or - several metrics. - backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`): - The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which - one is installed. If both are installed, will default to optuna. - kwargs: - Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For - more information see: - - - the documentation of `optuna.create_study `__ - - the documentation of `tune.run `__ - - Returns: - :class:`transformers.trainer_utils.BestRun`: All the information about the best run. - """ - if backend is None: - backend = default_hp_search_backend() - if backend is None: - raise RuntimeError( - "At least one of optuna or ray should be installed. " - "To install optuna run `pip install optuna`. " - "To install ray run `pip install ray[tune]`." - ) - backend = HPSearchBackend(backend) - if backend == HPSearchBackend.OPTUNA and not is_optuna_available(): - raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.") - if backend == HPSearchBackend.RAY and not is_ray_available(): - raise RuntimeError( - "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`." - ) - self.hp_search_backend = backend - - if self.model_init is None: - raise RuntimeError( - "To use hyperparameter search, you need to pass your model through a model_init function." - ) - - self.hp_space = default_hp_space[backend] if hp_space is None else hp_space - self.compute_objective = default_compute_objective if compute_objective is None else compute_objective - - run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray - best_run = run_hp_search(self, n_trials, direction, **kwargs) - - self.hp_search_backend = None - return best_run
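A sketch of how this entry point is typically driven, assuming the optuna backend is installed and the Trainer was constructed with a model_init callable; my_hp_space and its parameter ranges are illustrative, and every key must name a field on TrainingArguments, since _hp_search_setup copies the sampled values onto self.args before each trial:

def my_hp_space(trial):
    # Each key below must match a TrainingArguments field.
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "seed": trial.suggest_int("seed", 1, 40),
    }

best_run = trainer.hyperparameter_search(
    hp_space=my_hp_space,
    compute_objective=lambda metrics: metrics["eval_loss"],  # minimize eval loss
    n_trials=20,
    direction="minimize",
    backend="optuna",
)
print(best_run.run_id, best_run.objective, best_run.hyperparameters)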
Use `pip install optuna`.") - if backend == HPSearchBackend.RAY and not is_ray_available(): - raise RuntimeError( - "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`." - ) - self.hp_search_backend = backend - - if self.model_init is None: - raise RuntimeError( - "To use hyperparameter search, you need to pass your model through a model_init function." - ) - - self.hp_space = default_hp_space[backend] if hp_space is None else hp_space - self.compute_objective = default_compute_objective if compute_objective is None else compute_objective - - run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray - best_run = run_hp_search(self, n_trials, direction, **kwargs) - - self.hp_search_backend = None - return best_run - - def log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None: - """ - Log :obj:`logs` on the various objects watching training. - - Subclass and override this method to inject custom behavior. - - Args: - logs (:obj:`Dict[str, float]`): - The values to log. - iterator (:obj:`tqdm`, `optional`): - A potential tqdm progress bar to write the logs on. - """ - # Set up loggers like W&B or Comet ML - self._setup_loggers() - - if hasattr(self, "_log"): - warnings.warn( - "The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.", - FutureWarning, - ) - return self._log(logs, iterator=iterator) - - if self.epoch is not None: - logs["epoch"] = self.epoch - if self.total_flos is not None: - if self.args.local_rank != -1: - total_flos = distributed_broadcast_scalars([self.total_flos]).sum().item() - else: - total_flos = self.total_flos - if total_flos > 0: - logs["total_flos"] = self.total_flos - if self.global_step is None: - # when logging evaluation metrics without training - self.global_step = 0 - if self.tb_writer: - for k, v in logs.items(): - if isinstance(v, (int, float)): - self.tb_writer.add_scalar(k, v, self.global_step) - else: - logger.warning( - "Trainer is attempting to log a value of " - '"%s" of type %s for key "%s" as a scalar. ' - "This invocation of Tensorboard's writer.add_scalar() " - "is incorrect so we dropped this attribute.", - v, - type(v), - k, - ) - self.tb_writer.flush() - if is_wandb_available(): - if self.is_world_process_zero(): - wandb.log(logs, step=self.global_step) - if is_comet_available(): - if self.is_world_process_zero(): - experiment = comet_ml.config.get_global_experiment() - if experiment is not None: - experiment._log_metrics(logs, step=self.global_step, epoch=self.epoch, framework="transformers") - output = {**logs, **{"step": self.global_step}} - if self.is_world_process_zero(): - self.log_history.append(output) - if iterator is not None: - iterator.write(output) - else: - print(output) - - def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: - """ - Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and - handling potential state. - """ - for k, v in inputs.items(): - if isinstance(v, torch.Tensor): - inputs[k] = v.to(self.args.device) - - if self.args.past_index >= 0 and self._past is not None: - assert False - inputs["mems"] = self._past - - return inputs - - def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: - """ - Perform a training step on a batch of inputs. - - Subclass and override to inject custom behavior. 
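The device handoff in _prepare_inputs is the standard dictionary-of-tensors pattern; a self-contained sketch, with a dummy batch standing in for a real one:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch = {"input_ids": torch.randint(0, 100, (2, 8)), "labels": torch.randint(0, 100, (2, 8))}
# Only tensors are moved; any non-tensor entries pass through untouched.
batch = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in batch.items()}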
- - def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: - """ - Perform a training step on a batch of inputs. - - Subclass and override to inject custom behavior. - - Args: - model (:obj:`nn.Module`): - The model to train. - inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): - The inputs and targets of the model. - - The dictionary will be unpacked before being fed to the model. Most models expect the targets under the - argument :obj:`labels`. Check your model's documentation for all accepted arguments. - - Return: - :obj:`torch.Tensor`: The tensor with training loss on this batch. - """ - if hasattr(self, "_training_step"): - warnings.warn( - "The `_training_step` method is deprecated and won't be called in a future version, define `training_step` in your subclass.", - FutureWarning, - ) - return self._training_step(model, inputs, self.optimizer) - - model.train() - if self.use_dropout: - if self.gpt2 is not None: - self.gpt2.train() - inputs = self._prepare_inputs(inputs) - - if self.args.fp16 and _use_native_amp: - with autocast(): - if self.distill: - loss = self.compute_loss_distill(model, inputs, gpt2_model=self.gpt2) - else: - loss = self.compute_loss(model, inputs, gpt2_model=self.gpt2) - else: - if self.distill: - loss = self.compute_loss_distill(model, inputs, gpt2_model=self.gpt2) - else: - loss = self.compute_loss(model, inputs, gpt2_model=self.gpt2) - - if self.args.n_gpu > 1: - loss = loss.mean() # mean() to average on multi-gpu parallel training - - if self.args.gradient_accumulation_steps > 1: - loss = loss / self.args.gradient_accumulation_steps - - if self.args.fp16 and _use_native_amp: - self.scaler.scale(loss).backward() - elif self.args.fp16 and _use_apex: - with amp.scale_loss(loss, self.optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - return loss.detach() - - def compute_loss(self, model, inputs, gpt2_model=None): - """ - How the loss is computed by Trainer. By default, all models return the loss in the first element. - - Subclass and override for custom behavior. - """ - if 'prompt_lab' in inputs: - prompt_lab_ = inputs['prompt_lab'] - k = torch.cat(self.discri_labels_code, dim=0) - inputs['control_code'] = torch.index_select(k, 0, prompt_lab_) - del inputs['prompt_lab'] - - outputs = model(**inputs, gpt2_model=gpt2_model) - # Save past state if it exists - if self.args.past_index >= 0: - self._past = outputs[self.args.past_index] - - # We don't use .loss here since the model may return tuples instead of ModelOutput. - return outputs[0].mean() - - def compute_loss_distill(self, model, inputs, gpt2_model=None): - """ - How the loss is computed by Trainer. By default, all models return the loss in the first element. - - Subclass and override for custom behavior.
- """ - # outputs = model.forward_weighted(**inputs) - - with torch.no_grad(): - output_finetuned = self.finetuned_gpt2(**inputs) - - outputs = model(**inputs, gpt2_model=gpt2_model) - # Save past state if it exists - if self.args.past_index >= 0: - self._past = outputs[self.args.past_index] - - if self.matching_objective == 'kl': - # distrib_finetuned=torch.log_softmax(output_finetuned.logits[:,:,:-2], dim=-1) #bsz, seqlen, vocab - distrib_finetuned=torch.log_softmax(output_finetuned.logits, dim=-1) #bsz, seqlen, vocab - distrib_prefix = torch.log_softmax(outputs.logits, dim=-1) # bsz, seqlen, vocab - loss = torch.sum(distrib_finetuned.exp() * (distrib_finetuned - distrib_prefix), dim=-1) #bsz, seqlen - - elif self.matching_objective == 'logits': - loss = torch.norm(output_finetuned.logits - outputs.logits, dim=-1) #bsz, seqlen - # loss = torch.norm(output_finetuned.logits[:,:,:-2] - outputs.logits, dim=-1) #bsz, seqlen - - elif self.matching_objective == 'last_layer': - activation_diff = output_finetuned.last_hidden_state - outputs.last_hidden_state - loss = torch.norm(activation_diff, dim=-1) # bsz, seqlen - else: - assert False, "invalid matching_objective" - - return loss.sum(dim=-1).mean() - - def is_local_master(self) -> bool: - """ - Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on - several machines) main process. - - .. warning:: - - This method is deprecated, use :meth:`~transformers.Trainer.is_local_process_zero` instead. - """ - warnings.warn("This method is deprecated, use `Trainer.is_local_process_zero()` instead.", FutureWarning) - return self.is_local_process_zero() - - def is_local_process_zero(self) -> bool: - """ - Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on - several machines) main process. - """ - if is_torch_tpu_available(): - return xm.is_master_ordinal(local=True) - else: - return self.args.local_rank in [-1, 0] - - def is_world_master(self) -> bool: - """ - Whether or not this process is the global main process (when training in a distributed fashion on - several machines, this is only going to be :obj:`True` for one process). - - .. warning:: - - This method is deprecated, use :meth:`~transformers.Trainer.is_world_process_zero` instead. - """ - warnings.warn("This method is deprecated, use `Trainer.is_world_process_zero()` instead.", FutureWarning) - return self.is_world_process_zero() - - def is_world_process_zero(self) -> bool: - """ - Whether or not this process is the global main process (when training in a distributed fashion on - several machines, this is only going to be :obj:`True` for one process). - """ - if is_torch_tpu_available(): - return xm.is_master_ordinal(local=False) - else: - return self.args.local_rank == -1 or torch.distributed.get_rank() == 0 - - def save_model(self, output_dir: Optional[str] = None): - """ - Will save the model, so you can reload it using :obj:`from_pretrained()`. - - Will only save from the world_master process (unless in TPUs). 
- """ - - if is_torch_tpu_available(): - self._save_tpu(output_dir) - elif self.is_world_process_zero(): - self._save(output_dir) - - def _save_tpu(self, output_dir: Optional[str] = None): - output_dir = output_dir if output_dir is not None else self.args.output_dir - logger.info("Saving model checkpoint to %s", output_dir) - - if xm.is_master_ordinal(): - os.makedirs(output_dir, exist_ok=True) - torch.save(self.args, os.path.join(output_dir, "training_args.bin")) - json.dump( - self.log_history, open(os.path.join(output_dir, "log_history.json"), "w"), indent=2, ensure_ascii=False - ) - - # Save a trained model and configuration using `save_pretrained()`. - # They can then be reloaded using `from_pretrained()` - if not isinstance(self.model, PreTrainedModel): - raise ValueError("Trainer.model appears to not be a PreTrainedModel") - - xm.rendezvous("saving_checkpoint") - self.model.save_pretrained(output_dir) - if self.tokenizer is not None: - self.tokenizer.save_pretrained(output_dir) - - def _save(self, output_dir: Optional[str] = None): - output_dir = output_dir if output_dir is not None else self.args.output_dir - os.makedirs(output_dir, exist_ok=True) - logger.info("Saving model checkpoint to %s", output_dir) - # Save a trained model and configuration using `save_pretrained()`. - # They can then be reloaded using `from_pretrained()` - if not isinstance(self.model, PreTrainedModel): - raise ValueError("Trainer.model appears to not be a PreTrainedModel") - self.model.save_pretrained(output_dir) - if self.tokenizer is not None: - self.tokenizer.save_pretrained(output_dir) - - # Good practice: save your training arguments together with the trained model - torch.save(self.args, os.path.join(output_dir, "training_args.bin")) - json.dump( - self.log_history, open(os.path.join(output_dir, "log_history.json"), "w"), indent=2, ensure_ascii=False - ) - - def store_flos(self): - # Storing the number of floating-point operations that went into the model - if self.total_flos is not None: - if self.args.local_rank != -1: - total_flos = distributed_broadcast_scalars([self.total_flos]).sum().item() - else: - total_flos = self.total_flos - if total_flos > 0: - self.model.config.total_flos = total_flos - - def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]: - output_dir_name = os.path.basename(self.args.output_dir) - checkpoint_prefix = f"{output_dir_name}-{PREFIX_CHECKPOINT_DIR}" - - ordering_and_checkpoint_path = [] - - glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")] - - for path in glob_checkpoints: - if use_mtime: - ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) - else: - regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) - if regex_match and regex_match.groups(): - ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) - - checkpoints_sorted = sorted(ordering_and_checkpoint_path) - checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] - return checkpoints_sorted - - def _rotate_checkpoints(self, use_mtime=False) -> None: - if self.args.save_total_limit is None or self.args.save_total_limit <= 0: - return - - # Check if we should delete older checkpoint(s) - checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime) - if len(checkpoints_sorted) <= self.args.save_total_limit: - return - - number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit) - checkpoints_to_be_deleted = 
checkpoints_sorted[:number_of_checkpoints_to_delete] - for checkpoint in checkpoints_to_be_deleted: - logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint)) - shutil.rmtree(checkpoint) - - def evaluate(self, eval_dataset: Optional[Dataset] = None) -> Dict[str, float]: - """ - Run evaluation and returns metrics. - - The calling script will be responsible for providing a method to compute metrics, as they are - task-dependent (pass it to the init :obj:`compute_metrics` argument). - - You can also subclass and override this method to inject custom behavior. - - Args: - eval_dataset (:obj:`Dataset`, `optional`): - Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, - columns not accepted by the ``model.forward()`` method are automatically removed. - - Returns: - A dictionary containing the evaluation loss and the potential metrics computed from the predictions. - """ - eval_dataloader = self.get_eval_dataloader(eval_dataset) - - output = self.prediction_loop(eval_dataloader, description="Evaluation") - - self.log(output.metrics) - - if self.args.tpu_metrics_debug or self.args.debug: - # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) - xm.master_print(met.metrics_report()) - - return output.metrics - - - - def predict(self, test_dataset: Dataset) -> PredictionOutput: - """ - Run prediction and returns predictions and potential metrics. - - Depending on the dataset and your use case, your test dataset may contain labels. - In that case, this method will also return metrics, like in :obj:`evaluate()`. - - Args: - test_dataset (:obj:`Dataset`): - Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the - ``model.forward()`` method are automatically removed. - - Returns: - `NamedTuple`: - predictions (:obj:`np.ndarray`): - The predictions on :obj:`test_dataset`. - label_ids (:obj:`np.ndarray`, `optional`): - The labels (if the dataset contained some). - metrics (:obj:`Dict[str, float]`, `optional`): - The potential dictionary of metrics (if the dataset contained labels). - """ - test_dataloader = self.get_test_dataloader(test_dataset) - - return self.prediction_loop(test_dataloader, description="Prediction") - - def prediction_loop( - self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None - ) -> PredictionOutput: - """ - Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`. - - Works both with or without labels. - """ - if hasattr(self, "_prediction_loop"): - warnings.warn( - "The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.", - FutureWarning, - ) - return self._prediction_loop(dataloader, description, prediction_loss_only=prediction_loss_only) - - prediction_loss_only = ( - prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only - ) - - assert not getattr( - self.model.config, "output_attentions", False - ), "The prediction loop does not work with `output_attentions=True`." - assert not getattr( - self.model.config, "output_hidden_states", False - ), "The prediction loop does not work with `output_hidden_states=True`." 
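The loop below also aggregates a token-entropy diagnostic from the logits; the computation it performs reduces to the following minimal sketch (dummy shapes, assuming a language-model-style (batch, seq, vocab) logit tensor):

import torch

logits = torch.randn(4, 16, 1000)                 # (batch, seq, vocab), dummy values
log_probs = torch.log_softmax(logits, dim=-1)     # work in log space for stability
entropy = -(log_probs.exp() * log_probs).sum(-1)  # H = -sum_v p(v) log p(v), per token
mean_entropy = entropy.mean().item()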
- - model = self.model - # multi-gpu eval - if self.args.n_gpu > 1: - model = torch.nn.DataParallel(model) - else: - model = self.model - # Note: in torch.distributed mode, there's no point in wrapping the model - # inside a DistributedDataParallel as we'll be under `no_grad` anyways. - - batch_size = dataloader.batch_size - logger.info("***** Running %s *****", description) - logger.info(" Num examples = %d", self.num_examples(dataloader)) - logger.info(" Batch size = %d", batch_size) - eval_losses: List[float] = [] - preds: torch.Tensor = None - label_ids: torch.Tensor = None - entropy_losses: List[float] = [] - model.eval() - if self.gpt2 is not None: - self.gpt2.eval() - - print(model.training) - if self.gpt2 is not None: - print(self.gpt2.training) - - if is_torch_tpu_available(): - dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device) - - if self.args.past_index >= 0: - self._past = None - - disable_tqdm = not self.is_local_process_zero() or self.args.disable_tqdm - for inputs in tqdm(dataloader, desc=description, disable=disable_tqdm): - loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only) - batch_size = inputs[list(inputs.keys())[0]].shape[0] - if loss is not None: - eval_losses.extend([loss] * batch_size) - if logits is not None: - preds = logits if preds is None else nested_concat(preds, logits, dim=0) - temp_logits = [torch.log_softmax(x, dim=-1) for x in logits] - entropy_losses.extend([-(x.exp() * x).sum().item() for x in temp_logits]) - if labels is not None: - label_ids = labels if label_ids is None else nested_concat(label_ids, labels, dim=0) - - if self.args.past_index and hasattr(self, "_past"): - # Clean the state at the end of the evaluation loop - delattr(self, "_past") - - if self.compute_metrics is not None and preds is not None and label_ids is not None: - metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) - else: - metrics = {} - - # Prefix all keys with eval_ - for key in list(metrics.keys()): - if not key.startswith("eval_"): - metrics[f"eval_{key}"] = metrics.pop(key) - if len(entropy_losses) > 0: - metrics['entropy'] = np.mean(entropy_losses) - print('entropy', metrics['entropy']) - - return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) - - def prediction_step( - self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool - ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: - """ - Perform an evaluation step on :obj:`model` using :obj:`inputs`. - - Subclass and override to inject custom behavior. - - Args: - model (:obj:`nn.Module`): - The model to evaluate. - inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): - The inputs and targets of the model. - - The dictionary will be unpacked before being fed to the model. Most models expect the targets under the - argument :obj:`labels`. Check your model's documentation for all accepted arguments. - prediction_loss_only (:obj:`bool`): - Whether or not to return the loss only. - - Return: - Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: - A tuple with the loss, logits and labels (each being optional). - """ - has_labels = all(inputs.get(k) is not None for k in self.args.label_names) - inputs = self._prepare_inputs(inputs) - - # At eval time, set the weights to 1/bsz and see the results.
- - # if 'weights' in inputs: - # weights = inputs['weights'] - # bsz = weights.view(-1).shape[0] - # weights = (torch.ones(weights.shape)/bsz).to(weights.device) - # inputs['weights'] = weights - - with torch.no_grad(): - # outputs = model.forward_weighted(**inputs) - outputs = model(**inputs, gpt2_model=self.gpt2) - if has_labels: - # The .mean() is to reduce in case of distributed training - loss = outputs[0].mean().item() - logits = outputs[1:] - else: - loss = None - # Slicing so we get a tuple even if `outputs` is a `ModelOutput`. - logits = outputs[:] - if self.args.past_index >= 0: - self._past = outputs[self.args.past_index if has_labels else self.args.past_index - 1] - - if prediction_loss_only: - return (loss, None, None) - - logits = tuple(logit.detach() for logit in logits) - if len(logits) == 1: - logits = logits[0] - - if has_labels: - labels = tuple(inputs.get(name).detach() for name in self.args.label_names) - if len(labels) == 1: - labels = labels[0] - else: - labels = None - - return (loss, logits, labels) - - def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]): - """ - For models that inherit from :class:`~transformers.PretrainedModel`, uses - that method to compute the number of floating point operations for every backward + forward pass. If using - another model, either implement such a method in the model or subclass and override this method. - - Args: - model (:obj:`nn.Module`): - The model to evaluate. - inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): - The inputs and targets of the model. - - Returns: - :obj:`int`: The number of floating-point operations. - """ - - if isinstance(self.model, torch.nn.DataParallel) or isinstance( - self.model, torch.nn.parallel.DistributedDataParallel - ): - model = self.model.module - else: - model = self.model - - if hasattr(model, "floating_point_ops"): - return model.floating_point_ops(inputs) - - else: - return 0 \ No newline at end of file diff --git a/spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_mapillary_vistas_panoptic.py b/spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_mapillary_vistas_panoptic.py deleted file mode 100644 index 0123185583f03ba1715da6e0b1eb24f71c12adda..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_mapillary_vistas_panoptic.py +++ /dev/null @@ -1,508 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import json -import os - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.utils.file_io import PathManager - - -MAPILLARY_VISTAS_SEM_SEG_CATEGORIES = [ - {'color': [165, 42, 42], - 'id': 1, - 'isthing': 1, - 'name': 'Bird', - 'supercategory': 'animal--bird'}, - {'color': [0, 192, 0], - 'id': 2, - 'isthing': 1, - 'name': 'Ground Animal', - 'supercategory': 'animal--ground-animal'}, - {'color': [196, 196, 196], - 'id': 3, - 'isthing': 0, - 'name': 'Curb', - 'supercategory': 'construction--barrier--curb'}, - {'color': [190, 153, 153], - 'id': 4, - 'isthing': 0, - 'name': 'Fence', - 'supercategory': 'construction--barrier--fence'}, - {'color': [180, 165, 180], - 'id': 5, - 'isthing': 0, - 'name': 'Guard Rail', - 'supercategory': 'construction--barrier--guard-rail'}, - {'color': [90, 120, 150], - 'id': 6, - 'isthing': 0, - 'name': 'Barrier', - 'supercategory': 'construction--barrier--other-barrier'}, - {'color': [102, 102, 156], - 'id': 7, - 'isthing': 0, - 'name': 'Wall', - 'supercategory': 'construction--barrier--wall'}, - {'color': [128, 64, 255], - 'id': 8, - 'isthing': 0, - 'name': 'Bike Lane', - 'supercategory': 'construction--flat--bike-lane'}, - {'color': [140, 140, 200], - 'id': 9, - 'isthing': 1, - 'name': 'Crosswalk - Plain', - 'supercategory': 'construction--flat--crosswalk-plain'}, - {'color': [170, 170, 170], - 'id': 10, - 'isthing': 0, - 'name': 'Curb Cut', - 'supercategory': 'construction--flat--curb-cut'}, - {'color': [250, 170, 160], - 'id': 11, - 'isthing': 0, - 'name': 'Parking', - 'supercategory': 'construction--flat--parking'}, - {'color': [96, 96, 96], - 'id': 12, - 'isthing': 0, - 'name': 'Pedestrian Area', - 'supercategory': 'construction--flat--pedestrian-area'}, - {'color': [230, 150, 140], - 'id': 13, - 'isthing': 0, - 'name': 'Rail Track', - 'supercategory': 'construction--flat--rail-track'}, - {'color': [128, 64, 128], - 'id': 14, - 'isthing': 0, - 'name': 'Road', - 'supercategory': 'construction--flat--road'}, - {'color': [110, 110, 110], - 'id': 15, - 'isthing': 0, - 'name': 'Service Lane', - 'supercategory': 'construction--flat--service-lane'}, - {'color': [244, 35, 232], - 'id': 16, - 'isthing': 0, - 'name': 'Sidewalk', - 'supercategory': 'construction--flat--sidewalk'}, - {'color': [150, 100, 100], - 'id': 17, - 'isthing': 0, - 'name': 'Bridge', - 'supercategory': 'construction--structure--bridge'}, - {'color': [70, 70, 70], - 'id': 18, - 'isthing': 0, - 'name': 'Building', - 'supercategory': 'construction--structure--building'}, - {'color': [150, 120, 90], - 'id': 19, - 'isthing': 0, - 'name': 'Tunnel', - 'supercategory': 'construction--structure--tunnel'}, - {'color': [220, 20, 60], - 'id': 20, - 'isthing': 1, - 'name': 'Person', - 'supercategory': 'human--person'}, - {'color': [255, 0, 0], - 'id': 21, - 'isthing': 1, - 'name': 'Bicyclist', - 'supercategory': 'human--rider--bicyclist'}, - {'color': [255, 0, 100], - 'id': 22, - 'isthing': 1, - 'name': 'Motorcyclist', - 'supercategory': 'human--rider--motorcyclist'}, - {'color': [255, 0, 200], - 'id': 23, - 'isthing': 1, - 'name': 'Other Rider', - 'supercategory': 'human--rider--other-rider'}, - {'color': [200, 128, 128], - 'id': 24, - 'isthing': 1, - 'name': 'Lane Marking - Crosswalk', - 'supercategory': 'marking--crosswalk-zebra'}, - {'color': [255, 255, 255], - 'id': 25, - 'isthing': 0, - 'name': 'Lane Marking - General', - 'supercategory': 'marking--general'}, - {'color': [64, 170, 64], - 'id': 26, - 'isthing': 0, - 'name': 'Mountain', - 'supercategory': 'nature--mountain'}, - 
{'color': [230, 160, 50], - 'id': 27, - 'isthing': 0, - 'name': 'Sand', - 'supercategory': 'nature--sand'}, - {'color': [70, 130, 180], - 'id': 28, - 'isthing': 0, - 'name': 'Sky', - 'supercategory': 'nature--sky'}, - {'color': [190, 255, 255], - 'id': 29, - 'isthing': 0, - 'name': 'Snow', - 'supercategory': 'nature--snow'}, - {'color': [152, 251, 152], - 'id': 30, - 'isthing': 0, - 'name': 'Terrain', - 'supercategory': 'nature--terrain'}, - {'color': [107, 142, 35], - 'id': 31, - 'isthing': 0, - 'name': 'Vegetation', - 'supercategory': 'nature--vegetation'}, - {'color': [0, 170, 30], - 'id': 32, - 'isthing': 0, - 'name': 'Water', - 'supercategory': 'nature--water'}, - {'color': [255, 255, 128], - 'id': 33, - 'isthing': 1, - 'name': 'Banner', - 'supercategory': 'object--banner'}, - {'color': [250, 0, 30], - 'id': 34, - 'isthing': 1, - 'name': 'Bench', - 'supercategory': 'object--bench'}, - {'color': [100, 140, 180], - 'id': 35, - 'isthing': 1, - 'name': 'Bike Rack', - 'supercategory': 'object--bike-rack'}, - {'color': [220, 220, 220], - 'id': 36, - 'isthing': 1, - 'name': 'Billboard', - 'supercategory': 'object--billboard'}, - {'color': [220, 128, 128], - 'id': 37, - 'isthing': 1, - 'name': 'Catch Basin', - 'supercategory': 'object--catch-basin'}, - {'color': [222, 40, 40], - 'id': 38, - 'isthing': 1, - 'name': 'CCTV Camera', - 'supercategory': 'object--cctv-camera'}, - {'color': [100, 170, 30], - 'id': 39, - 'isthing': 1, - 'name': 'Fire Hydrant', - 'supercategory': 'object--fire-hydrant'}, - {'color': [40, 40, 40], - 'id': 40, - 'isthing': 1, - 'name': 'Junction Box', - 'supercategory': 'object--junction-box'}, - {'color': [33, 33, 33], - 'id': 41, - 'isthing': 1, - 'name': 'Mailbox', - 'supercategory': 'object--mailbox'}, - {'color': [100, 128, 160], - 'id': 42, - 'isthing': 1, - 'name': 'Manhole', - 'supercategory': 'object--manhole'}, - {'color': [142, 0, 0], - 'id': 43, - 'isthing': 1, - 'name': 'Phone Booth', - 'supercategory': 'object--phone-booth'}, - {'color': [70, 100, 150], - 'id': 44, - 'isthing': 0, - 'name': 'Pothole', - 'supercategory': 'object--pothole'}, - {'color': [210, 170, 100], - 'id': 45, - 'isthing': 1, - 'name': 'Street Light', - 'supercategory': 'object--street-light'}, - {'color': [153, 153, 153], - 'id': 46, - 'isthing': 1, - 'name': 'Pole', - 'supercategory': 'object--support--pole'}, - {'color': [128, 128, 128], - 'id': 47, - 'isthing': 1, - 'name': 'Traffic Sign Frame', - 'supercategory': 'object--support--traffic-sign-frame'}, - {'color': [0, 0, 80], - 'id': 48, - 'isthing': 1, - 'name': 'Utility Pole', - 'supercategory': 'object--support--utility-pole'}, - {'color': [250, 170, 30], - 'id': 49, - 'isthing': 1, - 'name': 'Traffic Light', - 'supercategory': 'object--traffic-light'}, - {'color': [192, 192, 192], - 'id': 50, - 'isthing': 1, - 'name': 'Traffic Sign (Back)', - 'supercategory': 'object--traffic-sign--back'}, - {'color': [220, 220, 0], - 'id': 51, - 'isthing': 1, - 'name': 'Traffic Sign (Front)', - 'supercategory': 'object--traffic-sign--front'}, - {'color': [140, 140, 20], - 'id': 52, - 'isthing': 1, - 'name': 'Trash Can', - 'supercategory': 'object--trash-can'}, - {'color': [119, 11, 32], - 'id': 53, - 'isthing': 1, - 'name': 'Bicycle', - 'supercategory': 'object--vehicle--bicycle'}, - {'color': [150, 0, 255], - 'id': 54, - 'isthing': 1, - 'name': 'Boat', - 'supercategory': 'object--vehicle--boat'}, - {'color': [0, 60, 100], - 'id': 55, - 'isthing': 1, - 'name': 'Bus', - 'supercategory': 'object--vehicle--bus'}, - {'color': [0, 0, 142], - 'id': 
56, - 'isthing': 1, - 'name': 'Car', - 'supercategory': 'object--vehicle--car'}, - {'color': [0, 0, 90], - 'id': 57, - 'isthing': 1, - 'name': 'Caravan', - 'supercategory': 'object--vehicle--caravan'}, - {'color': [0, 0, 230], - 'id': 58, - 'isthing': 1, - 'name': 'Motorcycle', - 'supercategory': 'object--vehicle--motorcycle'}, - {'color': [0, 80, 100], - 'id': 59, - 'isthing': 0, - 'name': 'On Rails', - 'supercategory': 'object--vehicle--on-rails'}, - {'color': [128, 64, 64], - 'id': 60, - 'isthing': 1, - 'name': 'Other Vehicle', - 'supercategory': 'object--vehicle--other-vehicle'}, - {'color': [0, 0, 110], - 'id': 61, - 'isthing': 1, - 'name': 'Trailer', - 'supercategory': 'object--vehicle--trailer'}, - {'color': [0, 0, 70], - 'id': 62, - 'isthing': 1, - 'name': 'Truck', - 'supercategory': 'object--vehicle--truck'}, - {'color': [0, 0, 192], - 'id': 63, - 'isthing': 1, - 'name': 'Wheeled Slow', - 'supercategory': 'object--vehicle--wheeled-slow'}, - {'color': [32, 32, 32], - 'id': 64, - 'isthing': 0, - 'name': 'Car Mount', - 'supercategory': 'void--car-mount'}, - {'color': [120, 10, 10], - 'id': 65, - 'isthing': 0, - 'name': 'Ego Vehicle', - 'supercategory': 'void--ego-vehicle'} -] - - -def load_mapillary_vistas_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta): - """ - Args: - image_dir (str): path to the raw dataset. e.g., "~/coco/train2017". - gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017". - json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json". - Returns: - list[dict]: a list of dicts in Detectron2 standard format. (See - `Using Custom Datasets `_ ) - """ - - def _convert_category_id(segment_info, meta): - if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: - segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ - segment_info["category_id"] - ] - segment_info["isthing"] = True - else: - segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ - segment_info["category_id"] - ] - segment_info["isthing"] = False - return segment_info - - with PathManager.open(json_file) as f: - json_info = json.load(f) - - ret = [] - for ann in json_info["annotations"]: - image_id = ann["image_id"] - # TODO: currently we assume image and label has the same filename but - # different extension, and images have extension ".jpg" for COCO. Need - # to make image extension a user-provided argument if we extend this - # function to support other COCO-like datasets. - image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg") - label_file = os.path.join(gt_dir, ann["file_name"]) - sem_label_file = os.path.join(semseg_dir, ann["file_name"]) - segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]] - ret.append( - { - "file_name": image_file, - "image_id": image_id, - "pan_seg_file_name": label_file, - "sem_seg_file_name": sem_label_file, - "segments_info": segments_info, - } - ) - assert len(ret), f"No images found in {image_dir}!" - assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"] - assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"] - assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"] - return ret - - -def register_mapillary_vistas_panoptic( - name, metadata, image_root, panoptic_root, semantic_root, panoptic_json, instances_json=None -): - """ - Register a "standard" version of ADE20k panoptic segmentation dataset named `name`. 
- The dictionaries in this registered dataset follow detectron2's standard format. - Hence it's called "standard". - Args: - name (str): the name that identifies a dataset, - e.g. "ade20k_panoptic_train" - metadata (dict): extra metadata associated with this dataset. - image_root (str): directory which contains all the images - panoptic_root (str): directory which contains panoptic annotation images in COCO format - panoptic_json (str): path to the json panoptic annotation file in COCO format - sem_seg_root (none): not used, to be consistent with - `register_coco_panoptic_separated`. - instances_json (str): path to the json instance annotation file - """ - panoptic_name = name - DatasetCatalog.register( - panoptic_name, - lambda: load_mapillary_vistas_panoptic_json( - panoptic_json, image_root, panoptic_root, semantic_root, metadata - ), - ) - MetadataCatalog.get(panoptic_name).set( - panoptic_root=panoptic_root, - image_root=image_root, - panoptic_json=panoptic_json, - json_file=instances_json, - evaluator_type="mapillary_vistas_panoptic_seg", - ignore_label=65, # different from other datasets, Mapillary Vistas sets ignore_label to 65 - label_divisor=1000, - **metadata, - ) - - -_PREDEFINED_SPLITS_ADE20K_PANOPTIC = { - "mapillary_vistas_panoptic_train": ( - "mapillary_vistas/training/images", - "mapillary_vistas/training/panoptic", - "mapillary_vistas/training/panoptic/panoptic_2018.json", - "mapillary_vistas/training/labels", - ), - "mapillary_vistas_panoptic_val": ( - "mapillary_vistas/validation/images", - "mapillary_vistas/validation/panoptic", - "mapillary_vistas/validation/panoptic/panoptic_2018.json", - "mapillary_vistas/validation/labels", - ), -} - - -def get_metadata(): - meta = {} - # The following metadata maps contiguous id from [0, #thing categories + - # #stuff categories) to their names and colors. We keep two copies of the - # same name and color under "thing_*" and "stuff_*" because the current - # visualization function in D2 handles thing and stuff classes differently - # due to some heuristic used in Panoptic FPN. We keep the same naming to - # enable reusing existing visualization functions. - thing_classes = [k["name"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES] - thing_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES] - stuff_classes = [k["name"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES] - stuff_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES] - - meta["thing_classes"] = thing_classes - meta["thing_colors"] = thing_colors - meta["stuff_classes"] = stuff_classes - meta["stuff_colors"] = stuff_colors - - # Convert category id for training: - # category id: like semantic segmentation, it is the class id for each - # pixel. Since there are some classes not used in evaluation, the category - # id is not always contiguous and thus we have two sets of category ids: - # - original category id: category id in the original dataset, mainly - # used for evaluation. - # - contiguous category id: [0, #classes), in order to train the linear - # softmax classifier. - thing_dataset_id_to_contiguous_id = {} - stuff_dataset_id_to_contiguous_id = {} - - for i, cat in enumerate(MAPILLARY_VISTAS_SEM_SEG_CATEGORIES): - if cat["isthing"]: - thing_dataset_id_to_contiguous_id[cat["id"]] = i - # else: - # stuff_dataset_id_to_contiguous_id[cat["id"]] = i - - # in order to use sem_seg evaluator - stuff_dataset_id_to_contiguous_id[cat["id"]] = i - - meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id - meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id - - return meta
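A tiny worked example of the id remapping performed above, using three hypothetical categories; thing ids land in both maps, while every id lands in the stuff map so the sem-seg evaluator can run:

categories = [
    {"id": 1, "isthing": 1},   # hypothetical thing class
    {"id": 2, "isthing": 0},   # hypothetical stuff class
    {"id": 5, "isthing": 1},   # dataset ids need not be contiguous
]
thing_map, stuff_map = {}, {}
for i, cat in enumerate(categories):
    if cat["isthing"]:
        thing_map[cat["id"]] = i
    stuff_map[cat["id"]] = i   # every class, to drive the sem-seg evaluator
print(thing_map)  # {1: 0, 5: 2}
print(stuff_map)  # {1: 0, 2: 1, 5: 2}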
- - def register_all_mapillary_vistas_panoptic(root): - metadata = get_metadata() - for ( - prefix, - (image_root, panoptic_root, panoptic_json, semantic_root), - ) in _PREDEFINED_SPLITS_ADE20K_PANOPTIC.items(): - # The "standard" version of COCO panoptic segmentation dataset, - # e.g. used by Panoptic-DeepLab - register_mapillary_vistas_panoptic( - prefix, - metadata, - os.path.join(root, image_root), - os.path.join(root, panoptic_root), - os.path.join(root, semantic_root), - os.path.join(root, panoptic_json), - ) - - -_root = os.getenv("DETECTRON2_DATASETS", "datasets") -register_all_mapillary_vistas_panoptic(_root) diff --git a/spaces/Egrt/LicenseGAN/utils/dataloader.py b/spaces/Egrt/LicenseGAN/utils/dataloader.py deleted file mode 100644 index 5d47fed4aa64a74804206d6da3c98721e45e15fc..0000000000000000000000000000000000000000 --- a/spaces/Egrt/LicenseGAN/utils/dataloader.py +++ /dev/null @@ -1,328 +0,0 @@ -import math -from random import choice, choices, randint - -import cv2 -import numpy as np -from PIL import Image -from torch.utils.data.dataset import Dataset - -from utils import USMSharp_npy, cvtColor, preprocess_input - -from .degradations import (circular_lowpass_kernel, random_add_gaussian_noise, - random_add_poisson_noise, random_mixed_kernels) -from .transforms import augment, paired_random_crop - -def cv_show(image): - image = np.array(image) - image = cv2.resize(image, (256, 128), interpolation=cv2.INTER_CUBIC) - cv2.imshow('image', image) - cv2.waitKey(0) - cv2.destroyAllWindows() - -def get_new_img_size(width, height, img_min_side=600): - if width <= height: - f = float(img_min_side) / width - resized_height = int(f * height) - resized_width = int(img_min_side) - else: - f = float(img_min_side) / height - resized_width = int(f * width) - resized_height = int(img_min_side) - - return resized_width, resized_height - -class SRGANDataset(Dataset): - def __init__(self, train_lines, lr_shape, hr_shape): - super(SRGANDataset, self).__init__() - - self.train_lines = train_lines - self.train_batches = len(train_lines) - - self.lr_shape = lr_shape - self.hr_shape = hr_shape - self.scale = int(hr_shape[0]/lr_shape[0]) - self.usmsharp = USMSharp_npy() - # Parameters for the first blur kernel - self.blur_kernel_size = 21 - self.kernel_list = ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] - self.kernel_prob = [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] - self.sinc_prob = 0.1 - self.blur_sigma = [0.2, 3] - self.betag_range = [0.5, 4] - self.betap_range = [1, 2] - # Parameters for the second blur kernel - self.blur_kernel_size2 = 21 - self.kernel_list2 = ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] - self.kernel_prob2 = [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] - self.sinc_prob2 = 0.1 - self.blur_sigma2 = [0.2, 3] - self.betag_range2 = [0.5, 4] - self.betap_range2 = [1, 2] - # Final sinc filter - self.final_sinc_prob = 0.8 - # Kernel sizes are distributed from 7 to 21 - self.kernel_range = [2 * v + 1 for v in range(3, 11)] - # Convolving with a pulse (identity) tensor produces no blur - self.pulse_tensor = np.zeros(shape=[21, 21], dtype='float32') - self.pulse_tensor[10, 10] = 1
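A quick check of the comment above: a kernel that is zero everywhere except a single centered one returns the input unchanged under convolution, which is why the pulse tensor serves as the no-op fallback for the final sinc filter:

import cv2
import numpy as np

pulse = np.zeros((21, 21), dtype=np.float32)
pulse[10, 10] = 1
img = np.random.rand(64, 64, 3).astype(np.float32)
out = cv2.filter2D(img, -1, pulse)
print(np.allclose(img, out))  # True: the identity kernel leaves the image intact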
- # Parameters for the first degradation stage - self.resize_prob = [0.2, 0.7, 0.1] # up, down, keep - self.resize_range = [0.15, 1.5] - self.gaussian_noise_prob = 0.5 - self.noise_range = [1, 30] - self.poisson_scale_range = [0.05, 3] - self.gray_noise_prob = 0.4 - self.jpeg_range = [30, 95] - - # Parameters for the second degradation stage - self.second_blur_prob = 0.8 - self.resize_prob2 = [0.3, 0.4, 0.3] # up, down, keep - self.resize_range2 = [0.3, 1.2] - self.gaussian_noise_prob2= 0.5 - self.noise_range2 = [1, 25] - self.poisson_scale_range2= [0.05, 2.5] - self.gray_noise_prob2 = 0.4 - self.jpeg_range2 = [30, 95] - - def __len__(self): - return self.train_batches - - def __getitem__(self, index): - index = index % self.train_batches - - image_origin = Image.open(self.train_lines[index].split()[0]) - lq, gt = self.get_random_data(image_origin, self.hr_shape) - - gt = np.transpose(preprocess_input(np.array(gt, dtype=np.float32), [0.5,0.5,0.5], [0.5,0.5,0.5]), [2,0,1]) - lq = np.transpose(preprocess_input(np.array(lq, dtype=np.float32), [0.5,0.5,0.5], [0.5,0.5,0.5]), [2,0,1]) - - return lq, gt - - def rand(self, a=0, b=1): - return np.random.rand()*(b-a) + a - - def get_random_data(self, image, input_shape): - #------------------------------# - # Read the image and convert it to RGB; - # cvtColor turns np arrays into Image objects - #------------------------------# - image = cvtColor(image) - #------------------------------# - # Get the image size and the target size - #------------------------------# - iw, ih = image.size - h, w = input_shape - - scale = min(w/iw, h/ih) - nw = int(iw*scale) - nh = int(ih*scale) - dx = (w-nw)//2 - dy = (h-nh)//2 - - #---------------------------------# - # Pad the leftover area of the image with gray bars - #---------------------------------# - image = image.resize((nw,nh), Image.BICUBIC) - new_image = Image.new('RGB', (w,h), (128,128,128)) - new_image.paste(image, (dx, dy)) - image = np.array(new_image, np.float32) - - rotate = self.rand()<.5 - if rotate: - angle = np.random.randint(-15,15) - a,b = w/2,h/2 - M = cv2.getRotationMatrix2D((a,b),angle,1) - image = cv2.warpAffine(np.array(image), M, (w,h), borderValue=[128,128,128]) - - # ------------------------ Generate kernels for the first degradation ------------------------ # - kernel_size = choice(self.kernel_range) - if np.random.uniform() < self.sinc_prob: - # This sinc filter setting applies to kernels in the [7, 21] range - if kernel_size < 13: - omega_c = np.random.uniform(np.pi / 3, np.pi) - else: - omega_c = np.random.uniform(np.pi / 5, np.pi) - kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False) - else: - kernel = random_mixed_kernels( - self.kernel_list, - self.kernel_prob, - kernel_size, - self.blur_sigma, - self.blur_sigma, [-math.pi, math.pi], - self.betag_range, - self.betap_range, - noise_range=None) - # pad kernel - pad_size = (21 - kernel_size) // 2 - kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size))) - kernel = kernel.astype(np.float32) - # ------------------------ Generate kernels for the second degradation ------------------------ # - kernel_size = choice(self.kernel_range) - if np.random.uniform() < self.sinc_prob2: - if kernel_size < 13: - omega_c = np.random.uniform(np.pi / 3, np.pi) - else: - omega_c = np.random.uniform(np.pi / 5, np.pi) - kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False) - else: - kernel2 = random_mixed_kernels( - self.kernel_list2, - self.kernel_prob2, - kernel_size, - self.blur_sigma2, - self.blur_sigma2, [-math.pi, math.pi], - self.betag_range2, - self.betap_range2, - noise_range=None) - # pad kernel - pad_size = (21 - kernel_size) // 2 - kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size))) - kernel2 = kernel2.astype(np.float32)
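random_mixed_kernels draws from several kernel families; as a point of reference, its simplest member (an isotropic Gaussian) can be built and applied with plain OpenCV, which is a reasonable mental model for what kernel1 and kernel2 do to the image (the size and sigma here are illustrative):

import cv2
import numpy as np

ksize, sigma = 21, 1.5
g = cv2.getGaussianKernel(ksize, sigma)   # (21, 1) 1-D Gaussian
kernel = (g @ g.T).astype(np.float32)     # outer product -> 2-D isotropic kernel
img = np.random.rand(64, 64, 3).astype(np.float32)
blurred = cv2.filter2D(img, -1, kernel)   # same call the pipeline uses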
pad_size))) - kernel2 = kernel2.astype(np.float32) - # ----------------------the final sinc kernel ------------------------- # - if np.random.uniform() < self.final_sinc_prob: - kernel_size = choice(self.kernel_range) - omega_c = np.random.uniform(np.pi / 3, np.pi) - sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21) - else: - sinc_kernel = self.pulse_tensor - sinc_kernel = sinc_kernel.astype(np.float32) - lq, gt = self.feed_data(image, kernel, kernel2, sinc_kernel) - - return lq, gt - - def feed_data(self, img_gt, kernel1, kernel2, sinc_kernel): - - img_gt = np.array(img_gt, dtype=np.float32) - # 对gt进行锐化 - img_gt = np.clip(img_gt / 255, 0, 1) - gt = self.usmsharp.filt(img_gt) - [ori_w, ori_h, _] = gt.shape - - # ---------------------- 根据参数进行第一次退化 -------------------- # - # 模糊处理 - out = cv2.filter2D(img_gt, -1, kernel1) - # 随机 resize - updown_type = choices(['up', 'down', 'keep'], self.resize_prob)[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.resize_range[1]) - elif updown_type == 'down': - scale = np.random.uniform(self.resize_range[0], 1) - else: - scale = 1 - mode = choice(['area', 'bilinear', 'bicubic']) - if mode=='area': - out = cv2.resize(out, (int(ori_h * scale), int(ori_w * scale)), interpolation=cv2.INTER_AREA) - elif mode=='bilinear': - out = cv2.resize(out, (int(ori_h * scale), int(ori_w * scale)), interpolation=cv2.INTER_LINEAR) - else: - out = cv2.resize(out, (int(ori_h * scale), int(ori_w * scale)), interpolation=cv2.INTER_CUBIC) - - # 灰度噪声 - gray_noise_prob = self.gray_noise_prob - if np.random.uniform() < self.gaussian_noise_prob: - out = random_add_gaussian_noise( - out, sigma_range=self.noise_range, clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise( - out, - scale_range=self.poisson_scale_range, - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - - # JPEG 压缩 - jpeg_p = np.random.uniform(low=self.jpeg_range[0], high=self.jpeg_range[1]) - jpeg_p = int(jpeg_p) - out = np.clip(out, 0, 1) - - encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_p] - _, encimg = cv2.imencode('.jpg', out * 255., encode_param) - out = np.float32(cv2.imdecode(encimg, 1))/255 - - # ---------------------- 根据参数进行第一次退化 -------------------- # - # 模糊 - if np.random.uniform() < self.second_blur_prob: - out = cv2.filter2D(out, -1, kernel2) - # 随机 resize - updown_type = choices(['up', 'down', 'keep'], self.resize_prob2)[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.resize_range2[1]) - elif updown_type == 'down': - scale = np.random.uniform(self.resize_range2[0], 1) - else: - scale = 1 - mode = choice(['area', 'bilinear', 'bicubic']) - if mode == 'area': - out = cv2.resize(out, (int(ori_h / self.scale * scale), int(ori_w / self.scale * scale)), interpolation=cv2.INTER_AREA) - elif mode == 'bilinear': - out = cv2.resize(out, (int(ori_h / self.scale * scale), int(ori_w / self.scale * scale)), interpolation=cv2.INTER_LINEAR) - else: - out = cv2.resize(out, (int(ori_h / self.scale * scale), int(ori_w / self.scale * scale)), interpolation=cv2.INTER_CUBIC) - # 灰度噪声 - gray_noise_prob = self.gray_noise_prob2 - if np.random.uniform() < self.gaussian_noise_prob2: - out = random_add_gaussian_noise( - out, sigma_range=self.noise_range2, clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise( - out, - scale_range=self.poisson_scale_range2, - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - - # JPEG压缩+最后的sinc滤波器 - # 我们还需要将图像的大小调整到所需的尺寸。我们把[调整大小+sinc过滤器]组合在一起 - # 
作为一个操作。 - # 我们考虑两个顺序。 - # 1. [调整大小+sinc filter] + JPEG压缩 - # 2. 2. JPEG压缩+[调整大小+sinc过滤]。 - # 根据经验,我们发现其他组合(sinc + JPEG + Resize)会引入扭曲的线条。 - if np.random.uniform() < 0.5: - # resize back + the final sinc filter - mode = choice(['area', 'bilinear', 'bicubic']) - if mode == 'area': - out = cv2.resize(out, (ori_h // self.scale, ori_w // self.scale), interpolation=cv2.INTER_AREA) - elif mode == 'bilinear': - out = cv2.resize(out, (ori_h // self.scale, ori_w // self.scale), interpolation=cv2.INTER_LINEAR) - else: - out = cv2.resize(out, (ori_h // self.scale, ori_w // self.scale), interpolation=cv2.INTER_CUBIC) - - out = cv2.filter2D(out, -1, sinc_kernel) - # JPEG 压缩 - jpeg_p = np.random.uniform(low=self.jpeg_range[0], high=self.jpeg_range[1]) - jpeg_p = jpeg_p - out = np.clip(out, 0, 1) - - encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_p] - _, encimg = cv2.imencode('.jpg', out * 255., encode_param) - out = np.float32(cv2.imdecode(encimg, 1)) / 255 - else: - # JPEG 压缩 - jpeg_p = np.random.uniform(low=self.jpeg_range[0], high=self.jpeg_range[1]) - jpeg_p = jpeg_p - out = np.clip(out, 0, 1) - - encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_p] - _, encimg = cv2.imencode('.jpg', out * 255., encode_param) - out = np.float32(cv2.imdecode(encimg, 1)) / 255 - # resize back + the final sinc filter - mode = choice(['area', 'bilinear', 'bicubic']) - if mode == 'area': - out = cv2.resize(out, (ori_h // self.scale, ori_w // self.scale),interpolation=cv2.INTER_AREA) - elif mode == 'bilinear': - out = cv2.resize(out, (ori_h // self.scale, ori_w // self.scale),interpolation=cv2.INTER_LINEAR) - else: - out = cv2.resize(out, (ori_h // self.scale, ori_w // self.scale),interpolation=cv2.INTER_CUBIC) - lq = np.clip((out * 255.0), 0, 255) - gt = np.clip((gt * 255.0), 0, 255) - return Image.fromarray(np.uint8(lq)), Image.fromarray(np.uint8(gt)) - -def SRGAN_dataset_collate(batch): - images_l = [] - images_h = [] - for img_l, img_h in batch: - images_l.append(img_l) - images_h.append(img_h) - return np.array(images_l), np.array(images_h) diff --git a/spaces/EronSamez/RVC_HFmeu/julius/fftconv.py b/spaces/EronSamez/RVC_HFmeu/julius/fftconv.py deleted file mode 100644 index 1920e5369bb49b76eeea1832b7be2a0ddbc8db6b..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/julius/fftconv.py +++ /dev/null @@ -1,183 +0,0 @@ -# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. -# Author: adefossez, 2020 - -""" -Implementation of a FFT based 1D convolution in PyTorch. -While FFT is used in CUDNN for small kernel sizes, it is not the case for long ones, e.g. 512. -This module implements efficient FFT based convolutions for such convolutions. A typical -application is for evaluationg FIR filters with a long receptive field, typically -evaluated with a stride of 1. -""" -from typing import Optional - -import torch -try: - import torch.fft as new_fft -except ImportError: - new_fft = None # type: ignore -from torch.nn import functional as F - -from .core import pad_to, unfold -from .utils import simple_repr - - -# This is quite verbose, but sadly needed to make TorchScript happy. 
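-# Background for the shims below: `torch.rfft`/`torch.irfft` were removed in
-# PyTorch 1.8 in favour of the `torch.fft` module, whose transforms return
-# complex tensors rather than a trailing real/imaginary dimension. The
-# `_new_*` wrappers convert with `view_as_real`/`view_as_complex` so the rest
-# of the file can treat spectra as real tensors of shape `[..., freq, 2]` on
-# either API.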
-def _new_rfft(x: torch.Tensor): - z = new_fft.rfft(x, dim=-1) - return torch.view_as_real(z) - - -def _old_rfft(x: torch.Tensor): - return torch.rfft(x, 1) # type: ignore - - -def _old_irfft(x: torch.Tensor, length: int): - result = torch.irfft(x, 1, signal_sizes=(length,)) # type: ignore - return result - - -def _new_irfft(x: torch.Tensor, length: int): - x = torch.view_as_complex(x) - return new_fft.irfft(x, length, dim=-1) - - -if new_fft is None: - _rfft = _old_rfft - _irfft = _old_irfft -else: - _rfft = _new_rfft - _irfft = _new_irfft - - -def _compl_mul_conjugate(a: torch.Tensor, b: torch.Tensor): - """ - Given a and b two tensors of dimension 4 - with the last dimension being the real and imaginary part, - returns a multiplied by the conjugate of b, the multiplication - being with respect to the second dimension. - - """ - # PyTorch 1.7 supports complex number, but not for all operations. - # Once the support is widespread, this can likely go away. - - op = "bcft,dct->bdft" - return torch.stack([ - torch.einsum(op, a[..., 0], b[..., 0]) + torch.einsum(op, a[..., 1], b[..., 1]), - torch.einsum(op, a[..., 1], b[..., 0]) - torch.einsum(op, a[..., 0], b[..., 1]) - ], - dim=-1) - - -def fft_conv1d( - input: torch.Tensor, weight: torch.Tensor, - bias: Optional[torch.Tensor] = None, stride: int = 1, padding: int = 0, - block_ratio: float = 5): - """ - Same as `torch.nn.functional.conv1d` but using FFT for the convolution. - Please check PyTorch documentation for more information. - - Args: - input (Tensor): input signal of shape `[B, C, T]`. - weight (Tensor): weight of the convolution `[D, C, K]` with `D` the number - of output channels. - bias (Tensor or None): if not None, bias term for the convolution. - stride (int): stride of convolution. - padding (int): padding to apply to the input. - block_ratio (float): can be tuned for speed. The input is splitted in chunks - with a size of `int(block_ratio * kernel_size)`. - - Shape: - - - Inputs: `input` is `[B, C, T]`, `weight` is `[D, C, K]` and bias is `[D]`. - - Output: `(*, T)` - - - ..note:: - This function is faster than `torch.nn.functional.conv1d` only in specific cases. - Typically, the kernel size should be of the order of 256 to see any real gain, - for a stride of 1. - - ..Warning:: - Dilation and groups are not supported at the moment. This function might use - more memory than the default Conv1d implementation. - """ - input = F.pad(input, (padding, padding)) - batch, channels, length = input.shape - out_channels, _, kernel_size = weight.shape - - if length < kernel_size: - raise RuntimeError(f"Input should be at least as large as the kernel size {kernel_size}, " - f"but it is only {length} samples long.") - if block_ratio < 1: - raise RuntimeError("Block ratio must be greater than 1.") - - # We are going to process the input blocks by blocks, as for some reason it is faster - # and less memory intensive (I think the culprit is `torch.einsum`. - block_size: int = min(int(kernel_size * block_ratio), length) - fold_stride = block_size - kernel_size + 1 - weight = pad_to(weight, block_size) - weight_z = _rfft(weight) - - # We pad the input and get the different frames, on which - frames = unfold(input, block_size, fold_stride) - - frames_z = _rfft(frames) - out_z = _compl_mul_conjugate(frames_z, weight_z) - out = _irfft(out_z, block_size) - # The last bit is invalid, because FFT will do a circular convolution. 
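-    # Pointwise multiplication in the frequency domain corresponds to
-    # *circular* convolution in time, so of each block of length `block_size`
-    # only the first `block_size - kernel_size + 1` samples form a valid
-    # linear convolution; the wrapped-around tail is discarded before the
-    # blocks are stitched back together (the classic overlap-save scheme,
-    # matching `fold_stride` above).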
- out = out[..., :-kernel_size + 1] - out = out.reshape(batch, out_channels, -1) - out = out[..., ::stride] - target_length = (length - kernel_size) // stride + 1 - out = out[..., :target_length] - if bias is not None: - out += bias[:, None] - return out - - -class FFTConv1d(torch.nn.Module): - """ - Same as `torch.nn.Conv1d` but based on `fft_conv1d`. - Please check PyTorch documentation for more information. - - Args: - in_channels (int): number of input channels. - out_channels (int): number of output channels. - kernel_size (int): kernel size of convolution. - stride (int): stride of convolution. - padding (int): padding to apply to the input. - bias (bool): if True, use a bias term. - - ..note:: - This module is faster than `torch.nn.Conv1d` only in specific cases. - Typically, `kernel_size` should be of the order of 256 to see any real gain, - for a stride of 1. - - ..warning:: - Dilation and groups are not supported at the moment. This module might use - more memory than the default Conv1d implementation. - - >>> fftconv = FFTConv1d(12, 24, 128, 4) - >>> x = torch.randn(4, 12, 1024) - >>> print(list(fftconv(x).shape)) - [4, 24, 225] - """ - def __init__(self, in_channels: int, out_channels: int, kernel_size: int, - stride: int = 1, padding: int = 0, bias: bool = True): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.stride = stride - self.padding = padding - - conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size, bias=bias) - self.weight = conv.weight - self.bias = conv.bias - - def forward(self, input: torch.Tensor): - return fft_conv1d( - input, self.weight, self.bias, self.stride, self.padding) - - def __repr__(self): - return simple_repr(self, overrides={"bias": self.bias is not None}) diff --git a/spaces/Fr33d0m21/Remodel_Dreamer/README.md b/spaces/Fr33d0m21/Remodel_Dreamer/README.md deleted file mode 100644 index 788d3d5ba8bc0e1c7c715b59ec2082245fb0ef4e..0000000000000000000000000000000000000000 --- a/spaces/Fr33d0m21/Remodel_Dreamer/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Remodel Dreamer -emoji: 🕵️‍♂️ -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 3.11.0 -app_file: app.py -pinned: true -license: mit -duplicated_from: pharma/CLIP-Interrogator ---- - - diff --git a/spaces/FridaZuley/RVC_HFKawaii/infer/lib/uvr5_pack/lib_v5/nets_new.py b/spaces/FridaZuley/RVC_HFKawaii/infer/lib/uvr5_pack/lib_v5/nets_new.py deleted file mode 100644 index 1c0f4fa96d921e979fe31bd4151701b7783fbcea..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/infer/lib/uvr5_pack/lib_v5/nets_new.py +++ /dev/null @@ -1,133 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import layers_new - - -class BaseNet(nn.Module): - def __init__( - self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6)) - ): - super(BaseNet, self).__init__() - self.enc1 = layers_new.Conv2DBNActiv(nin, nout, 3, 1, 1) - self.enc2 = layers_new.Encoder(nout, nout * 2, 3, 2, 1) - self.enc3 = layers_new.Encoder(nout * 2, nout * 4, 3, 2, 1) - self.enc4 = layers_new.Encoder(nout * 4, nout * 6, 3, 2, 1) - self.enc5 = layers_new.Encoder(nout * 6, nout * 8, 3, 2, 1) - - self.aspp = layers_new.ASPPModule(nout * 8, nout * 8, dilations, dropout=True) - - self.dec4 = layers_new.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1) - self.dec3 = layers_new.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1) - self.dec2 = layers_new.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1) - self.lstm_dec2 = layers_new.LSTMModule(nout * 2, nin_lstm, nout_lstm) - self.dec1 = layers_new.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1) - - def __call__(self, x): - e1 = self.enc1(x) - e2 = self.enc2(e1) - e3 = self.enc3(e2) - e4 = self.enc4(e3) - e5 = self.enc5(e4) - - h = self.aspp(e5) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = torch.cat([h, self.lstm_dec2(h)], dim=1) - h = self.dec1(h, e1) - - return h - - -class CascadedNet(nn.Module): - def __init__(self, n_fft, nout=32, nout_lstm=128): - super(CascadedNet, self).__init__() - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - self.nin_lstm = self.max_bin // 2 - self.offset = 64 - - self.stg1_low_band_net = nn.Sequential( - BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm), - layers_new.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0), - ) - - self.stg1_high_band_net = BaseNet( - 2, nout // 4, self.nin_lstm // 2, nout_lstm // 2 - ) - - self.stg2_low_band_net = nn.Sequential( - BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm), - layers_new.Conv2DBNActiv(nout, nout // 2, 1, 1, 0), - ) - self.stg2_high_band_net = BaseNet( - nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2 - ) - - self.stg3_full_band_net = BaseNet( - 3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm - ) - - self.out = nn.Conv2d(nout, 2, 1, bias=False) - self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False) - - def forward(self, x): - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - l1_in = x[:, :, :bandw] - h1_in = x[:, :, bandw:] - l1 = self.stg1_low_band_net(l1_in) - h1 = self.stg1_high_band_net(h1_in) - aux1 = torch.cat([l1, h1], dim=2) - - l2_in = torch.cat([l1_in, l1], dim=1) - h2_in = torch.cat([h1_in, h1], dim=1) - l2 = self.stg2_low_band_net(l2_in) - h2 = self.stg2_high_band_net(h2_in) - aux2 = torch.cat([l2, h2], dim=2) - - f3_in = torch.cat([x, aux1, aux2], dim=1) - f3 = self.stg3_full_band_net(f3_in) - - mask = torch.sigmoid(self.out(f3)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux = torch.cat([aux1, aux2], dim=1) - aux = torch.sigmoid(self.aux_out(aux)) - aux = F.pad( - input=aux, - pad=(0, 0, 0, self.output_bin - aux.size()[2]), - mode="replicate", - ) - return mask, aux - else: - return mask - - def predict_mask(self, x): - mask = self.forward(x) - - if self.offset > 0: - mask = mask[:, :, :, self.offset : -self.offset] - assert mask.size()[3] > 0 - - return mask - - def predict(self, x, aggressiveness=None): - mask = self.forward(x) - pred_mag = x * mask - - if self.offset > 0: - pred_mag = pred_mag[:, :, :, self.offset : -self.offset] - assert pred_mag.size()[3] > 0 - - return pred_mag diff --git 
a/spaces/GXSA/bingo/src/components/ui/input.tsx b/spaces/GXSA/bingo/src/components/ui/input.tsx
deleted file mode 100644
index 684a857f3d769b78818fb13de1abaebfb09ca79c..0000000000000000000000000000000000000000
--- a/spaces/GXSA/bingo/src/components/ui/input.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-
-export interface InputProps
-  extends React.InputHTMLAttributes<HTMLInputElement> {}
-
-const Input = React.forwardRef<HTMLInputElement, InputProps>(
-  ({ className, type, ...props }, ref) => {
-    return (
-      <input
-        type={type}
-        className={cn(/* base utility classes lost in extraction */ className)}
-        ref={ref}
-        {...props}
-      />
-    )
-  }
-)
-Input.displayName = 'Input'
-
-export { Input }
diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/sequential_block_insertion.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/sequential_block_insertion.py
deleted file mode 100644
index ad511701bf21b805757b0cab008b8a3a02b82c49..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/sequential_block_insertion.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import numpy as np
-import os
-import pybullet as p
-import random
-from cliport.tasks import primitives
-from cliport.tasks.grippers import Spatula
-from cliport.tasks.task import Task
-from cliport.utils import utils
-import numpy as np
-from cliport.tasks.task import Task
-from cliport.utils import utils
-import pybullet as p
-
-class SequentialBlockInsertion(Task):
-    """Pick up blocks of different colors and insert them into the fixture of the same color in a specific sequence."""
-
-    def __init__(self):
-        super().__init__()
-        self.max_steps = 20
-        self.lang_template = "insert the {color} block into the {color} fixture"
-        self.task_completed_desc = "done inserting blocks."
-        self.additional_reset()
-
-    def reset(self, env):
-        super().reset(env)
-
-        # Define the sequence of colors
-        colors = ['red', 'blue', 'green', 'yellow']
-
-        # Add fixtures.
-        # x, y, z dimensions for the asset size
-        fixture_size = (0.12, 0.12, 0)
-        fixture_urdf = 'insertion/fixture.urdf'
-        fixtures = []
-        for color in colors:
-            fixture_pose = self.get_random_pose(env, fixture_size)
-            fixture_id = env.add_object(fixture_urdf, fixture_pose, color=utils.COLORS[color], category='fixed')
-            fixtures.append(fixture_id)
-
-        # Add blocks.
-        # x, y, z dimensions for the asset size
-        block_size = (0.04, 0.04, 0.04)
-        block_urdf = 'block/block.urdf'
-        blocks = []
-        for color in colors:
-            block_pose = self.get_random_pose(env, block_size)
-            block_id = env.add_object(block_urdf, block_pose, color=utils.COLORS[color])
-            blocks.append(block_id)
-
-        # Goal: each block is in the fixture of the same color.
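-        # Each add_goal call contributes an equal 1/len(blocks) share of the
-        # episode reward and carries its own language instruction, so the
-        # blocks are to be placed in the fixed red, blue, green, yellow order.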
-        for i in range(len(blocks)):
-            self.add_goal(objs=[blocks[i]], matches=np.ones((1, 1)), targ_poses=[p.getBasePositionAndOrientation(fixtures[i])], replace=False,
-                          rotations=True, metric='pose', params=None, step_max_reward=1/len(blocks), language_goal=self.lang_template.format(color=colors[i]))
\ No newline at end of file
diff --git a/spaces/Giuvyz/rvc-genshin/config.py b/spaces/Giuvyz/rvc-genshin/config.py
deleted file mode 100644
index c0c16e0017efbcaf250cb539a1d0edb4e83575e4..0000000000000000000000000000000000000000
--- a/spaces/Giuvyz/rvc-genshin/config.py
+++ /dev/null
@@ -1,88 +0,0 @@
-######################## Hardware parameters ########################
-
-# Set to cuda:x, cpu or mps, where x is the GPU index; only NVIDIA GPUs / Apple Silicon are accelerated
-device = "cuda:0"
-
-# For 9/10/20/30/40-series GPUs simply use True; quality is unaffected, and >=20-series cards get a speedup
-is_half = True
-
-# 0 (the default) uses all threads; set a number to cap CPU usage
-n_cpu = 0
-
-######################## Hardware parameters ########################
-
-
-################## Parameter-handling logic below, do not modify ##################
-
-######################## Command-line arguments ########################
-import argparse

-parser = argparse.ArgumentParser()
-parser.add_argument("--port", type=int, default=7865, help="Listen port")
-parser.add_argument("--pycmd", type=str, default="python", help="Python command")
-parser.add_argument("--colab", action="store_true", help="Launch in colab")
-parser.add_argument(
-    "--noparallel", action="store_true", help="Disable parallel processing"
-)
-parser.add_argument(
-    "--noautoopen", action="store_true", help="Do not open in browser automatically"
-)
-cmd_opts, unknown = parser.parse_known_args()
-
-python_cmd = cmd_opts.pycmd
-listen_port = cmd_opts.port
-iscolab = cmd_opts.colab
-noparallel = cmd_opts.noparallel
-noautoopen = cmd_opts.noautoopen
-######################## Command-line arguments ########################
-
-import sys
-import torch
-
-
-# has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
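-# `torch.has_mps` can be present even when no usable MPS device exists, so the
-# probe below tries to allocate a tensor on "mps" and treats any failure as
-# "not available".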
-# check `getattr` and try it for compatibility -def has_mps() -> bool: - if sys.platform != "darwin": - return False - else: - if not getattr(torch, "has_mps", False): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - -if not torch.cuda.is_available(): - if has_mps(): - print("没有发现支持的N卡, 使用MPS进行推理") - device = "mps" - else: - print("没有发现支持的N卡, 使用CPU进行推理") - device = "cpu" - is_half = False - -if device not in ["cpu", "mps"]: - gpu_name = torch.cuda.get_device_name(int(device.split(":")[-1])) - if "16" in gpu_name or "MX" in gpu_name: - print("16系显卡/MX系显卡强制单精度") - is_half = False - -from multiprocessing import cpu_count - -if n_cpu == 0: - n_cpu = cpu_count() -if is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 -else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 diff --git a/spaces/Glasscupps/Hello/README.md b/spaces/Glasscupps/Hello/README.md deleted file mode 100644 index 31b73e239a6319e468b6dea2497dafe777b2149a..0000000000000000000000000000000000000000 --- a/spaces/Glasscupps/Hello/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Hello -emoji: ⚡ -colorFrom: purple -colorTo: green -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Gradio-Blocks/EmojiGAN/dnnlib/__init__.py b/spaces/Gradio-Blocks/EmojiGAN/dnnlib/__init__.py deleted file mode 100644 index 2f08cf36f11f9b0fd94c1b7caeadf69b98375b04..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/EmojiGAN/dnnlib/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -from .util import EasyDict, make_cache_dir_path diff --git a/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/upfirdn2d.py b/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/upfirdn2d.py deleted file mode 100644 index ceeac2b9834e33b7c601c28bf27f32aa91c69256..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/upfirdn2d.py +++ /dev/null @@ -1,384 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom PyTorch ops for efficient resampling of 2D images.""" - -import os -import warnings -import numpy as np -import torch -import traceback - -from .. import custom_ops -from .. import misc -from . 
import conv2d_gradfix - -#---------------------------------------------------------------------------- - -_inited = False -_plugin = None - -def _init(): - global _inited, _plugin - if not _inited: - sources = ['upfirdn2d.cpp', 'upfirdn2d.cu'] - sources = [os.path.join(os.path.dirname(__file__), s) for s in sources] - try: - _plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math']) - except: - warnings.warn('Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc()) - return _plugin is not None - -def _parse_scaling(scaling): - if isinstance(scaling, int): - scaling = [scaling, scaling] - assert isinstance(scaling, (list, tuple)) - assert all(isinstance(x, int) for x in scaling) - sx, sy = scaling - assert sx >= 1 and sy >= 1 - return sx, sy - -def _parse_padding(padding): - if isinstance(padding, int): - padding = [padding, padding] - assert isinstance(padding, (list, tuple)) - assert all(isinstance(x, int) for x in padding) - if len(padding) == 2: - padx, pady = padding - padding = [padx, padx, pady, pady] - padx0, padx1, pady0, pady1 = padding - return padx0, padx1, pady0, pady1 - -def _get_filter_size(f): - if f is None: - return 1, 1 - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - fw = f.shape[-1] - fh = f.shape[0] - with misc.suppress_tracer_warnings(): - fw = int(fw) - fh = int(fh) - misc.assert_shape(f, [fh, fw][:f.ndim]) - assert fw >= 1 and fh >= 1 - return fw, fh - -#---------------------------------------------------------------------------- - -def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None): - r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. - - Args: - f: Torch tensor, numpy array, or python list of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), - `[]` (impulse), or - `None` (identity). - device: Result device (default: cpu). - normalize: Normalize the filter so that it retains the magnitude - for constant input signal (DC)? (default: True). - flip_filter: Flip the filter? (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - separable: Return a separable filter? (default: select automatically). - - Returns: - Float32 tensor of the shape - `[filter_height, filter_width]` (non-separable) or - `[filter_taps]` (separable). - """ - # Validate. - if f is None: - f = 1 - f = torch.as_tensor(f, dtype=torch.float32) - assert f.ndim in [0, 1, 2] - assert f.numel() > 0 - if f.ndim == 0: - f = f[np.newaxis] - - # Separable? - if separable is None: - separable = (f.ndim == 1 and f.numel() >= 8) - if f.ndim == 1 and not separable: - f = f.ger(f) - assert f.ndim == (1 if separable else 2) - - # Apply normalize, flip, gain, and device. - if normalize: - f /= f.sum() - if flip_filter: - f = f.flip(list(range(f.ndim))) - f = f * (gain ** (f.ndim / 2)) - f = f.to(device=device) - return f - -#---------------------------------------------------------------------------- - -def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Pad, upsample, filter, and downsample a batch of 2D images. - - Performs the following sequence of operations for each channel: - - 1. Upsample the image by inserting N-1 zeros after each pixel (`up`). - - 2. Pad the image with the specified number of zeros on each side (`padding`). - Negative padding corresponds to cropping the image. - - 3. 
Convolve the image with the specified 2D FIR filter (`f`), shrinking it - so that the footprint of all output pixels lies within the input image. - - 4. Downsample the image by keeping every Nth pixel (`down`). - - This sequence of operations bears close resemblance to scipy.signal.upfirdn(). - The fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports gradients of arbitrary order. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - up: Integer upsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - down: Integer downsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the upsampled image. Can be a single number - or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and _init(): - return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f) - return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): - """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops. - """ - # Validate arguments. - assert isinstance(x, torch.Tensor) and x.ndim == 4 - if f is None: - f = torch.ones([1, 1], dtype=torch.float32, device=x.device) - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - assert f.dtype == torch.float32 and not f.requires_grad - batch_size, num_channels, in_height, in_width = x.shape - upx, upy = _parse_scaling(up) - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - - # Upsample by inserting zeros. - x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) - x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) - x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) - - # Pad or crop. - x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]) - x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)] - - # Setup filter. - f = f * (gain ** (f.ndim / 2)) - f = f.to(x.dtype) - if not flip_filter: - f = f.flip(list(range(f.ndim))) - - # Convolve with the filter. - f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) - if f.ndim == 4: - x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels) - else: - x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels) - x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels) - - # Downsample by throwing away pixels. 
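-    # Strided slicing keeps every down-th pixel, completing the
-    # upsample -> pad -> filter -> downsample pipeline; per spatial dimension
-    # the output size comes out to
-    # (in_size * up + pad0 + pad1 - filter_size) // down + 1.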
- x = x[:, :, ::downy, ::downx] - return x - -#---------------------------------------------------------------------------- - -_upfirdn2d_cuda_cache = dict() - -def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1): - """Fast CUDA implementation of `upfirdn2d()` using custom ops. - """ - # Parse arguments. - upx, upy = _parse_scaling(up) - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - - # Lookup from cache. - key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) - if key in _upfirdn2d_cuda_cache: - return _upfirdn2d_cuda_cache[key] - - # Forward op. - class Upfirdn2dCuda(torch.autograd.Function): - @staticmethod - def forward(ctx, x, f): # pylint: disable=arguments-differ - assert isinstance(x, torch.Tensor) and x.ndim == 4 - if f is None: - f = torch.ones([1, 1], dtype=torch.float32, device=x.device) - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - y = x - if f.ndim == 2: - y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) - else: - y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, np.sqrt(gain)) - y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, np.sqrt(gain)) - ctx.save_for_backward(f) - ctx.x_shape = x.shape - return y - - @staticmethod - def backward(ctx, dy): # pylint: disable=arguments-differ - f, = ctx.saved_tensors - _, _, ih, iw = ctx.x_shape - _, _, oh, ow = dy.shape - fw, fh = _get_filter_size(f) - p = [ - fw - padx0 - 1, - iw * upx - ow * downx + padx0 - upx + 1, - fh - pady0 - 1, - ih * upy - oh * downy + pady0 - upy + 1, - ] - dx = None - df = None - - if ctx.needs_input_grad[0]: - dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f) - - assert not ctx.needs_input_grad[1] - return dx, df - - # Add to cache. - _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda - return Upfirdn2dCuda - -#---------------------------------------------------------------------------- - -def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Filter a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape matches the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - padding: Padding with respect to the output. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
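-
-    Example (a quick shape sanity check; with the default padding the output
-    matches the input spatial size, and `impl='ref'` forces the slow
-    reference path):
-
-        >>> x = torch.randn(2, 3, 64, 64)
-        >>> f = setup_filter([1, 2, 1])  # 3-tap binomial, expanded to 3x3
-        >>> y = filter2d(x, f, impl='ref')
-        >>> list(y.shape)
-        [2, 3, 64, 64]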
- """ - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + fw // 2, - padx1 + (fw - 1) // 2, - pady0 + fh // 2, - pady1 + (fh - 1) // 2, - ] - return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) - -#---------------------------------------------------------------------------- - -def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Upsample a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape is a multiple of the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - up: Integer upsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the output. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - upx, upy = _parse_scaling(up) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + (fw + upx - 1) // 2, - padx1 + (fw - upx) // 2, - pady0 + (fh + upy - 1) // 2, - pady1 + (fh - upy) // 2, - ] - return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl) - -#---------------------------------------------------------------------------- - -def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Downsample a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape is a fraction of the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - down: Integer downsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the input. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
- """ - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + (fw - downx + 1) // 2, - padx1 + (fw - downx) // 2, - pady0 + (fh - downy + 1) // 2, - pady1 + (fh - downy) // 2, - ] - return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) - -#---------------------------------------------------------------------------- diff --git a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/tools/hmmbuild.py b/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/tools/hmmbuild.py deleted file mode 100644 index f3c573047450f5f17e791ad9a54f1b436e71b095..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/tools/hmmbuild.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2021 DeepMind Technologies Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A Python wrapper for hmmbuild - construct HMM profiles from MSA.""" - -import os -import re -import subprocess - -from absl import logging -from alphafold.data.tools import utils -# Internal import (7716). - - -class Hmmbuild(object): - """Python wrapper of the hmmbuild binary.""" - - def __init__(self, - *, - binary_path: str, - singlemx: bool = False): - """Initializes the Python hmmbuild wrapper. - - Args: - binary_path: The path to the hmmbuild executable. - singlemx: Whether to use --singlemx flag. If True, it forces HMMBuild to - just use a common substitution score matrix. - - Raises: - RuntimeError: If hmmbuild binary not found within the path. - """ - self.binary_path = binary_path - self.singlemx = singlemx - - def build_profile_from_sto(self, sto: str, model_construction='fast') -> str: - """Builds a HHM for the aligned sequences given as an A3M string. - - Args: - sto: A string with the aligned sequences in the Stockholm format. - model_construction: Whether to use reference annotation in the msa to - determine consensus columns ('hand') or default ('fast'). - - Returns: - A string with the profile in the HMM format. - - Raises: - RuntimeError: If hmmbuild fails. - """ - return self._build_profile(sto, model_construction=model_construction) - - def build_profile_from_a3m(self, a3m: str) -> str: - """Builds a HHM for the aligned sequences given as an A3M string. - - Args: - a3m: A string with the aligned sequences in the A3M format. - - Returns: - A string with the profile in the HMM format. - - Raises: - RuntimeError: If hmmbuild fails. - """ - lines = [] - for line in a3m.splitlines(): - if not line.startswith('>'): - line = re.sub('[a-z]+', '', line) # Remove inserted residues. - lines.append(line + '\n') - msa = ''.join(lines) - return self._build_profile(msa, model_construction='fast') - - def _build_profile(self, msa: str, model_construction: str = 'fast') -> str: - """Builds a HMM for the aligned sequences given as an MSA string. - - Args: - msa: A string with the aligned sequences, in A3M or STO format. 
- model_construction: Whether to use reference annotation in the msa to - determine consensus columns ('hand') or default ('fast'). - - Returns: - A string with the profile in the HMM format. - - Raises: - RuntimeError: If hmmbuild fails. - ValueError: If unspecified arguments are provided. - """ - if model_construction not in {'hand', 'fast'}: - raise ValueError(f'Invalid model_construction {model_construction} - only' - 'hand and fast supported.') - - with utils.tmpdir_manager(base_dir='/tmp') as query_tmp_dir: - input_query = os.path.join(query_tmp_dir, 'query.msa') - output_hmm_path = os.path.join(query_tmp_dir, 'output.hmm') - - with open(input_query, 'w') as f: - f.write(msa) - - cmd = [self.binary_path] - # If adding flags, we have to do so before the output and input: - - if model_construction == 'hand': - cmd.append(f'--{model_construction}') - if self.singlemx: - cmd.append('--singlemx') - cmd.extend([ - '--amino', - output_hmm_path, - input_query, - ]) - - logging.info('Launching subprocess %s', cmd) - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - with utils.timing('hmmbuild query'): - stdout, stderr = process.communicate() - retcode = process.wait() - logging.info('hmmbuild stdout:\n%s\n\nstderr:\n%s\n', - stdout.decode('utf-8'), stderr.decode('utf-8')) - - if retcode: - raise RuntimeError('hmmbuild failed\nstdout:\n%s\n\nstderr:\n%s\n' - % (stdout.decode('utf-8'), stderr.decode('utf-8'))) - - with open(output_hmm_path, encoding='utf-8') as f: - hmm = f.read() - - return hmm diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/compression/encodec_audiogen_16khz.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/compression/encodec_audiogen_16khz.py deleted file mode 100644 index c9b41f684045594bb264cfb7f4f15d1da439382c..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/compression/encodec_audiogen_16khz.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Grid search file, simply list all the exp you want in `explorer`. -Any new exp added there will be scheduled. -You can cancel and experiment by commenting its line. - -This grid shows how to train the new AudioGen EnCodec model at 16 kHz. 
-""" - -from ._explorers import CompressionExplorer -from ...environment import AudioCraftEnvironment - - -@CompressionExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=8, partition=partitions) - # use configuration for AudioGen's EnCodec model trained on monophonic audio sampled at 16 kHz - # AudioGen's EnCodec is trained with a total stride of 320 leading to a frame rate of 50 hz - launcher.bind_(solver='compression/encodec_audiogen_16khz') - # replace this by the desired sound dataset - launcher.bind_(dset='internal/sounds_16khz') - # launch xp - launcher() diff --git a/spaces/GroveStreet/GTA_SOVITS/modules/__init__.py b/spaces/GroveStreet/GTA_SOVITS/modules/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/nat/nonautoregressive_ensembles.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/nat/nonautoregressive_ensembles.py deleted file mode 100644 index 705a04fb49658c91114a26efd411b4653c65b943..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/nat/nonautoregressive_ensembles.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math - -import torch -import torch.nn.functional as F -from fairseq.models.nat import ( - _apply_del_words, - _apply_ins_masks, - _apply_ins_words, - _fill, - _skip, - _skip_encoder_out, -) - - -class _EnsembleModelEncoder(object): - def __init__(self, models): - self.models = models - - def reorder_encoder_out(self, encoder_outs, new_order): - encoder_outs = [ - model.encoder.reorder_encoder_out(encoder_out, new_order) - for model, encoder_out in zip(self.models, encoder_outs) - ] - return encoder_outs - - -class BasicEnsembleModel(torch.nn.Module): - """A wrapper around an ensemble of models.""" - - def __init__(self, models): - super().__init__() - self.models = torch.nn.ModuleList(models) - self.bos = self.models[0].decoder.dictionary.bos() - self.eos = self.models[0].decoder.dictionary.eos() - self.pad = self.models[0].decoder.dictionary.pad() - self.unk = self.models[0].decoder.dictionary.unk() - self.encoder = _EnsembleModelEncoder(self.models) - - def has_encoder(self): - return hasattr(self.models[0], "encoder") - - def max_decoder_positions(self): - return min(m.max_decoder_positions() for m in self.models) - - @torch.no_grad() - def forward_encoder(self, encoder_input): - if not self.has_encoder(): - return None - return [model.forward_encoder(encoder_input) for model in self.models] - - @torch.no_grad() - def forward_decoder(self, *inputs): - raise NotImplementedError - - def initialize_output_tokens(self, *inputs): - raise NotImplementedError - - -class EnsembleLevT(BasicEnsembleModel): - """A wrapper around an ensemble of models.""" - - def __init__(self, models): - super().__init__(models) - - @torch.no_grad() - def forward_decoder( - self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs - ): - # LevT ensembling - # A pipeline of three steps: deletion, placeholder, and word insertion. - # We need to average scores in each step in a pipeline way because of dependence. 
- # deletion - output_tokens = decoder_out.output_tokens - output_scores = decoder_out.output_scores - attn = decoder_out.attn - - bsz = output_tokens.size(0) - if max_ratio is None: - max_lens = output_tokens.new().fill_(255) - else: - if not encoder_outs[0]["encoder_padding_mask"]: - src_lens = ( - encoder_outs[0]["encoder_out"][0].new(bsz) - .fill_(encoder_outs[0]["encoder_out"][0].size(1)) - ) - else: - src_lens = (~encoder_outs[0]["encoder_padding_mask"][0]).sum(1) - max_lens = (src_lens * max_ratio).clamp(min=10).long() - - # delete words - # do not delete tokens if it is - can_del_word = output_tokens.ne(self.pad).sum(1) > 2 - if can_del_word.sum() != 0: # we cannot delete, skip - output_tokens, output_scores, attn = self.forward_word_del( - encoder_outs, - output_tokens, - output_scores, - attn, - can_del_word, - ) - - # insert placeholders - can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens - if can_ins_mask.sum() != 0: - output_tokens, output_scores = self.forward_mask_ins( - encoder_outs, - output_tokens, - output_scores, - can_ins_mask, - eos_penalty, - max_lens, - ) - - # insert words - can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 - if can_ins_word.sum() != 0: - output_tokens, output_scores, attn = self.forward_word_ins( - encoder_outs, - output_tokens, - output_scores, - attn, - can_ins_word, - ) - - # delete some unnecessary paddings - cut_off = output_tokens.ne(self.pad).sum(1).max() - output_tokens = output_tokens[:, :cut_off] - output_scores = output_scores[:, :cut_off] - attn = None if attn is None else attn[:, :cut_off, :] - return decoder_out._replace( - output_tokens=output_tokens, - output_scores=output_scores, - attn=attn, - history=None, - ) - - def forward_word_del( - self, encoder_outs, output_tokens, output_scores, attn, can_del_word - ): - word_del_score_avg = [] - word_del_attn_avg = [] - for model, encoder_out in zip(self.models, encoder_outs): - word_del_out, word_del_attn = model.decoder.forward_word_del( - _skip(output_tokens, can_del_word), - _skip_encoder_out(model.encoder, encoder_out, can_del_word), - ) - word_del_score = F.log_softmax(word_del_out, 2) - word_del_score_avg.append(word_del_score) - word_del_attn_avg.append(word_del_attn) - word_del_score_avg = torch.logsumexp( - torch.stack(word_del_score_avg, dim=0), dim=0 - ) - math.log(len(self.models)) - word_del_pred = word_del_score_avg.max(-1)[1].bool() - if word_del_attn_avg[0] is not None: - word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0) / len(self.models) - else: - word_del_attn_avg = None - - _tokens, _scores, _attn = _apply_del_words( - output_tokens[can_del_word], - output_scores[can_del_word], - word_del_attn_avg, - word_del_pred, - self.pad, - self.bos, - self.eos, - ) - output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) - output_scores = _fill(output_scores, can_del_word, _scores, 0) - attn = _fill(attn, can_del_word, _attn, 0.0) - return output_tokens, output_scores, attn - - def forward_mask_ins( - self, - encoder_outs, - output_tokens, - output_scores, - can_ins_mask, - eos_penalty, - max_lens, - ): - mask_ins_score_avg = [] - for model, encoder_out in zip(self.models, encoder_outs): - mask_ins_out, _ = model.decoder.forward_mask_ins( - _skip(output_tokens, can_ins_mask), - _skip_encoder_out(model.encoder, encoder_out, can_ins_mask), - ) - mask_ins_score = F.log_softmax(mask_ins_out, 2) - if eos_penalty > 0.0: - mask_ins_score[:, :, 0] -= eos_penalty - mask_ins_score_avg.append(mask_ins_score) - mask_ins_score_avg = torch.logsumexp( - 
torch.stack(mask_ins_score_avg, dim=0), dim=0 - ) - math.log(len(self.models)) - mask_ins_pred = mask_ins_score_avg.max(-1)[1] - mask_ins_pred = torch.min( - mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred) - ) - _tokens, _scores = _apply_ins_masks( - output_tokens[can_ins_mask], - output_scores[can_ins_mask], - mask_ins_pred, - self.pad, - self.unk, - self.eos, - ) - output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) - output_scores = _fill(output_scores, can_ins_mask, _scores, 0) - return output_tokens, output_scores - - def forward_word_ins( - self, encoder_outs, output_tokens, output_scores, attn, can_ins_word - ): - word_ins_score_avg = [] - word_ins_attn_avg = [] - for model, encoder_out in zip(self.models, encoder_outs): - word_ins_out, word_ins_attn = model.decoder.forward_word_ins( - _skip(output_tokens, can_ins_word), - _skip_encoder_out(model.encoder, encoder_out, can_ins_word), - ) - word_ins_score = F.log_softmax(word_ins_out, 2) - word_ins_score_avg.append(word_ins_score) - word_ins_attn_avg.append(word_ins_attn) - word_ins_score_avg = torch.logsumexp( - torch.stack(word_ins_score_avg, dim=0), dim=0 - ) - math.log(len(self.models)) - if word_ins_attn_avg[0] is not None: - word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0) / len(self.models) - else: - word_ins_attn_avg = None - word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1) - - _tokens, _scores = _apply_ins_words( - output_tokens[can_ins_word], - output_scores[can_ins_word], - word_ins_pred, - word_ins_score_max, - self.unk, - ) - - output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) - output_scores = _fill(output_scores, can_ins_word, _scores, 0) - attn = _fill(attn, can_ins_word, word_ins_attn, 0.0) - return output_tokens, output_scores, attn - - def initialize_output_tokens(self, encoder_outs, src_tokens): - # LevT doesn't do length prediction. 
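-        # Unlike NAT models with an explicit length predictor, LevT grows and
-        # shrinks its own canvas through insertion and deletion, so
-        # initialization is model-independent and simply delegates to the
-        # first model in the ensemble.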
- return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens) diff --git a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/hifi_gan/train.py b/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/hifi_gan/train.py deleted file mode 100644 index 709e085d019eb98006b26555f7fe2582d759efa6..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/hifi_gan/train.py +++ /dev/null @@ -1,400 +0,0 @@ -import warnings - -warnings.simplefilter(action="ignore", category=FutureWarning) -import itertools -import os -import time -import argparse -import json -import torch -import torch.nn.functional as F -from torch.utils.tensorboard import SummaryWriter -from torch.utils.data import DistributedSampler, DataLoader -import torch.multiprocessing as mp -from torch.distributed import init_process_group -from torch.nn.parallel import DistributedDataParallel -from env import AttrDict, build_env -from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist -from models import ( - Generator, - MultiPeriodDiscriminator, - MultiScaleDiscriminator, - feature_loss, - generator_loss, - discriminator_loss, -) -from utils import plot_spectrogram, scan_checkpoint, load_checkpoint, save_checkpoint - -torch.backends.cudnn.benchmark = True - - -def train(rank, a, h): - if h.num_gpus > 1: - init_process_group( - backend=h.dist_config["dist_backend"], - init_method=h.dist_config["dist_url"], - world_size=h.dist_config["world_size"] * h.num_gpus, - rank=rank, - ) - - torch.cuda.manual_seed(h.seed) - device = torch.device("cuda:{:d}".format(rank)) - - generator = Generator(h).to(device) - mpd = MultiPeriodDiscriminator().to(device) - msd = MultiScaleDiscriminator().to(device) - - if rank == 0: - print(generator) - os.makedirs(a.checkpoint_path, exist_ok=True) - print("checkpoints directory : ", a.checkpoint_path) - - if os.path.isdir(a.checkpoint_path): - cp_g = scan_checkpoint(a.checkpoint_path, "g_") - cp_do = scan_checkpoint(a.checkpoint_path, "do_") - - steps = 0 - if cp_g is None or cp_do is None: - state_dict_do = None - last_epoch = -1 - else: - state_dict_g = load_checkpoint(cp_g, device) - state_dict_do = load_checkpoint(cp_do, device) - generator.load_state_dict(state_dict_g["generator"]) - mpd.load_state_dict(state_dict_do["mpd"]) - msd.load_state_dict(state_dict_do["msd"]) - steps = state_dict_do["steps"] + 1 - last_epoch = state_dict_do["epoch"] - - if h.num_gpus > 1: - generator = DistributedDataParallel(generator, device_ids=[rank]).to(device) - mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device) - msd = DistributedDataParallel(msd, device_ids=[rank]).to(device) - - optim_g = torch.optim.AdamW( - generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2] - ) - optim_d = torch.optim.AdamW( - itertools.chain(msd.parameters(), mpd.parameters()), - h.learning_rate, - betas=[h.adam_b1, h.adam_b2], - ) - - if state_dict_do is not None: - optim_g.load_state_dict(state_dict_do["optim_g"]) - optim_d.load_state_dict(state_dict_do["optim_d"]) - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR( - optim_g, gamma=h.lr_decay, last_epoch=last_epoch - ) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR( - optim_d, gamma=h.lr_decay, last_epoch=last_epoch - ) - - training_filelist, validation_filelist = get_dataset_filelist(a) - - trainset = MelDataset( - training_filelist, - h.segment_size, - h.n_fft, - h.num_mels, - h.hop_size, - h.win_size, - h.sampling_rate, - h.fmin, - h.fmax, - n_cache_reuse=0, - shuffle=False if h.num_gpus > 1 
else True, - fmax_loss=h.fmax_for_loss, - device=device, - fine_tuning=a.fine_tuning, - base_mels_path=a.input_mels_dir, - ) - - train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None - - train_loader = DataLoader( - trainset, - num_workers=h.num_workers, - shuffle=False, - sampler=train_sampler, - batch_size=h.batch_size, - pin_memory=True, - drop_last=True, - ) - - if rank == 0: - validset = MelDataset( - validation_filelist, - h.segment_size, - h.n_fft, - h.num_mels, - h.hop_size, - h.win_size, - h.sampling_rate, - h.fmin, - h.fmax, - False, - False, - n_cache_reuse=0, - fmax_loss=h.fmax_for_loss, - device=device, - fine_tuning=a.fine_tuning, - base_mels_path=a.input_mels_dir, - ) - validation_loader = DataLoader( - validset, - num_workers=1, - shuffle=False, - sampler=None, - batch_size=1, - pin_memory=True, - drop_last=True, - ) - - sw = SummaryWriter(os.path.join(a.logs_path)) - - generator.train() - mpd.train() - msd.train() - for epoch in range(max(0, last_epoch), a.training_epochs): - if rank == 0: - start = time.time() - print("Epoch: {}".format(epoch + 1)) - - if h.num_gpus > 1: - train_sampler.set_epoch(epoch) - - for i, batch in enumerate(train_loader): - if rank == 0: - start_b = time.time() - x, y, _, y_mel = batch - x = torch.autograd.Variable(x.to(device, non_blocking=True)) - y = torch.autograd.Variable(y.to(device, non_blocking=True)) - y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=True)) - y = y.unsqueeze(1) - - y_g_hat = generator(x) - y_g_hat_mel = mel_spectrogram( - y_g_hat.squeeze(1), - h.n_fft, - h.num_mels, - h.sampling_rate, - h.hop_size, - h.win_size, - h.fmin, - h.fmax_for_loss, - ) - - optim_d.zero_grad() - - # MPD - y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach()) - loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss( - y_df_hat_r, y_df_hat_g - ) - - # MSD - y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach()) - loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss( - y_ds_hat_r, y_ds_hat_g - ) - - loss_disc_all = loss_disc_s + loss_disc_f - - loss_disc_all.backward() - optim_d.step() - - # Generator - optim_g.zero_grad() - - # L1 Mel-Spectrogram Loss - loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45 - - y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat) - y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat) - loss_fm_f = feature_loss(fmap_f_r, fmap_f_g) - loss_fm_s = feature_loss(fmap_s_r, fmap_s_g) - loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g) - loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g) - loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel - - loss_gen_all.backward() - optim_g.step() - - if rank == 0: - # STDOUT logging - if steps % a.stdout_interval == 0: - with torch.no_grad(): - mel_error = F.l1_loss(y_mel, y_g_hat_mel).item() - - print( - "Steps : {:d}, Gen Loss Total : {:4.3f}, Mel-Spec. 
Error : {:4.3f}, s/b : {:4.3f}".format( - steps, loss_gen_all, mel_error, time.time() - start_b - ) - ) - - # checkpointing - if steps % a.checkpoint_interval == 0 and steps != 0: - checkpoint_path = "{}/g_{:08d}".format(a.checkpoint_path, steps) - save_checkpoint( - checkpoint_path, - { - "generator": ( - generator.module if h.num_gpus > 1 else generator - ).state_dict() - }, - ) - checkpoint_path = "{}/do_{:08d}".format(a.checkpoint_path, steps) - save_checkpoint( - checkpoint_path, - { - "mpd": (mpd.module if h.num_gpus > 1 else mpd).state_dict(), - "msd": (msd.module if h.num_gpus > 1 else msd).state_dict(), - "optim_g": optim_g.state_dict(), - "optim_d": optim_d.state_dict(), - "steps": steps, - "epoch": epoch, - }, - ) - - # Tensorboard summary logging - if steps % a.summary_interval == 0: - sw.add_scalar("training/gen_loss_total", loss_gen_all, steps) - sw.add_scalar("training/mel_spec_error", mel_error, steps) - - # Validation - if steps % a.validation_interval == 0: # and steps != 0: - generator.eval() - torch.cuda.empty_cache() - val_err_tot = 0 - with torch.no_grad(): - for j, batch in enumerate(validation_loader): - x, y, _, y_mel = batch - y_g_hat = generator(x.to(device)) - y_mel = torch.autograd.Variable( - y_mel.to(device, non_blocking=True) - ) - y_g_hat_mel = mel_spectrogram( - y_g_hat.squeeze(1), - h.n_fft, - h.num_mels, - h.sampling_rate, - h.hop_size, - h.win_size, - h.fmin, - h.fmax_for_loss, - ) - val_err_tot += F.l1_loss(y_mel, y_g_hat_mel).item() - - if j <= 4: - if steps == 0: - sw.add_audio( - "gt/y_{}".format(j), - y[0], - steps, - h.sampling_rate, - ) - sw.add_figure( - "gt/y_spec_{}".format(j), - plot_spectrogram(x[0]), - steps, - ) - - sw.add_audio( - "generated/y_hat_{}".format(j), - y_g_hat[0], - steps, - h.sampling_rate, - ) - y_hat_spec = mel_spectrogram( - y_g_hat.squeeze(1), - h.n_fft, - h.num_mels, - h.sampling_rate, - h.hop_size, - h.win_size, - h.fmin, - h.fmax, - ) - sw.add_figure( - "generated/y_hat_spec_{}".format(j), - plot_spectrogram( - y_hat_spec.squeeze(0).cpu().numpy() - ), - steps, - ) - - val_err = val_err_tot / (j + 1) - sw.add_scalar("validation/mel_spec_error", val_err, steps) - - generator.train() - - steps += 1 - - scheduler_g.step() - scheduler_d.step() - - if rank == 0: - print( - "Time taken for epoch {} is {} sec\n".format( - epoch + 1, int(time.time() - start) - ) - ) - - -def main(): - print("Initializing Training Process..") - - parser = argparse.ArgumentParser() - - parser.add_argument("--group_name", default=None) - parser.add_argument("--input_wavs_dir", default="LJSpeech-1.1/wavs") - parser.add_argument("--input_mels_dir", default="ft_dataset") - parser.add_argument("--input_training_file", default="LJSpeech-1.1/training.txt") - parser.add_argument( - "--input_validation_file", default="LJSpeech-1.1/validation.txt" - ) - parser.add_argument("--checkpoint_path", default="cp_hifigan") - parser.add_argument("--logs_path", default="") - parser.add_argument("--config", default="") - parser.add_argument("--training_epochs", default=3100, type=int) - parser.add_argument("--stdout_interval", default=5, type=int) - parser.add_argument("--checkpoint_interval", default=5000, type=int) - parser.add_argument("--summary_interval", default=100, type=int) - parser.add_argument("--validation_interval", default=1000, type=int) - parser.add_argument("--fine_tuning", default=False, type=bool) - - a = parser.parse_args() - - with open(a.config) as f: - data = f.read() - - json_config = json.loads(data) - h = AttrDict(json_config) - 
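    # build_env(config, "config.json", checkpoint_path) presumably snapshots the
    # active config into the checkpoint directory so each run records its
    # hyperparameters (behaviour inferred from the call, see env.py).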
build_env(a.config, "config.json", a.checkpoint_path) - - torch.manual_seed(h.seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed(h.seed) - h.num_gpus = torch.cuda.device_count() - h.batch_size = int(h.batch_size / h.num_gpus) - print("Batch size per GPU :", h.batch_size) - else: - pass - - if h.num_gpus > 1: - mp.spawn( - train, - nprocs=h.num_gpus, - args=( - a, - h, - ), - ) - else: - train(0, a, h) - - -if __name__ == "__main__": - main() diff --git a/spaces/Hexequin/Linaqruf-anything-v3.0/app.py b/spaces/Hexequin/Linaqruf-anything-v3.0/app.py deleted file mode 100644 index 16e8131a0bbf7b06956e69e2b7758fa01e4eb51f..0000000000000000000000000000000000000000 --- a/spaces/Hexequin/Linaqruf-anything-v3.0/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Linaqruf/anything-v3.0").launch() \ No newline at end of file diff --git a/spaces/Hoodady/3DFuse/ldm/modules/diffusionmodules/openaimodel.py b/spaces/Hoodady/3DFuse/ldm/modules/diffusionmodules/openaimodel.py deleted file mode 100644 index 7df6b5abfe8eff07f0c8e8703ba8aee90d45984b..0000000000000000000000000000000000000000 --- a/spaces/Hoodady/3DFuse/ldm/modules/diffusionmodules/openaimodel.py +++ /dev/null @@ -1,786 +0,0 @@ -from abc import abstractmethod -import math - -import numpy as np -import torch as th -import torch.nn as nn -import torch.nn.functional as F - -from ldm.modules.diffusionmodules.util import ( - checkpoint, - conv_nd, - linear, - avg_pool_nd, - zero_module, - normalization, - timestep_embedding, -) -from ldm.modules.attention import SpatialTransformer -from ldm.util import exists - - -# dummy replace -def convert_module_to_f16(x): - pass - -def convert_module_to_f32(x): - pass - - -## go -class AttentionPool2d(nn.Module): - """ - Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py - """ - - def __init__( - self, - spacial_dim: int, - embed_dim: int, - num_heads_channels: int, - output_dim: int = None, - ): - super().__init__() - self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) - self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) - self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) - self.num_heads = embed_dim // num_heads_channels - self.attention = QKVAttention(self.num_heads) - - def forward(self, x): - b, c, *_spatial = x.shape - x = x.reshape(b, c, -1) # NC(HW) - x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) - x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) - x = self.qkv_proj(x) - x = self.attention(x) - x = self.c_proj(x) - return x[:, :, 0] - - -class TimestepBlock(nn.Module): - """ - Any module where forward() takes timestep embeddings as a second argument. - """ - - @abstractmethod - def forward(self, x, emb): - """ - Apply the module to `x` given `emb` timestep embeddings. - """ - - -class TimestepEmbedSequential(nn.Sequential, TimestepBlock): - """ - A sequential module that passes timestep embeddings to the children that - support it as an extra input. - """ - - def forward(self, x, emb, context=None): - for layer in self: - if isinstance(layer, TimestepBlock): - x = layer(x, emb) - elif isinstance(layer, SpatialTransformer): - x = layer(x, context) - else: - x = layer(x) - return x - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. 
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - upsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - if use_conv: - self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) - - def forward(self, x): - assert x.shape[1] == self.channels - if self.dims == 3: - x = F.interpolate( - x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" - ) - else: - x = F.interpolate(x, scale_factor=2, mode="nearest") - if self.use_conv: - x = self.conv(x) - return x - -class TransposedUpsample(nn.Module): - 'Learned 2x upsampling without padding' - def __init__(self, channels, out_channels=None, ks=5): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - - self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) - - def forward(self,x): - return self.up(x) - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - downsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - stride = 2 if dims != 3 else (1, 2, 2) - if use_conv: - self.op = conv_nd( - dims, self.channels, self.out_channels, 3, stride=stride, padding=padding - ) - else: - assert self.channels == self.out_channels - self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class ResBlock(TimestepBlock): - """ - A residual block that can optionally change the number of channels. - :param channels: the number of input channels. - :param emb_channels: the number of timestep embedding channels. - :param dropout: the rate of dropout. - :param out_channels: if specified, the number of out channels. - :param use_conv: if True and out_channels is specified, use a spatial - convolution instead of a smaller 1x1 convolution to change the - channels in the skip connection. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param use_checkpoint: if True, use gradient checkpointing on this module. - :param up: if True, use this block for upsampling. - :param down: if True, use this block for downsampling. 
- """ - - def __init__( - self, - channels, - emb_channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - use_checkpoint=False, - up=False, - down=False, - ): - super().__init__() - self.channels = channels - self.emb_channels = emb_channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_checkpoint = use_checkpoint - self.use_scale_shift_norm = use_scale_shift_norm - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - conv_nd(dims, channels, self.out_channels, 3, padding=1), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.emb_layers = nn.Sequential( - nn.SiLU(), - linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, - ), - ) - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module( - conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) - ), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = conv_nd( - dims, channels, self.out_channels, 3, padding=1 - ) - else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) - - def forward(self, x, emb): - """ - Apply the block to a Tensor, conditioned on a timestep embedding. - :param x: an [N x C x ...] Tensor of features. - :param emb: an [N x emb_channels] Tensor of timestep embeddings. - :return: an [N x C x ...] Tensor of outputs. - """ - return checkpoint( - self._forward, (x, emb), self.parameters(), self.use_checkpoint - ) - - - def _forward(self, x, emb): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] - if self.use_scale_shift_norm: - out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = th.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift - h = out_rest(h) - else: - h = h + emb_out - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AttentionBlock(nn.Module): - """ - An attention block that allows spatial positions to attend to each other. - Originally ported from here, but adapted to the N-d case. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
- """ - - def __init__( - self, - channels, - num_heads=1, - num_head_channels=-1, - use_checkpoint=False, - use_new_attention_order=False, - ): - super().__init__() - self.channels = channels - if num_head_channels == -1: - self.num_heads = num_heads - else: - assert ( - channels % num_head_channels == 0 - ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" - self.num_heads = channels // num_head_channels - self.use_checkpoint = use_checkpoint - self.norm = normalization(channels) - self.qkv = conv_nd(1, channels, channels * 3, 1) - if use_new_attention_order: - # split qkv before split heads - self.attention = QKVAttention(self.num_heads) - else: - # split heads before split qkv - self.attention = QKVAttentionLegacy(self.num_heads) - - self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) - - def forward(self, x): - return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! - #return pt_checkpoint(self._forward, x) # pytorch - - def _forward(self, x): - b, c, *spatial = x.shape - x = x.reshape(b, c, -1) - qkv = self.qkv(self.norm(x)) - h = self.attention(qkv) - h = self.proj_out(h) - return (x + h).reshape(b, c, *spatial) - - -def count_flops_attn(model, _x, y): - """ - A counter for the `thop` package to count the operations in an - attention operation. - Meant to be used like: - macs, params = thop.profile( - model, - inputs=(inputs, timestamps), - custom_ops={QKVAttention: QKVAttention.count_flops}, - ) - """ - b, c, *spatial = y[0].shape - num_spatial = int(np.prod(spatial)) - # We perform two matmuls with the same number of ops. - # The first computes the weight matrix, the second computes - # the combination of the value vectors. - matmul_ops = 2 * b * (num_spatial ** 2) * c - model.total_ops += th.DoubleTensor([matmul_ops]) - - -class QKVAttentionLegacy(nn.Module): - """ - A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. - """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class QKVAttention(nn.Module): - """ - A module which performs QKV attention and splits in a different order. - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. 
- """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.chunk(3, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", - (q * scale).view(bs * self.n_heads, ch, length), - (k * scale).view(bs * self.n_heads, ch, length), - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class UNetModel(nn.Module): - """ - The full UNet model with attention and timestep embedding. - :param in_channels: channels in the input Tensor. - :param model_channels: base channel count for the model. - :param out_channels: channels in the output Tensor. - :param num_res_blocks: number of residual blocks per downsample. - :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. - :param dropout: the dropout probability. - :param channel_mult: channel multiplier for each level of the UNet. - :param conv_resample: if True, use learned convolutions for upsampling and - downsampling. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param num_classes: if specified (as an int), then this model will be - class-conditional with `num_classes` classes. - :param use_checkpoint: use gradient checkpointing to reduce memory usage. - :param num_heads: the number of attention heads in each attention layer. - :param num_heads_channels: if specified, ignore num_heads and instead use - a fixed channel width per attention head. - :param num_heads_upsample: works with num_heads to set a different number - of heads for upsampling. Deprecated. - :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. - :param resblock_updown: use residual blocks for up/downsampling. - :param use_new_attention_order: use a different attention pattern for potentially - increased efficiency. - """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - num_classes=None, - use_checkpoint=False, - use_fp16=False, - num_heads=-1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - use_spatial_transformer=False, # custom transformer support - transformer_depth=1, # custom transformer support - context_dim=None, # custom transformer support - n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model - legacy=True, - disable_self_attentions=None, - num_attention_blocks=None, - disable_middle_self_attn=False, - use_linear_in_transformer=False, - ): - super().__init__() - if use_spatial_transformer: - assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' - - if context_dim is not None: - assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
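            # context_dim may arrive as an OmegaConf ListConfig when the model is
            # built from a YAML config; the lines below normalise it to a plain list.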
- from omegaconf.listconfig import ListConfig - if type(context_dim) == ListConfig: - context_dim = list(context_dim) - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - if num_heads == -1: - assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' - - if num_head_channels == -1: - assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' - - self.image_size = image_size - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - if isinstance(num_res_blocks, int): - self.num_res_blocks = len(channel_mult) * [num_res_blocks] - else: - if len(num_res_blocks) != len(channel_mult): - raise ValueError("provide num_res_blocks either as an int (globally constant) or " - "as a list/tuple (per-level) with the same length as channel_mult") - self.num_res_blocks = num_res_blocks - if disable_self_attentions is not None: - # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not - assert len(disable_self_attentions) == len(channel_mult) - if num_attention_blocks is not None: - assert len(num_attention_blocks) == len(self.num_res_blocks) - assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) - print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " - f"This option has LESS priority than attention_resolutions {attention_resolutions}, " - f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " - f"attention will still not be set.") - - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.num_classes = num_classes - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - self.predict_codebook_ids = n_embed is not None - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - if self.num_classes is not None: - if isinstance(self.num_classes, int): - self.label_emb = nn.Embedding(num_classes, time_embed_dim) - elif self.num_classes == "continuous": - print("setting up linear c_adm embedding layer") - self.label_emb = nn.Linear(1, time_embed_dim) - else: - raise ValueError() - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for nr in range(self.num_res_blocks[level]): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - if exists(disable_self_attentions): - disabled_sa = disable_self_attentions[level] - else: - disabled_sa = False - - if not 
exists(num_attention_blocks) or nr < num_attention_blocks[level]: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, - disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, - disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - - self.output_blocks = nn.ModuleList([]) - for level, mult in list(enumerate(channel_mult))[::-1]: - for i in range(self.num_res_blocks[level] + 1): - ich = input_block_chans.pop() - layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, - out_channels=model_channels * mult, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = model_channels * mult - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - if exists(disable_self_attentions): - disabled_sa = disable_self_attentions[level] - else: - disabled_sa = False - - if not exists(num_attention_blocks) or i < num_attention_blocks[level]: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads_upsample, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, - disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint - ) - ) - if level and i == self.num_res_blocks[level]: - out_ch = ch - layers.append( - ResBlock( - ch, 
- time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - up=True, - ) - if resblock_updown - else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ds //= 2 - self.output_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), - ) - if self.predict_codebook_ids: - self.id_predictor = nn.Sequential( - normalization(ch), - conv_nd(dims, model_channels, n_embed, 1), - #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits - ) - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - self.output_blocks.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - self.output_blocks.apply(convert_module_to_f32) - - def forward(self, x, timesteps=None, context=None, y=None,**kwargs): - """ - Apply the model to an input batch. - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :param context: conditioning plugged in via crossattn - :param y: an [N] Tensor of labels, if class-conditional. - :return: an [N x C x ...] Tensor of outputs. - """ - assert (y is not None) == ( - self.num_classes is not None - ), "must specify y if and only if the model is class-conditional" - hs = [] - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) - emb = self.time_embed(t_emb) - - if self.num_classes is not None: - assert y.shape[0] == x.shape[0] - emb = emb + self.label_emb(y) - - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb, context) - hs.append(h) - h = self.middle_block(h, emb, context) - for module in self.output_blocks: - h = th.cat([h, hs.pop()], dim=1) - h = module(h, emb, context) - h = h.type(x.dtype) - if self.predict_codebook_ids: - return self.id_predictor(h) - else: - return self.out(h) diff --git a/spaces/ICML2022/OFA/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py b/spaces/ICML2022/OFA/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py deleted file mode 100644 index 216093f7087a61060767babf5a3f3f4e716a4dfe..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
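-
-# Note: CommonsenseQA is cast here as sentence ranking: each answer choice
-# (five by default) is appended to its question and scored as a candidate.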
- -import json -import os - -import numpy as np -import torch -from fairseq.data import ( - Dictionary, - IdDataset, - ListDataset, - NestedDictionaryDataset, - NumelDataset, - NumSamplesDataset, - RawLabelDataset, - RightPadDataset, - SortDataset, - data_utils, - encoders, -) -from fairseq.tasks import LegacyFairseqTask, register_task - - -@register_task("commonsense_qa") -class CommonsenseQATask(LegacyFairseqTask): - """Task to finetune RoBERTa for Commonsense QA.""" - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - parser.add_argument( - "data", metavar="DIR", help="path to data directory; we load .jsonl" - ) - parser.add_argument( - "--init-token", - type=int, - default=None, - help="add token at the beginning of each batch item", - ) - parser.add_argument("--num-classes", type=int, default=5) - - def __init__(self, args, vocab): - super().__init__(args) - self.vocab = vocab - self.mask = vocab.add_symbol("") - - self.bpe = encoders.build_bpe(args) - - @classmethod - def load_dictionary(cls, filename): - """Load the dictionary from the filename - - Args: - filename (str): the filename - """ - dictionary = Dictionary.load(filename) - dictionary.add_symbol("") - return dictionary - - @classmethod - def setup_task(cls, args, **kwargs): - assert ( - args.criterion == "sentence_ranking" - ), "Must set --criterion=sentence_ranking" - - # load data and label dictionaries - vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt")) - print("| dictionary: {} types".format(len(vocab))) - - return cls(args, vocab) - - def load_dataset( - self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs - ): - """Load a given dataset split. - - Args: - split (str): name of the split (e.g., train, valid, test) - """ - - def binarize(s, append_bos=False): - if self.bpe is not None: - s = self.bpe.encode(s) - tokens = self.vocab.encode_line( - s, - append_eos=True, - add_if_not_exist=False, - ).long() - if append_bos and self.args.init_token is not None: - tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) - return tokens - - if data_path is None: - data_path = os.path.join(self.args.data, split + ".jsonl") - if not os.path.exists(data_path): - raise FileNotFoundError("Cannot find data: {}".format(data_path)) - - src_tokens = [[] for i in range(self.args.num_classes)] - src_lengths = [[] for i in range(self.args.num_classes)] - labels = [] - - with open(data_path) as h: - for line in h: - example = json.loads(line.strip()) - if "answerKey" in example: - label = ord(example["answerKey"]) - ord("A") - labels.append(label) - question = example["question"]["stem"] - assert len(example["question"]["choices"]) == self.args.num_classes - # format: ` Q: Where would I not want a fox? 
A: hen house ` - question = "Q: " + question - question_toks = binarize(question, append_bos=True) - for i, choice in enumerate(example["question"]["choices"]): - src = "A: " + choice["text"] - src_bin = torch.cat([question_toks, binarize(src)]) - src_tokens[i].append(src_bin) - src_lengths[i].append(len(src_bin)) - assert all( - len(src_tokens[0]) == len(src_tokens[i]) - for i in range(self.args.num_classes) - ) - assert len(src_tokens[0]) == len(src_lengths[0]) - assert len(labels) == 0 or len(labels) == len(src_tokens[0]) - - for i in range(self.args.num_classes): - src_lengths[i] = np.array(src_lengths[i]) - src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i]) - src_lengths[i] = ListDataset(src_lengths[i]) - - dataset = { - "id": IdDataset(), - "nsentences": NumSamplesDataset(), - "ntokens": NumelDataset(src_tokens[0], reduce=True), - } - - for i in range(self.args.num_classes): - dataset.update( - { - "net_input{}".format(i + 1): { - "src_tokens": RightPadDataset( - src_tokens[i], - pad_idx=self.source_dictionary.pad(), - ), - "src_lengths": src_lengths[i], - } - } - ) - - if len(labels) > 0: - dataset.update({"target": RawLabelDataset(labels)}) - - dataset = NestedDictionaryDataset( - dataset, - sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])], - ) - - with data_utils.numpy_seed(self.args.seed): - dataset = SortDataset( - dataset, - # shuffle - sort_order=[np.random.permutation(len(dataset))], - ) - - print("| Loaded {} with {} samples".format(split, len(dataset))) - - self.datasets[split] = dataset - return self.datasets[split] - - def build_model(self, args): - from fairseq import models - - model = models.build_model(args, self) - - model.register_classification_head( - "sentence_classification_head", - num_classes=1, - ) - - return model - - @property - def source_dictionary(self): - return self.vocab - - @property - def target_dictionary(self): - return self.vocab diff --git a/spaces/ICML2022/OFA/fairseq/examples/roberta/wsc/README.md b/spaces/ICML2022/OFA/fairseq/examples/roberta/wsc/README.md deleted file mode 100644 index 21a045d999739836a17574593292e42131315ae9..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/roberta/wsc/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# Finetuning RoBERTa on Winograd Schema Challenge (WSC) data - -The following instructions can be used to finetune RoBERTa on the WSC training -data provided by [SuperGLUE](https://super.gluebenchmark.com/). - -Note that there is high variance in the results. For our GLUE/SuperGLUE -submission we swept over the learning rate (1e-5, 2e-5, 3e-5), batch size (16, -32, 64) and total number of updates (500, 1000, 2000, 3000), as well as the -random seed. Out of ~100 runs we chose the best 7 models and ensembled them. - -**Approach:** The instructions below use a slightly different loss function than -what's described in the original RoBERTa arXiv paper. In particular, -[Kocijan et al. (2019)](https://arxiv.org/abs/1905.06290) introduce a margin -ranking loss between `(query, candidate)` pairs with tunable hyperparameters -alpha and beta. This is supported in our code as well with the `--wsc-alpha` and -`--wsc-beta` arguments. However, we achieved slightly better (and more robust) -results on the development set by instead using a single cross entropy loss term -over the log-probabilities for the query and all mined candidates. 
**The
-candidates are mined using spaCy from each input sentence in isolation, so the
-approach remains strictly pointwise.** This reduces the number of
-hyperparameters and our best model achieved 92.3% development set accuracy,
-compared to ~90% accuracy for the margin loss. Later versions of the RoBERTa
-arXiv paper will describe this updated formulation.
-
-### 1) Download the WSC data from the SuperGLUE website:
-```bash
-wget https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WSC.zip
-unzip WSC.zip
-
-# we also need to copy the RoBERTa dictionary into the same directory
-wget -O WSC/dict.txt https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt
-```
-
-### 2) Finetune over the provided training data:
-```bash
-TOTAL_NUM_UPDATES=2000  # Total number of training steps.
-WARMUP_UPDATES=250      # Linearly increase LR over this many steps.
-LR=2e-05                # Peak LR for polynomial LR scheduler.
-MAX_SENTENCES=16        # Batch size per GPU.
-SEED=1                  # Random seed.
-ROBERTA_PATH=/path/to/roberta/model.pt
-
-# we use the --user-dir option to load the task and criterion
-# from the examples/roberta/wsc directory:
-FAIRSEQ_PATH=/path/to/fairseq
-FAIRSEQ_USER_DIR=${FAIRSEQ_PATH}/examples/roberta/wsc
-
-CUDA_VISIBLE_DEVICES=0,1,2,3 fairseq-train WSC/ \
-    --restore-file $ROBERTA_PATH \
-    --reset-optimizer --reset-dataloader --reset-meters \
-    --no-epoch-checkpoints --no-last-checkpoints --no-save-optimizer-state \
-    --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \
-    --valid-subset val \
-    --fp16 --ddp-backend legacy_ddp \
-    --user-dir $FAIRSEQ_USER_DIR \
-    --task wsc --criterion wsc --wsc-cross-entropy \
-    --arch roberta_large --bpe gpt2 --max-positions 512 \
-    --dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \
-    --optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 \
-    --lr-scheduler polynomial_decay --lr $LR \
-    --warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_NUM_UPDATES \
-    --batch-size $MAX_SENTENCES \
-    --max-update $TOTAL_NUM_UPDATES \
-    --log-format simple --log-interval 100 \
-    --seed $SEED
-```
-
-The above command assumes training on 4 GPUs, but you can achieve the same
-results on a single GPU by adding `--update-freq=4`.
-
-### 3) Evaluate
-```python
-from fairseq.models.roberta import RobertaModel
-from examples.roberta.wsc import wsc_utils  # also loads WSC task and criterion
-roberta = RobertaModel.from_pretrained('checkpoints', 'checkpoint_best.pt', 'WSC/')
-roberta.cuda()
-nsamples, ncorrect = 0, 0
-for sentence, label in wsc_utils.jsonl_iterator('WSC/val.jsonl', eval=True):
-    pred = roberta.disambiguate_pronoun(sentence)
-    nsamples += 1
-    if pred == label:
-        ncorrect += 1
-print('Accuracy: ' + str(ncorrect / float(nsamples)))
-# Accuracy: 0.9230769230769231
-```
-
-## RoBERTa training on WinoGrande dataset
-We have also provided `winogrande` task and criterion for finetuning on
-[WinoGrande](https://mosaic.allenai.org/projects/winogrande)-like datasets
-where there are always two candidates and one is correct.
-It's a more efficient implementation for such cases.
-
-```bash
-TOTAL_NUM_UPDATES=23750 # Total number of training steps.
-WARMUP_UPDATES=2375     # Linearly increase LR over this many steps.
-LR=1e-05                # Peak LR for polynomial LR scheduler.
-MAX_SENTENCES=32        # Batch size per GPU.
-SEED=1                  # Random seed.
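-# Note: as in the WSC recipe above, --update-freq can be added to accumulate
-# gradients if a larger effective batch is needed on a single GPU.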
-ROBERTA_PATH=/path/to/roberta/model.pt - -# we use the --user-dir option to load the task and criterion -# from the examples/roberta/wsc directory: -FAIRSEQ_PATH=/path/to/fairseq -FAIRSEQ_USER_DIR=${FAIRSEQ_PATH}/examples/roberta/wsc - -cd fairseq -CUDA_VISIBLE_DEVICES=0 fairseq-train winogrande_1.0/ \ - --restore-file $ROBERTA_PATH \ - --reset-optimizer --reset-dataloader --reset-meters \ - --no-epoch-checkpoints --no-last-checkpoints --no-save-optimizer-state \ - --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \ - --valid-subset val \ - --fp16 --ddp-backend legacy_ddp \ - --user-dir $FAIRSEQ_USER_DIR \ - --task winogrande --criterion winogrande \ - --wsc-margin-alpha 5.0 --wsc-margin-beta 0.4 \ - --arch roberta_large --bpe gpt2 --max-positions 512 \ - --dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \ - --optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 \ - --lr-scheduler polynomial_decay --lr $LR \ - --warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_NUM_UPDATES \ - --batch-size $MAX_SENTENCES \ - --max-update $TOTAL_NUM_UPDATES \ - --log-format simple --log-interval 100 -``` diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/text_to_speech/__init__.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/text_to_speech/__init__.py deleted file mode 100644 index 652fee0d685b61af47b314367037888fa640e1a7..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/models/text_to_speech/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .tacotron2 import * # noqa -from .tts_transformer import * # noqa -from .fastspeech2 import * # noqa diff --git a/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/utils/autobatch.py b/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/utils/autobatch.py deleted file mode 100644 index bdeb91c3d2bd15e53eb65715228932d3e87e0989..0000000000000000000000000000000000000000 --- a/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/utils/autobatch.py +++ /dev/null @@ -1,72 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Auto-batch utils -""" - -from copy import deepcopy - -import numpy as np -import torch - -from utils.general import LOGGER, colorstr -from utils.torch_utils import profile - - -def check_train_batch_size(model, imgsz=640, amp=True): - # Check YOLOv5 training batch size - with torch.cuda.amp.autocast(amp): - return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size - - -def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): - # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory - # Usage: - # import torch - # from utils.autobatch import autobatch - # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) - # print(autobatch(model)) - - # Check device - prefix = colorstr('AutoBatch: ') - LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') - device = next(model.parameters()).device # get model device - if device.type == 'cpu': - LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') - return batch_size - if torch.backends.cudnn.benchmark: - LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') - return batch_size - - # Inspect CUDA memory - gb = 1 << 30 # bytes to GiB (1024 ** 3) - d = str(device).upper() # 'CUDA:0' - 
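    # Read total/reserved/allocated CUDA memory to estimate the free headroom
    # that the profiled batch sizes are fitted against below.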
properties = torch.cuda.get_device_properties(device)  # device properties
-    t = properties.total_memory / gb  # GiB total
-    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
-    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
-    f = t - (r + a)  # GiB free
-    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
-
-    # Profile batch sizes
-    batch_sizes = [1, 2, 4, 8, 16]
-    try:
-        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
-        results = profile(img, model, n=3, device=device)
-    except Exception as e:
-        LOGGER.warning(f'{prefix}{e}')
-
-    # Fit a solution
-    y = [x[2] for x in results if x]  # memory [2]
-    p = np.polyfit(batch_sizes[:len(y)], y, deg=1)  # first degree polynomial fit
-    b = int((f * fraction - p[1]) / p[0])  # y intercept (optimal batch size)
-    if None in results:  # some sizes failed
-        i = results.index(None)  # first fail index
-        if b >= batch_sizes[i]:  # y intercept above failure point
-            b = batch_sizes[max(i - 1, 0)]  # select prior safe point
-    if b < 1 or b > 1024:  # b outside of safe range
-        b = batch_size
-        LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')
-
-    fraction = (np.polyval(p, b) + r + a) / t  # actual fraction predicted
-    LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
-    return b
diff --git a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/text/shanghainese.py b/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/text/shanghainese.py
deleted file mode 100644
index cb29c24a08d2e406e8399cf7bc9fe5cb43cb9c61..0000000000000000000000000000000000000000
--- a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/text/shanghainese.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import re
-import cn2an
-import opencc
-
-
-converter = opencc.OpenCC('zaonhe')
-
-# List of (Latin alphabet, ipa) pairs:
-_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
-    ('A', 'ᴇ'),
-    ('B', 'bi'),
-    ('C', 'si'),
-    ('D', 'di'),
-    ('E', 'i'),
-    ('F', 'ᴇf'),
-    ('G', 'dʑi'),
-    ('H', 'ᴇtɕʰ'),
-    ('I', 'ᴀi'),
-    ('J', 'dʑᴇ'),
-    ('K', 'kʰᴇ'),
-    ('L', 'ᴇl'),
-    ('M', 'ᴇm'),
-    ('N', 'ᴇn'),
-    ('O', 'o'),
-    ('P', 'pʰi'),
-    ('Q', 'kʰiu'),
-    ('R', 'ᴀl'),
-    ('S', 'ᴇs'),
-    ('T', 'tʰi'),
-    ('U', 'ɦiu'),
-    ('V', 'vi'),
-    ('W', 'dᴀbɤliu'),
-    ('X', 'ᴇks'),
-    ('Y', 'uᴀi'),
-    ('Z', 'zᴇ')
-]]
-
-
-def _number_to_shanghainese(num):
-    num = cn2an.an2cn(num).replace('一十','十').replace('二十', '廿').replace('二', '两')
-    return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num)
-
-
-def number_to_shanghainese(text):
-    return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text)
-
-
-def latin_to_ipa(text):
-    for regex, replacement in _latin_to_ipa:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def shanghainese_to_ipa(text):
-    text = number_to_shanghainese(text.upper())
-    text = converter.convert(text).replace('-','').replace('$',' ')
-    text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text)
-    text = re.sub(r'[、;:]', ',', text)
-    text = re.sub(r'\s*,\s*', ', ', text)
-    text = re.sub(r'\s*。\s*', '. ', text)
-    text = re.sub(r'\s*?\s*', '? ', text)
-    text = re.sub(r'\s*!\s*', '! ', text)
-    text = re.sub(r'\s*$', '', text)
-    return text
diff --git a/spaces/Jamkonams/AutoGPT/autogpt/memory/milvus.py b/spaces/Jamkonams/AutoGPT/autogpt/memory/milvus.py
deleted file mode 100644
index 44aa72b956224fa4c2a16d5f40b0eaeb35e98581..0000000000000000000000000000000000000000
--- a/spaces/Jamkonams/AutoGPT/autogpt/memory/milvus.py
+++ /dev/null
@@ -1,115 +0,0 @@
-""" Milvus memory storage provider."""
-from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
-
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
-
-
-class MilvusMemory(MemoryProviderSingleton):
-    """Milvus memory storage provider."""
-
-    def __init__(self, cfg) -> None:
-        """Construct a milvus memory storage connection.
-
-        Args:
-            cfg (Config): Auto-GPT global config.
-        """
-        # connect to milvus server.
-        connections.connect(address=cfg.milvus_addr)
-        fields = [
-            FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
-            FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536),
-            FieldSchema(name="raw_text", dtype=DataType.VARCHAR, max_length=65535),
-        ]
-
-        # create collection if not exist and load it.
-        self.milvus_collection = cfg.milvus_collection
-        self.schema = CollectionSchema(fields, "auto-gpt memory storage")
-        self.collection = Collection(self.milvus_collection, self.schema)
-        # create index if not exist.
-        if not self.collection.has_index():
-            self.collection.release()
-            self.collection.create_index(
-                "embeddings",
-                {
-                    "metric_type": "IP",
-                    "index_type": "HNSW",
-                    "params": {"M": 8, "efConstruction": 64},
-                },
-                index_name="embeddings",
-            )
-        self.collection.load()
-
-    def add(self, data) -> str:
-        """Add an embedding of data into memory.
-
-        Args:
-            data (str): The raw text to construct embedding index.
-
-        Returns:
-            str: log.
-        """
-        embedding = get_ada_embedding(data)
-        result = self.collection.insert([[embedding], [data]])
-        _text = (
-            "Inserting data into memory at primary key: "
-            f"{result.primary_keys[0]}:\n data: {data}"
-        )
-        return _text
-
-    def get(self, data):
-        """Return the most relevant data in memory.
-        Args:
-            data: The data to compare to.
-        """
-        return self.get_relevant(data, 1)
-
-    def clear(self) -> str:
-        """Drop the index in memory.
-
-        Returns:
-            str: log.
-        """
-        self.collection.drop()
-        self.collection = Collection(self.milvus_collection, self.schema)
-        self.collection.create_index(
-            "embeddings",
-            {
-                "metric_type": "IP",
-                "index_type": "HNSW",
-                "params": {"M": 8, "efConstruction": 64},
-            },
-            index_name="embeddings",
-        )
-        self.collection.load()
-        return "Obliviated"
-
-    def get_relevant(self, data: str, num_relevant: int = 5):
-        """Return the top-k relevant data in memory.
-        Args:
-            data: The data to compare to.
-            num_relevant (int, optional): The max number of relevant data.
-                Defaults to 5.
-
-        Returns:
-            list: The top-k relevant data.
-        """
-        # search the embedding and return the most relevant text.
-        embedding = get_ada_embedding(data)
-        search_params = {
-            "metric_type": "IP",
-            "params": {"nprobe": 8},
-        }
-        result = self.collection.search(
-            [embedding],
-            "embeddings",
-            search_params,
-            num_relevant,
-            output_fields=["raw_text"],
-        )
-        return [item.entity.value_of_field("raw_text") for item in result[0]]
-
-    def get_stats(self) -> str:
-        """
-        Returns: The stats of the milvus cache.
- """ - return f"Entities num: {self.collection.num_entities}" diff --git a/spaces/Jeff2323/ai-comic-factory/src/app/interface/page/index.tsx b/spaces/Jeff2323/ai-comic-factory/src/app/interface/page/index.tsx deleted file mode 100644 index 9a4c4fbf9ee68d2e95234c4b33fee0b0b34fa4c1..0000000000000000000000000000000000000000 --- a/spaces/Jeff2323/ai-comic-factory/src/app/interface/page/index.tsx +++ /dev/null @@ -1,59 +0,0 @@ -import { allLayouts } from "@/app/layouts" -import { useStore } from "@/app/store" -import { cn } from "@/lib/utils" -import { useEffect, useRef } from "react" - -export function Page({ page }: { page: number }) { - const zoomLevel = useStore(state => state.zoomLevel) - const layouts = useStore(state => state.layouts) - // const prompt = useStore(state => state.prompt) - - const LayoutElement = (allLayouts as any)[layouts[page]] - - /* - const [canLoad, setCanLoad] = useState(false) - useEffect(() => { - if (prompt?.length) { - setCanLoad(false) - setTimeout(() => { - setCanLoad(true) - }, page * 4000) - } - }, [prompt]) - */ - - const setPage = useStore(state => state.setPage) - const pageRef = useRef(null) - - useEffect(() => { - const element = pageRef.current - if (!element) { return } - setPage(element) - }, [pageRef.current]) - - return ( -
      100 ? `100`}` - }} - > - -
      - ) -} \ No newline at end of file diff --git a/spaces/JeffJing/ZookChatBot/steamship/invocable/paramater_types.py b/spaces/JeffJing/ZookChatBot/steamship/invocable/paramater_types.py deleted file mode 100644 index 546951f9a039baf5fa35b6a8b81aaa0a569f9c98..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/steamship/invocable/paramater_types.py +++ /dev/null @@ -1,10 +0,0 @@ -class longstr(str): # noqa: N801 - """Long string functions mostly as a type annotation for the web.""" - - pass - - -class fileurl(str): # noqa: N801 - """Type alias that, if used in a package method argument, will cause a file upload widget to appear.""" - - pass diff --git a/spaces/Jimpa666/AI-PadelCoach/README.md b/spaces/Jimpa666/AI-PadelCoach/README.md deleted file mode 100644 index 80c90728ed5ef590459b87a2dd74f7a995ceca24..0000000000000000000000000000000000000000 --- a/spaces/Jimpa666/AI-PadelCoach/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AI PadelCoach -emoji: 🐨 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Kieranm/britishmus_plate_material_classifier_space/app.py b/spaces/Kieranm/britishmus_plate_material_classifier_space/app.py deleted file mode 100644 index f230f3bce8dd5f7a036e203ce6a11e29679b7d8a..0000000000000000000000000000000000000000 --- a/spaces/Kieranm/britishmus_plate_material_classifier_space/app.py +++ /dev/null @@ -1,47 +0,0 @@ -#install huggingface_hub["fastai"] gradio timm -from huggingface_hub import from_pretrained_fastai -from gradio import Interface, inputs, outputs -from fastai.learner import Learner -import fastai - -repo_id = "Kieranm/britishmus_plate_material_classifier" - -learner = from_pretrained_fastai(repo_id) - -mappings = { - fastai.torch_core.TensorImage: { - "type": inputs.Image(type='file', label='input'), - "process": lambda inp : inp.name - }, - fastai.torch_core.TensorCategory: { - "type": outputs.Label(num_top_classes=3, label = 'output'), - "process": lambda dls, out: {dls.vocab[i]: float(out[2][i]) for i in range(len(dls.vocab))} - - } -} - -#Taken from fastgradio library - -class Demo: - def __init__(self, learner): - - self.learner = learner - self.types = getattr(self.learner.dls, '_types')[tuple] - - def learner_predict(self, inp): - inp = mappings[self.types[0]]["process"](inp) - prediction = self.learner.predict(inp) - output = mappings[self.types[1]]["process"](self.learner.dls, prediction) - return output - - def launch(self, share=True, debug=False, auth=None, **kwargs): - inputs = mappings[self.types[0]]["type"] - - outputs = mappings[self.types[1]]["type"] - - Interface(fn=self.learner_predict, inputs=inputs, outputs=outputs, - examples = ["examples/earthen1.jpg", "examples/earthen2.png", "examples/porcelain1.png", "examples/porcelain2.png"], - **kwargs).launch(share=share, debug=debug, auth=auth) - - -Demo(learner).launch() \ No newline at end of file diff --git a/spaces/Kyo-Kai/Fsg-pp/Dockerfile b/spaces/Kyo-Kai/Fsg-pp/Dockerfile deleted file mode 100644 index bdba50c33e6c0588a1d4082b8fdbcc023a69d69a..0000000000000000000000000000000000000000 --- a/spaces/Kyo-Kai/Fsg-pp/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -FROM python:3.10 - -# Install Google Chrome -RUN apt-get update && apt-get install -y wget gnupg2 -RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - -RUN echo "deb [arch=amd64] 
http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list
-RUN apt-get update && apt-get install -y google-chrome-stable
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -m -u 1000 user
-# Switch to the "user" user
-USER user
-# Set home to the user's home directory
-ENV HOME=/home/user \
-    PATH=/home/user/.local/bin:$PATH
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app
-
-# Give executable permissions to chromedriver
-RUN chmod +x $HOME/app/driver/chromedriver
-
-# Run the python file
-#CMD ["python", "Fsg_pp.py"]
-CMD ["uvicorn", "Fsg_pp:app", "--host", "0.0.0.0", "--port", "7860"]
\ No newline at end of file
diff --git a/spaces/LanguageBind/LanguageBind/training/distributed.py b/spaces/LanguageBind/LanguageBind/training/distributed.py
deleted file mode 100644
index 268a6c7ad75a9ef29c72801dbf59d606f3318a59..0000000000000000000000000000000000000000
--- a/spaces/LanguageBind/LanguageBind/training/distributed.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import os
-
-import torch
-import torch.distributed as dist
-
-try:
-    import horovod.torch as hvd
-except ImportError:
-    hvd = None
-
-
-def is_global_master(args):
-    return args.rank == 0
-
-
-def is_local_master(args):
-    return args.local_rank == 0
-
-
-def is_master(args, local=False):
-    return is_local_master(args) if local else is_global_master(args)
-
-
-def is_using_horovod():
-    # NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set
-    # Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required...
-    ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]
-    pmi_vars = ["PMI_RANK", "PMI_SIZE"]
-    if all([var in os.environ for var in ompi_vars]) or all([var in os.environ for var in pmi_vars]):
-        return True
-    else:
-        return False
-
-
-def is_using_distributed():
-    if 'WORLD_SIZE' in os.environ:
-        return int(os.environ['WORLD_SIZE']) > 1
-    if 'SLURM_NTASKS' in os.environ:
-        return int(os.environ['SLURM_NTASKS']) > 1
-    return False
-
-
-def world_info_from_env():
-    local_rank = 0
-    for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
-        if v in os.environ:
-            local_rank = int(os.environ[v])
-            break
-    global_rank = 0
-    for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'):
-        if v in os.environ:
-            global_rank = int(os.environ[v])
-            break
-    world_size = 1
-    for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'):
-        if v in os.environ:
-            world_size = int(os.environ[v])
-            break
-
-    return local_rank, global_rank, world_size
-
-
-def init_distributed_device(args):
-    # Distributed training = training on more than one GPU.
-    # Works in both single and multi-node scenarios.
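-    # Defaults below assume single-process training; they are overwritten once
-    # horovod or torch.distributed is detected (rank = global process index,
-    # local_rank = process index on this node, world_size = total processes).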
- args.distributed = False - args.world_size = 1 - args.rank = 0 # global rank - args.local_rank = 0 - if args.horovod: - assert hvd is not None, "Horovod is not installed" - hvd.init() - args.local_rank = int(hvd.local_rank()) - args.rank = hvd.rank() - args.world_size = hvd.size() - args.distributed = True - os.environ['LOCAL_RANK'] = str(args.local_rank) - os.environ['RANK'] = str(args.rank) - os.environ['WORLD_SIZE'] = str(args.world_size) - elif is_using_distributed(): - if 'SLURM_PROCID' in os.environ: - # DDP via SLURM - args.local_rank, args.rank, args.world_size = world_info_from_env() - # SLURM var -> torch.distributed vars in case needed - os.environ['LOCAL_RANK'] = str(args.local_rank) - os.environ['RANK'] = str(args.rank) - os.environ['WORLD_SIZE'] = str(args.world_size) - torch.distributed.init_process_group( - backend=args.dist_backend, - init_method=args.dist_url, - world_size=args.world_size, - rank=args.rank, - ) - else: - # DDP via torchrun, torch.distributed.launch - args.local_rank, _, _ = world_info_from_env() - torch.distributed.init_process_group( - backend=args.dist_backend, - init_method=args.dist_url) - args.world_size = torch.distributed.get_world_size() - args.rank = torch.distributed.get_rank() - args.distributed = True - - if torch.cuda.is_available(): - if args.distributed and not args.no_set_device_rank: - device = 'cuda:%d' % args.local_rank - else: - device = 'cuda:0' - torch.cuda.set_device(device) - else: - device = 'cpu' - args.device = device - device = torch.device(device) - return device - - -def broadcast_object(args, obj, src=0): - # broadcast a pickle-able python object from rank-0 to all ranks - if args.horovod: - return hvd.broadcast_object(obj, root_rank=src) - else: - if args.rank == src: - objects = [obj] - else: - objects = [None] - dist.broadcast_object_list(objects, src=src) - return objects[0] - - -def all_gather_object(args, obj, dst=0): - # gather a pickle-able python object across all ranks - if args.horovod: - return hvd.allgather_object(obj) - else: - objects = [None for _ in range(args.world_size)] - dist.all_gather_object(objects, obj) - return objects diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/master/master_toy_dataset.py b/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/master/master_toy_dataset.py deleted file mode 100644 index 3d0440240a28a2d64b2f0442cae7d628a7542f42..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/master/master_toy_dataset.py +++ /dev/null @@ -1,30 +0,0 @@ -_base_ = [ - '../../_base_/default_runtime.py', '../../_base_/recog_models/master.py', - '../../_base_/schedules/schedule_adam_step_12e.py', - '../../_base_/recog_pipelines/master_pipeline.py', - '../../_base_/recog_datasets/toy_data.py' -] - -train_list = {{_base_.train_list}} -test_list = {{_base_.test_list}} - -train_pipeline = {{_base_.train_pipeline}} -test_pipeline = {{_base_.test_pipeline}} - -data = dict( - workers_per_gpu=2, - samples_per_gpu=8, - train=dict( - type='UniformConcatDataset', - datasets=train_list, - pipeline=train_pipeline), - val=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline), - test=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline)) - -evaluation = dict(interval=1, metric='acc') diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Dockerfile b/spaces/MCkernick/Image_Restoration_Colorization/Dockerfile deleted file mode 100644 index 
8764e0011f8e0b937674005354ca957317c23fd4..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -FROM nvidia/cuda:11.1-base-ubuntu20.04 - -RUN apt update && DEBIAN_FRONTEND=noninteractive apt install git bzip2 wget unzip python3-pip python3-dev cmake libgl1-mesa-dev python-is-python3 libgtk2.0-dev -yq -ADD . /app -WORKDIR /app -RUN cd Face_Enhancement/models/networks/ &&\ - git clone https://github.com/vacancy/Synchronized-BatchNorm-PyTorch &&\ - cp -rf Synchronized-BatchNorm-PyTorch/sync_batchnorm . &&\ - cd ../../../ - -RUN cd Global/detection_models &&\ - git clone https://github.com/vacancy/Synchronized-BatchNorm-PyTorch &&\ - cp -rf Synchronized-BatchNorm-PyTorch/sync_batchnorm . &&\ - cd ../../ - -RUN cd Face_Detection/ &&\ - wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 &&\ - bzip2 -d shape_predictor_68_face_landmarks.dat.bz2 &&\ - cd ../ - -RUN cd Face_Enhancement/ &&\ - wget https://facevc.blob.core.windows.net/zhanbo/old_photo/pretrain/Face_Enhancement/checkpoints.zip &&\ - unzip checkpoints.zip &&\ - cd ../ &&\ - cd Global/ &&\ - wget https://facevc.blob.core.windows.net/zhanbo/old_photo/pretrain/Global/checkpoints.zip &&\ - unzip checkpoints.zip &&\ - rm -f checkpoints.zip &&\ - cd ../ - -RUN pip3 install numpy - -RUN pip3 install dlib - -RUN pip3 install -r requirements.txt - -RUN git clone https://github.com/NVlabs/SPADE.git - -RUN cd SPADE/ && pip3 install -r requirements.txt - -RUN cd .. - -CMD ["python3", "run.py"] diff --git a/spaces/Mahiruoshi/lovelive-ShojoKageki-vits/text/korean.py b/spaces/Mahiruoshi/lovelive-ShojoKageki-vits/text/korean.py deleted file mode 100644 index 4b6c3fb27532ae6c033023de8a32fc7379bb5431..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/lovelive-ShojoKageki-vits/text/korean.py +++ /dev/null @@ -1,205 +0,0 @@ -import re -from jamo import h2j, j2hcj -import ko_pron - - -# This is a list of Korean classifiers preceded by pure Korean numerals. 
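# (Editorial illustration, not part of the original file.) Whether a numeral is
# read natively or Sino-Korean is decided by the classifier that follows it,
# which number_to_hangul() below checks against the list on the next line, e.g.:
#   number_to_hangul('사과 3개') -> '사과 세개'   ('개' is a listed classifier -> native reading)
#   number_to_hangul('3월')      -> '삼월'        ('월' is not listed -> Sino-Korean reading)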
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (ipa, lazy ipa) pairs: -_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('t͡ɕ','ʧ'), - ('d͡ʑ','ʥ'), - ('ɲ','n^'), - ('ɕ','ʃ'), - ('ʷ','w'), - ('ɭ','l`'), - ('ʎ','ɾ'), - ('ɣ','ŋ'), - ('ɰ','ɯ'), - ('ʝ','j'), - ('ʌ','ə'), - ('ɡ','g'), - ('\u031a','#'), - ('\u0348','='), - ('\u031e',''), - ('\u0320',''), - ('\u0339','') -]] - - -def latin_to_hangul(text): - for regex, replacement in _latin_to_hangul: - text = re.sub(regex, replacement, text) - return text - - -def divide_hangul(text): - text = j2hcj(h2j(text)) - for regex, replacement in _hangul_divided: - text = re.sub(regex, replacement, text) - return text - - -def hangul_number(num, sino=True): - '''Reference https://github.com/Kyubyong/g2pK''' - num = re.sub(',', '', num) - - if num == '0': - return '영' - if not sino and num == '20': - return '스무' - - digits = '123456789' - names = '일이삼사오육칠팔구' - digit2name = {d: n for d, n in zip(digits, names)} - - modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉' - decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔' - digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())} - digit2dec = {d: dec for d, dec in zip(digits, decimals.split())} - - spelledout = [] - for i, digit in enumerate(num): - i = len(num) - i - 1 - if sino: - if i == 0: - name = digit2name.get(digit, '') - elif i == 1: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - else: - if i == 0: - name = digit2mod.get(digit, '') - elif i == 1: - name = digit2dec.get(digit, '') - if digit == '0': - if i % 4 == 0: - last_three = spelledout[-min(3, len(spelledout)):] - if ''.join(last_three) == '': - spelledout.append('') - continue - else: - spelledout.append('') - continue - if i == 2: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 3: - name = digit2name.get(digit, '') + '천' - name = name.replace('일천', '천') - elif i == 4: - name = digit2name.get(digit, '') + '만' - name = name.replace('일만', '만') - elif i == 5: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - elif i == 6: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 7: - name = digit2name.get(digit, '') + '천' - name = name.replace('일천', '천') - elif i == 8: - name = digit2name.get(digit, '') + '억' - elif i == 9: - name = digit2name.get(digit, '') + '십' - elif i == 10: - name = digit2name.get(digit, '') + '백' - elif i == 11: - name = digit2name.get(digit, '') + '천' - elif i == 12: - name 
= digit2name.get(digit, '') + '조' - elif i == 13: - name = digit2name.get(digit, '') + '십' - elif i == 14: - name = digit2name.get(digit, '') + '백' - elif i == 15: - name = digit2name.get(digit, '') + '천' - spelledout.append(name) - return ''.join(elem for elem in spelledout) - - -def number_to_hangul(text): - '''Reference https://github.com/Kyubyong/g2pK''' - tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text)) - for token in tokens: - num, classifier = token - if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers: - spelledout = hangul_number(num, sino=False) - else: - spelledout = hangul_number(num, sino=True) - text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}') - # digit by digit for remaining digits - digits = '0123456789' - names = '영일이삼사오육칠팔구' - for d, n in zip(digits, names): - text = text.replace(d, n) - return text - - -def korean_to_lazy_ipa(text): - text = latin_to_hangul(text) - text = number_to_hangul(text) - text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa'),text).split('] ~ [')[0] - for regex, replacement in _ipa_to_lazy_ipa: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/dataset/reseed.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/dataset/reseed.py deleted file mode 100644 index 600c998fa33485c073af7f9e13e885350a5c6940..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/dataset/reseed.py +++ /dev/null @@ -1,6 +0,0 @@ -import torch -import random - -def reseed(seed): - random.seed(seed) - torch.manual_seed(seed) \ No newline at end of file diff --git a/spaces/ManDag004/animals/app.py b/spaces/ManDag004/animals/app.py deleted file mode 100644 index 43fbabbebe0cc3fea3d5c171bf124ca50d419e97..0000000000000000000000000000000000000000 --- a/spaces/ManDag004/animals/app.py +++ /dev/null @@ -1,17 +0,0 @@ -import gradio as gr -from fastai.vision.all import * - -learn = load_learner("model.pkl") - -animal_list = ['cat', 'cow', 'deer', 'dog', 'donkey', 'goat', 'horse', 'pig', 'rabbit', 'sheep'] - -def classify_img(img): - animal, _, prob = learn.predict(img) - return dict(zip(animal_list, map(float, prob))) - -image = gr.inputs.Image(shape=(192, 192)) -label = gr.outputs.Label() - - -intf = gr.Interface(fn=classify_img, inputs=image, outputs=label) -intf.launch(inline=False) \ No newline at end of file diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/optimizer.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/optimizer.py deleted file mode 100644 index 4ef3e9ff8f9c6926e32bdf027612267b64ed80df..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/optimizer.py +++ /dev/null @@ -1,508 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -from collections import defaultdict -from itertools import chain - -from torch.nn.utils import clip_grad - -from annotator.uniformer.mmcv.utils import TORCH_VERSION, _BatchNorm, digit_version -from ..dist_utils import allreduce_grads -from ..fp16_utils import LossScaler, wrap_fp16_model -from .hook import HOOKS, Hook - -try: - # If PyTorch version >= 1.6.0, torch.cuda.amp.GradScaler would be imported - # and used; otherwise, auto fp16 will adopt mmcv's implementation. 
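    # (Editorial sketch, not part of the original file.) The Fp16OptimizerHook
    # defined below drives this GradScaler in the canonical AMP pattern:
    #   scaler.scale(loss).backward()  # backprop through a scaled loss
    #   scaler.unscale_(optimizer)     # unscale grads so clipping sees true values
    #   scaler.step(optimizer)         # skips the update if grads overflowed
    #   scaler.update()                # grow/shrink the scale for the next iter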
- from torch.cuda.amp import GradScaler -except ImportError: - pass - - -@HOOKS.register_module() -class OptimizerHook(Hook): - - def __init__(self, grad_clip=None): - self.grad_clip = grad_clip - - def clip_grads(self, params): - params = list( - filter(lambda p: p.requires_grad and p.grad is not None, params)) - if len(params) > 0: - return clip_grad.clip_grad_norm_(params, **self.grad_clip) - - def after_train_iter(self, runner): - runner.optimizer.zero_grad() - runner.outputs['loss'].backward() - if self.grad_clip is not None: - grad_norm = self.clip_grads(runner.model.parameters()) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update({'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - runner.optimizer.step() - - -@HOOKS.register_module() -class GradientCumulativeOptimizerHook(OptimizerHook): - """Optimizer Hook implements multi-iters gradient cumulating. - - Args: - cumulative_iters (int, optional): Num of gradient cumulative iters. - The optimizer will step every `cumulative_iters` iters. - Defaults to 1. - - Examples: - >>> # Use cumulative_iters to simulate a large batch size - >>> # It is helpful when the hardware cannot handle a large batch size. - >>> loader = DataLoader(data, batch_size=64) - >>> optim_hook = GradientCumulativeOptimizerHook(cumulative_iters=4) - >>> # almost equals to - >>> loader = DataLoader(data, batch_size=256) - >>> optim_hook = OptimizerHook() - """ - - def __init__(self, cumulative_iters=1, **kwargs): - super(GradientCumulativeOptimizerHook, self).__init__(**kwargs) - - assert isinstance(cumulative_iters, int) and cumulative_iters > 0, \ - f'cumulative_iters only accepts positive int, but got ' \ - f'{type(cumulative_iters)} instead.' - - self.cumulative_iters = cumulative_iters - self.divisible_iters = 0 - self.remainder_iters = 0 - self.initialized = False - - def has_batch_norm(self, module): - if isinstance(module, _BatchNorm): - return True - for m in module.children(): - if self.has_batch_norm(m): - return True - return False - - def _init(self, runner): - if runner.iter % self.cumulative_iters != 0: - runner.logger.warning( - 'Resume iter number is not divisible by cumulative_iters in ' - 'GradientCumulativeOptimizerHook, which means the gradient of ' - 'some iters is lost and the result may be influenced slightly.' 
- ) - - if self.has_batch_norm(runner.model) and self.cumulative_iters > 1: - runner.logger.warning( - 'GradientCumulativeOptimizerHook may slightly decrease ' - 'performance if the model has BatchNorm layers.') - - residual_iters = runner.max_iters - runner.iter - - self.divisible_iters = ( - residual_iters // self.cumulative_iters * self.cumulative_iters) - self.remainder_iters = residual_iters - self.divisible_iters - - self.initialized = True - - def after_train_iter(self, runner): - if not self.initialized: - self._init(runner) - - if runner.iter < self.divisible_iters: - loss_factor = self.cumulative_iters - else: - loss_factor = self.remainder_iters - loss = runner.outputs['loss'] - loss = loss / loss_factor - loss.backward() - - if (self.every_n_iters(runner, self.cumulative_iters) - or self.is_last_iter(runner)): - - if self.grad_clip is not None: - grad_norm = self.clip_grads(runner.model.parameters()) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update({'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - runner.optimizer.step() - runner.optimizer.zero_grad() - - -if (TORCH_VERSION != 'parrots' - and digit_version(TORCH_VERSION) >= digit_version('1.6.0')): - - @HOOKS.register_module() - class Fp16OptimizerHook(OptimizerHook): - """FP16 optimizer hook (using PyTorch's implementation). - - If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, - to take care of the optimization procedure. - - Args: - loss_scale (float | str | dict): Scale factor configuration. - If loss_scale is a float, static loss scaling will be used with - the specified scale. If loss_scale is a string, it must be - 'dynamic', then dynamic loss scaling will be used. - It can also be a dict containing arguments of GradScalar. - Defaults to 512. For Pytorch >= 1.6, mmcv uses official - implementation of GradScaler. If you use a dict version of - loss_scale to create GradScaler, please refer to: - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler - for the parameters. - - Examples: - >>> loss_scale = dict( - ... init_scale=65536.0, - ... growth_factor=2.0, - ... backoff_factor=0.5, - ... growth_interval=2000 - ... 
) - >>> optimizer_hook = Fp16OptimizerHook(loss_scale=loss_scale) - """ - - def __init__(self, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - loss_scale=512., - distributed=True): - self.grad_clip = grad_clip - self.coalesce = coalesce - self.bucket_size_mb = bucket_size_mb - self.distributed = distributed - self._scale_update_param = None - if loss_scale == 'dynamic': - self.loss_scaler = GradScaler() - elif isinstance(loss_scale, float): - self._scale_update_param = loss_scale - self.loss_scaler = GradScaler(init_scale=loss_scale) - elif isinstance(loss_scale, dict): - self.loss_scaler = GradScaler(**loss_scale) - else: - raise ValueError('loss_scale must be of type float, dict, or ' - f'"dynamic", got {loss_scale}') - - def before_run(self, runner): - """Preparing steps before Mixed Precision Training.""" - # wrap model mode to fp16 - wrap_fp16_model(runner.model) - # resume from state dict - if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']: - scaler_state_dict = runner.meta['fp16']['loss_scaler'] - self.loss_scaler.load_state_dict(scaler_state_dict) - - def copy_grads_to_fp32(self, fp16_net, fp32_weights): - """Copy gradients from fp16 model to fp32 weight copy.""" - for fp32_param, fp16_param in zip(fp32_weights, - fp16_net.parameters()): - if fp16_param.grad is not None: - if fp32_param.grad is None: - fp32_param.grad = fp32_param.data.new( - fp32_param.size()) - fp32_param.grad.copy_(fp16_param.grad) - - def copy_params_to_fp16(self, fp16_net, fp32_weights): - """Copy updated params from fp32 weight copy to fp16 model.""" - for fp16_param, fp32_param in zip(fp16_net.parameters(), - fp32_weights): - fp16_param.data.copy_(fp32_param.data) - - def after_train_iter(self, runner): - """Backward optimization steps for Mixed Precision Training. For - dynamic loss scaling, please refer to - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler. - - 1. Scale the loss by a scale factor. - 2. Backward the loss to obtain the gradients. - 3. Unscale the optimizer’s gradient tensors. - 4. Call optimizer.step() and update scale factor. - 5. Save loss_scaler state_dict for resume purpose. - """ - # clear grads of last iteration - runner.model.zero_grad() - runner.optimizer.zero_grad() - - self.loss_scaler.scale(runner.outputs['loss']).backward() - self.loss_scaler.unscale_(runner.optimizer) - # grad clip - if self.grad_clip is not None: - grad_norm = self.clip_grads(runner.model.parameters()) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update({'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - # backward and update scaler - self.loss_scaler.step(runner.optimizer) - self.loss_scaler.update(self._scale_update_param) - - # save state_dict of loss_scaler - runner.meta.setdefault( - 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() - - @HOOKS.register_module() - class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook, - Fp16OptimizerHook): - """Fp16 optimizer Hook (using PyTorch's implementation) implements - multi-iters gradient cumulating. - - If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, - to take care of the optimization procedure. 
- """ - - def __init__(self, *args, **kwargs): - super(GradientCumulativeFp16OptimizerHook, - self).__init__(*args, **kwargs) - - def after_train_iter(self, runner): - if not self.initialized: - self._init(runner) - - if runner.iter < self.divisible_iters: - loss_factor = self.cumulative_iters - else: - loss_factor = self.remainder_iters - loss = runner.outputs['loss'] - loss = loss / loss_factor - - self.loss_scaler.scale(loss).backward() - - if (self.every_n_iters(runner, self.cumulative_iters) - or self.is_last_iter(runner)): - - # copy fp16 grads in the model to fp32 params in the optimizer - self.loss_scaler.unscale_(runner.optimizer) - - if self.grad_clip is not None: - grad_norm = self.clip_grads(runner.model.parameters()) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update( - {'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - - # backward and update scaler - self.loss_scaler.step(runner.optimizer) - self.loss_scaler.update(self._scale_update_param) - - # save state_dict of loss_scaler - runner.meta.setdefault( - 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() - - # clear grads - runner.model.zero_grad() - runner.optimizer.zero_grad() - -else: - - @HOOKS.register_module() - class Fp16OptimizerHook(OptimizerHook): - """FP16 optimizer hook (mmcv's implementation). - - The steps of fp16 optimizer is as follows. - 1. Scale the loss value. - 2. BP in the fp16 model. - 2. Copy gradients from fp16 model to fp32 weights. - 3. Update fp32 weights. - 4. Copy updated parameters from fp32 weights to fp16 model. - - Refer to https://arxiv.org/abs/1710.03740 for more details. - - Args: - loss_scale (float | str | dict): Scale factor configuration. - If loss_scale is a float, static loss scaling will be used with - the specified scale. If loss_scale is a string, it must be - 'dynamic', then dynamic loss scaling will be used. - It can also be a dict containing arguments of LossScaler. - Defaults to 512. - """ - - def __init__(self, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - loss_scale=512., - distributed=True): - self.grad_clip = grad_clip - self.coalesce = coalesce - self.bucket_size_mb = bucket_size_mb - self.distributed = distributed - if loss_scale == 'dynamic': - self.loss_scaler = LossScaler(mode='dynamic') - elif isinstance(loss_scale, float): - self.loss_scaler = LossScaler( - init_scale=loss_scale, mode='static') - elif isinstance(loss_scale, dict): - self.loss_scaler = LossScaler(**loss_scale) - else: - raise ValueError('loss_scale must be of type float, dict, or ' - f'"dynamic", got {loss_scale}') - - def before_run(self, runner): - """Preparing steps before Mixed Precision Training. - - 1. Make a master copy of fp32 weights for optimization. - 2. Convert the main model from fp32 to fp16. 
- """ - # keep a copy of fp32 weights - old_groups = runner.optimizer.param_groups - runner.optimizer.param_groups = copy.deepcopy( - runner.optimizer.param_groups) - state = defaultdict(dict) - p_map = { - old_p: p - for old_p, p in zip( - chain(*(g['params'] for g in old_groups)), - chain(*(g['params'] - for g in runner.optimizer.param_groups))) - } - for k, v in runner.optimizer.state.items(): - state[p_map[k]] = v - runner.optimizer.state = state - # convert model to fp16 - wrap_fp16_model(runner.model) - # resume from state dict - if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']: - scaler_state_dict = runner.meta['fp16']['loss_scaler'] - self.loss_scaler.load_state_dict(scaler_state_dict) - - def copy_grads_to_fp32(self, fp16_net, fp32_weights): - """Copy gradients from fp16 model to fp32 weight copy.""" - for fp32_param, fp16_param in zip(fp32_weights, - fp16_net.parameters()): - if fp16_param.grad is not None: - if fp32_param.grad is None: - fp32_param.grad = fp32_param.data.new( - fp32_param.size()) - fp32_param.grad.copy_(fp16_param.grad) - - def copy_params_to_fp16(self, fp16_net, fp32_weights): - """Copy updated params from fp32 weight copy to fp16 model.""" - for fp16_param, fp32_param in zip(fp16_net.parameters(), - fp32_weights): - fp16_param.data.copy_(fp32_param.data) - - def after_train_iter(self, runner): - """Backward optimization steps for Mixed Precision Training. For - dynamic loss scaling, please refer `loss_scalar.py` - - 1. Scale the loss by a scale factor. - 2. Backward the loss to obtain the gradients (fp16). - 3. Copy gradients from the model to the fp32 weight copy. - 4. Scale the gradients back and update the fp32 weight copy. - 5. Copy back the params from fp32 weight copy to the fp16 model. - 6. Save loss_scaler state_dict for resume purpose. 
- """ - # clear grads of last iteration - runner.model.zero_grad() - runner.optimizer.zero_grad() - # scale the loss value - scaled_loss = runner.outputs['loss'] * self.loss_scaler.loss_scale - scaled_loss.backward() - # copy fp16 grads in the model to fp32 params in the optimizer - - fp32_weights = [] - for param_group in runner.optimizer.param_groups: - fp32_weights += param_group['params'] - self.copy_grads_to_fp32(runner.model, fp32_weights) - # allreduce grads - if self.distributed: - allreduce_grads(fp32_weights, self.coalesce, - self.bucket_size_mb) - - has_overflow = self.loss_scaler.has_overflow(fp32_weights) - # if has overflow, skip this iteration - if not has_overflow: - # scale the gradients back - for param in fp32_weights: - if param.grad is not None: - param.grad.div_(self.loss_scaler.loss_scale) - if self.grad_clip is not None: - grad_norm = self.clip_grads(fp32_weights) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update( - {'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - # update fp32 params - runner.optimizer.step() - # copy fp32 params to the fp16 model - self.copy_params_to_fp16(runner.model, fp32_weights) - self.loss_scaler.update_scale(has_overflow) - if has_overflow: - runner.logger.warning('Check overflow, downscale loss scale ' - f'to {self.loss_scaler.cur_scale}') - - # save state_dict of loss_scaler - runner.meta.setdefault( - 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() - - @HOOKS.register_module() - class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook, - Fp16OptimizerHook): - """Fp16 optimizer Hook (using mmcv implementation) implements multi- - iters gradient cumulating.""" - - def __init__(self, *args, **kwargs): - super(GradientCumulativeFp16OptimizerHook, - self).__init__(*args, **kwargs) - - def after_train_iter(self, runner): - if not self.initialized: - self._init(runner) - - if runner.iter < self.divisible_iters: - loss_factor = self.cumulative_iters - else: - loss_factor = self.remainder_iters - - loss = runner.outputs['loss'] - loss = loss / loss_factor - - # scale the loss value - scaled_loss = loss * self.loss_scaler.loss_scale - scaled_loss.backward() - - if (self.every_n_iters(runner, self.cumulative_iters) - or self.is_last_iter(runner)): - - # copy fp16 grads in the model to fp32 params in the optimizer - fp32_weights = [] - for param_group in runner.optimizer.param_groups: - fp32_weights += param_group['params'] - self.copy_grads_to_fp32(runner.model, fp32_weights) - # allreduce grads - if self.distributed: - allreduce_grads(fp32_weights, self.coalesce, - self.bucket_size_mb) - - has_overflow = self.loss_scaler.has_overflow(fp32_weights) - # if has overflow, skip this iteration - if not has_overflow: - # scale the gradients back - for param in fp32_weights: - if param.grad is not None: - param.grad.div_(self.loss_scaler.loss_scale) - if self.grad_clip is not None: - grad_norm = self.clip_grads(fp32_weights) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update( - {'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - # update fp32 params - runner.optimizer.step() - # copy fp32 params to the fp16 model - self.copy_params_to_fp16(runner.model, fp32_weights) - else: - runner.logger.warning( - 'Check overflow, downscale loss scale ' - f'to {self.loss_scaler.cur_scale}') - - self.loss_scaler.update_scale(has_overflow) - - # save state_dict of loss_scaler - runner.meta.setdefault( - 'fp16', {})['loss_scaler'] = 
self.loss_scaler.state_dict() - - # clear grads - runner.model.zero_grad() - runner.optimizer.zero_grad() diff --git a/spaces/MrD05/text-generation-webui-space/api-example-stream.py b/spaces/MrD05/text-generation-webui-space/api-example-stream.py deleted file mode 100644 index a5ed420252fdceab73cc26d83a7b87f60981ec95..0000000000000000000000000000000000000000 --- a/spaces/MrD05/text-generation-webui-space/api-example-stream.py +++ /dev/null @@ -1,90 +0,0 @@ -''' - -Contributed by SagsMug. Thank you SagsMug. -https://github.com/oobabooga/text-generation-webui/pull/175 - -''' - -import asyncio -import json -import random -import string - -import websockets - - -def random_hash(): - letters = string.ascii_lowercase + string.digits - return ''.join(random.choice(letters) for i in range(9)) - -async def run(context): - server = "127.0.0.1" - params = { - 'max_new_tokens': 200, - 'do_sample': True, - 'temperature': 0.5, - 'top_p': 0.9, - 'typical_p': 1, - 'repetition_penalty': 1.05, - 'top_k': 0, - 'min_length': 0, - 'no_repeat_ngram_size': 0, - 'num_beams': 1, - 'penalty_alpha': 0, - 'length_penalty': 1, - 'early_stopping': False, - } - session = random_hash() - - async with websockets.connect(f"ws://{server}:7860/queue/join") as websocket: - while content := json.loads(await websocket.recv()): - #Python3.10 syntax, replace with if elif on older - match content["msg"]: - case "send_hash": - await websocket.send(json.dumps({ - "session_hash": session, - "fn_index": 7 - })) - case "estimation": - pass - case "send_data": - await websocket.send(json.dumps({ - "session_hash": session, - "fn_index": 7, - "data": [ - context, - params['max_new_tokens'], - params['do_sample'], - params['temperature'], - params['top_p'], - params['typical_p'], - params['repetition_penalty'], - params['top_k'], - params['min_length'], - params['no_repeat_ngram_size'], - params['num_beams'], - params['penalty_alpha'], - params['length_penalty'], - params['early_stopping'], - ] - })) - case "process_starts": - pass - case "process_generating" | "process_completed": - yield content["output"]["data"][0] - # You can search for your desired end indicator and - # stop generation by closing the websocket here - if (content["msg"] == "process_completed"): - break - -prompt = "What I would like to say is the following: " - -async def get_result(): - async for response in run(prompt): - # Print intermediate steps - print(response) - - # Print final result - print(response) - -asyncio.run(get_result()) diff --git a/spaces/MrVicente/RA-BART/inference.py b/spaces/MrVicente/RA-BART/inference.py deleted file mode 100644 index 92a1c3d31b9ece4cdf1ad9f01c71d13673b214ae..0000000000000000000000000000000000000000 --- a/spaces/MrVicente/RA-BART/inference.py +++ /dev/null @@ -1,349 +0,0 @@ -############################# -# Imports -############################# - -# Python modules -from typing import List - -# Remote modules -import numpy as np -import torch - -# Local modules -from kgs_binding.relation_mapper_builder import RelationsMapperBuilder -from kgs_binding.kg_qa_binding_utils import load_kg_handler -from data.relation_utils import clean_relations -from model_utils import create_layers_head_mask - -from transformers import ( - BartForConditionalGeneration, - BartTokenizer, - BartConfig, - DisjunctiveConstraint, -) - -from utils import get_jump_chunks - -############################# -# Constants -############################# - -############################# -# Stuff -############################# -from custom_tokenizer import 
BartCustomTokenizerFast -from custom_bart import BartCustomConfig, BartCustomForConditionalGeneration -from utils import get_device, KGType, Model_Type - -from kgs_binding.kg_base_wrapper import KGBaseHandler -from kgs_binding.swow_handler import SwowHandler -from kgs_binding.conceptnet_handler import ConceptNetHandler - -class Inference: - def __init__(self, model_path:str, max_length=32): - self.device = get_device() - self.tokenizer = self.prepare_tokenizer() - self.model = self.prepare_model(model_path) - self.max_length = max_length - - def prepare_tokenizer(self): - tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') - return tokenizer - - def prepare_model(self, model_path): - config = BartConfig.from_pretrained(model_path) - model = BartForConditionalGeneration.from_pretrained(model_path, config=config).to(self.device) - model.eval() - return model - - def pre_process_context(self, context): - context = context.lower() - context_tokenized = self.tokenizer(context, padding='max_length', - truncation='longest_first', max_length=self.max_length, - return_tensors="pt", - ) - return context_tokenized - - def generate_based_on_context(self, context): - model_input = self.pre_process_context(context) - generated_answers_encoded = self.model.generate(input_ids=model_input["input_ids"].to(self.device), - attention_mask=model_input["attention_mask"].to(self.device), - min_length=1, - max_length=self.max_length, - do_sample=True, - early_stopping=True, - num_beams=4, - temperature=1.0, - top_k=None, - top_p=None, - # eos_token_id=tokenizer.eos_token_id, - no_repeat_ngram_size=2, - num_return_sequences=1, - return_dict_in_generate=True, - output_attentions=True, - output_scores=True) - # print(f'Scores: {generated_answers_encoded}') - response = self.tokenizer.batch_decode(generated_answers_encoded['sequences'], skip_special_tokens=True, - clean_up_tokenization_spaces=True) - encoder_attentions = generated_answers_encoded['encoder_attentions'] - return response, encoder_attentions, model_input - - def prepare_context_for_visualization(self, context): - examples = [] - response, encoder_outputs, model_input = self.generate_based_on_context(context) - encoder_outputs = torch.stack(encoder_outputs) - n_layers, batch_size, n_heads, src, tgt = encoder_outputs.size() - print(encoder_outputs.size()) - encoder_attentions = encoder_outputs.view(batch_size, n_layers, n_heads, src, tgt) - for i, ex in enumerate(encoder_attentions): - d = {} - indices = model_input['input_ids'][i].detach().cpu() - all_tokens = self.tokenizer.convert_ids_to_tokens(indices) - useful_indeces = indices != self.tokenizer.pad_token_id - all_tokens = np.array(all_tokens)[useful_indeces] - all_tokens = [tok.replace('Ġ', '') for tok in all_tokens] - d['words'] = all_tokens - d['attentions'] = ex.detach().cpu().numpy() - examples.append(d) - print(d['words']) - return response, examples - -class RelationsInference: - def __init__(self, model_path:str, kg_type: KGType, model_type:Model_Type, max_length=32): - self.device = get_device() - kg_handler: KGBaseHandler = load_kg_handler(kg_type) - self.kg_handler = kg_handler - relation_names = kg_handler.get_relation_types() - self.tokenizer = self.prepare_tokenizer(relation_names, model_type) - self.simple_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') - self.model, self.config = self.prepare_model(relation_names, model_path, model_type) - self.relation_mapper_builder = RelationsMapperBuilder(knowledge=kg_handler) - self.max_length = max_length - - def 
prepare_tokenizer(self, relation_names: List[str], model_type:Model_Type): - tokenizer = BartCustomTokenizerFast.from_pretrained('facebook/bart-large') - tokenizer.set_known_relation_names(relation_names) - tokenizer.set_operation_mode(there_is_difference_between_relations=model_type.there_is_difference_between_relations()) - return tokenizer - - def prepare_model(self, relation_names: List[str], model_path, model_type:Model_Type): - config = BartCustomConfig.from_pretrained(model_path, revision='master') - print('config.heads_mask:', config.heads_mask) - if config.num_relation_kinds is None: - config.num_relation_kinds = len(relation_names) - if config.is_simple_mask_commonsense is None: - config.is_simple_mask_commonsense = model_type.is_simple_mask_commonsense() - if config.heads_mask is None: - config.heads_mask = create_layers_head_mask(config)#, heads_mask_type, specific_heads) - model = BartCustomForConditionalGeneration.from_pretrained(model_path, config=config, revision='master').to(self.device) - model.eval() - return model, config - - def pre_process_context(self, context): - context = context.lower() - # process context in search for relations - commonsense_relations = self.relation_mapper_builder.get_relations_mapping_complex(context=[context], clear_common_wds=True) - # clean relation - commonsense_relation = clean_relations(commonsense_relations)[0] - # convert this relations to matrices - print(commonsense_relation) - context_tokenized = self.tokenizer(context, padding='max_length', - truncation='longest_first', max_length=self.max_length, - return_tensors="pt", return_offsets_mapping=True, - input_commonsense_relations=commonsense_relation, - ) - return context_tokenized - - def get_relations_information(self, phrase_generated): - all_concepts = self.relation_mapper_builder.get_kg_concepts_from_context([phrase_generated], clear_common_wds=True)[0] - words = phrase_generated.strip().split(' ') # all words - concepts_with_relations = self.relation_mapper_builder.get_concepts_from_context(phrase_generated, clear_common_wds=True) - concepts_with_no_relations = list(set(all_concepts).difference(concepts_with_relations)) - #print('without_relations:', concepts_with_no_relations) - print("====== RELATIONS SUMMARY ======") - print('phrase_generated:', phrase_generated) - print('words:', words) - print('all_concepts:', all_concepts) - print('concepts_with_relations:', concepts_with_relations) - print('without_relations:', concepts_with_no_relations) - print("\n== STATS:") - print('n_words:', len(words)) - print('n_concepts:', len(all_concepts)) - print('n_concepts_with_relations:', len(concepts_with_relations)) - print('n_c_without_relations:', len(concepts_with_no_relations)) - print("====== ================= ======") - return words, all_concepts, concepts_with_relations, concepts_with_no_relations - - def remove_subsets(self, l): - l2 = l[:] - for m in l: - for n in l: - if set(m).issubset(set(n)) and m != n: - l2.remove(m) - break - return l2 - - def generate_based_on_context(self, context, use_kg=False): - model_input = self.pre_process_context(context) - #print(model_input) - gen_kwargs = {} - if "input_commonsense_relations" in model_input: - #print(model_input['input_commonsense_relations'].sum()) - gen_kwargs["relation_inputs"] = model_input.get("input_commonsense_relations").to(self.device) - - constraints = None - if use_kg: - constraints = [] - concepts_from_context = self.relation_mapper_builder.get_concepts_from_context(context=context, clear_common_wds=True) - 
useful_concepts = [self.relation_mapper_builder.knowledge.get_related_concepts(concept) for concept in concepts_from_context] - if not useful_concepts: - useful_concepts = [self.kg_handler.get_related_concepts(concept) for concept in concepts_from_context] - useful_concepts = [[f'{phrase}' for phrase in concepts] for concepts in useful_concepts] # add spaces - #useful_concepts = [[phrase for phrase in concepts if len(phrase.split(' ')) == 1] for concepts in useful_concepts] - #useful_concepts = list(itertools.chain.from_iterable(useful_concepts)) - #print('useful_concepts:', useful_concepts[:5]) - if concepts_from_context: - for context_concept, neighbour_concepts in zip(concepts_from_context, useful_concepts): - print('neighbour:', neighbour_concepts[:20]) - #flexible_words = self.most_similar_words(context_concept, neighbour_concepts) # limit the upperbound - #flexible_words = [word for word in flexible_words if word not in context_concept] # remove input concepts - flexible_words = [word for word in neighbour_concepts if word not in context_concept] # remove input concepts - flexible_words_ids: List[List[int]] = self.simple_tokenizer(flexible_words, add_prefix_space=True,add_special_tokens=False).input_ids - flexible_words_ids = self.remove_subsets(flexible_words_ids) - #add_prefix_space=True - #flexible_words_ids = [x for x in flexible_words_ids if len(x) == 1] # problem with subsets - flexible_words_ids = flexible_words_ids[:10] - print('flexible_words_ids:', flexible_words_ids[:3]) - constraint = DisjunctiveConstraint(flexible_words_ids) - constraints.append(constraint) - else: - constraints = None - - generated_answers_encoded = self.model.generate(input_ids=model_input["input_ids"].to(self.device), - attention_mask=model_input["attention_mask"].to(self.device), - constraints=constraints, - min_length=1, - max_length=self.max_length, - do_sample=False, - early_stopping=True, - num_beams=8, - temperature=1.0, - top_k=None, - top_p=None, - # eos_token_id=tokenizer.eos_token_id, - no_repeat_ngram_size=2, - num_return_sequences=1, - return_dict_in_generate=True, - output_attentions=True, - output_scores=True, - **gen_kwargs, - ) - # print(f'Scores: {generated_answers_encoded}') - response = self.tokenizer.batch_decode(generated_answers_encoded['sequences'], skip_special_tokens=True, - clean_up_tokenization_spaces=True) - encoder_attentions = generated_answers_encoded['encoder_attentions'] - return response, encoder_attentions, model_input - - def get_related_concepts_list(self, knowledge, list_concepts): - other_concepts = [] - for concept in list_concepts: - other_near_concepts = knowledge.get_related_concepts(concept) - other_concepts.extend(other_near_concepts) - return other_concepts - - - def generate_contrained_based_on_context(self, contexts, use_kg=True, max_concepts=1): - model_inputs = [self.pre_process_context(context) for context in contexts] - constraints = None - if use_kg: - constraints = [] - concepts_from_contexts = [self.relation_mapper_builder.get_concepts_from_context(context=context, clear_common_wds=True) for context in contexts] - neighbours_contexts = []#[self.get_related_concepts_list(self.relation_mapper_builder.knowledge, context) for context in concepts_from_contexts] - if not neighbours_contexts: - neighbours_contexts = [self.get_related_concepts_list(self.kg_handler, context) for context in concepts_from_contexts] - all_constraints = [] - for context_neighbours in neighbours_contexts: - # context_neighbours is a collection of concepts - # lets create sub 
collections of concepts - context_neighbours = [f' {concept}' for concept in context_neighbours if len(concept) > 3] - n_size_chuncks = len(context_neighbours) // max_concepts - n_size_chuncks = n_size_chuncks if n_size_chuncks > 0 else 1 - sub_concepts_collection = list(get_jump_chunks(context_neighbours, jump=n_size_chuncks)) - constraints = [] - for sub_concepts in sub_concepts_collection[:max_concepts]: - flexible_words_ids: List[List[int]] = self.tokenizer(sub_concepts, - add_special_tokens=False).input_ids # add_prefix_space=True, - # flexible_words_ids = self.remove_subsets(flexible_words_ids) - flexible_words_ids = [[word_ids[0]] for word_ids in flexible_words_ids] - disjunctive_set = list(map(list, set(map(frozenset, flexible_words_ids)))) - if not any(disjunctive_set): - continue - constraint = DisjunctiveConstraint(disjunctive_set) - constraints.append(constraint) - if not any(constraints): - constraints = None - all_constraints.append(constraints) - else: - all_constraints = None - if not all_constraints: - all_constraints = None - - generated_answers_encoded = [] - encoder_attentions_list = [] - for i, contraints in enumerate(all_constraints): - #print('contraints.token_ids:', [x.token_ids for x in contraints]) - gen_kwargs = {} - inputs = model_inputs[i] - if "input_commonsense_relations" in inputs: - # print(model_input['input_commonsense_relations'].sum()) - gen_kwargs["relation_inputs"] = inputs.get("input_commonsense_relations").to(self.device) - #print('model_kwargs.get("attention_mask"):', model_kwargs.get("attention_mask")) - gen = self.model.generate(input_ids=inputs["input_ids"].to(self.device), - attention_mask=inputs["attention_mask"].to(self.device), - constraints=constraints, - min_length=1, - max_length=self.max_length, - do_sample=False, - early_stopping=True, - num_beams=8, - temperature=1.0, - top_k=None, - top_p=None, - # eos_token_id=tokenizer.eos_token_id, - no_repeat_ngram_size=2, - num_return_sequences=1, - return_dict_in_generate=True, - output_attentions=True, - output_scores=True, - **gen_kwargs, - ) - # print('[gen]:', gen) - # print(tokenizer.batch_decode(gen)) - generated_answers_encoded.append(gen['sequences'][0].detach().cpu()) - encoder_attentions_list.append(gen['encoder_attentions'][0].detach().cpu()) - # print(f'Scores: {generated_answers_encoded}') - text_results = self.tokenizer.batch_decode(generated_answers_encoded, skip_special_tokens=True, - clean_up_tokenization_spaces=True) - return text_results, encoder_attentions_list, model_inputs - - def prepare_context_for_visualization(self, context): - examples, relations = [], [] - response, encoder_outputs, model_input = self.generate_based_on_context(context) - input_commonsense_relations = model_input.get("input_commonsense_relations") - encoder_outputs = torch.stack(encoder_outputs) - n_layers, batch_size, n_heads, src, tgt = encoder_outputs.size() - print(encoder_outputs.size()) - encoder_attentions = encoder_outputs.view(batch_size, n_layers, n_heads, src, tgt) - for i, ex in enumerate(encoder_attentions): - d = {} - indices = model_input['input_ids'][i].detach().cpu() - all_tokens = self.tokenizer.convert_ids_to_tokens(indices) - useful_indeces = indices != self.tokenizer.pad_token_id - all_tokens = np.array(all_tokens)[useful_indeces] - all_tokens = [tok.replace('Ġ', '') for tok in all_tokens] - d['words'] = all_tokens - d['attentions'] = ex.detach().cpu().numpy() - examples.append(d) - relations.append(input_commonsense_relations[i]) - print(d['words']) - return response, examples, 
relations
diff --git a/spaces/MultiTransformer/snake_by_princepspolycap/snake_dev_team.py b/spaces/MultiTransformer/snake_by_princepspolycap/snake_dev_team.py
deleted file mode 100644
index b2cb8adcfaba9aa247aac26df302a47b85ef4c22..0000000000000000000000000000000000000000
--- a/spaces/MultiTransformer/snake_by_princepspolycap/snake_dev_team.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from autogen import AssistantAgent, UserProxyAgent, config_list_from_json, GroupChat, GroupChatManager
-
-# Load the configuration for GPT-4 from a JSON file
-config_list_gpt4 = config_list_from_json(
-    "../OAI_CONFIG_LIST.json",
-    filter_dict={
-        "model": ["gpt-4-0613", "gpt-4-32k", "gpt-4", "gpt-4-0314"],
-    },
-)
-
-# Define the GPT-4 configuration parameters
-gpt4_config = {
-    "seed": 42,
-    "temperature": 0,
-    "config_list": config_list_gpt4,
-    "request_timeout": 1200,
-}
-
-# Define the common working directory for all agents
-working_directory = "game_files"
-
-# Initialize the Player agent, responsible for providing gameplay feedback
-player = UserProxyAgent(
-    name="Player",
-    system_message="Player: Your role is to provide feedback on the gameplay. Collaborate with the Game Designer to ensure the game meets desired expectations.",
-    code_execution_config={
-        "work_dir": working_directory,
-        "use_docker": False,
-        "timeout": 120,
-        "last_n_messages": 1,
-    },
-)
-
-# Initialize the Game Designer agent, responsible for designing the game
-game_designer = AssistantAgent(
-    name="Game_Designer",
-    llm_config=gpt4_config,
-    system_message="Game Designer: Design the snake game, ensuring all details are documented in 'game_design.txt'. Collaborate with the Player to align the design with feedback and expectations."
-)
-
-# Initialize the Programmer agent, responsible for coding the game
-programmer = AssistantAgent(
-    name="Programmer",
-    llm_config=gpt4_config,
-    system_message="Programmer: Code the snake game and save it in the working directory. For code execution, collaborate with the Code Executor. If feedback is needed, consult the Game Tester."
-)
-
-# Initialize the Game Tester agent, responsible for playtesting the game
-game_tester = UserProxyAgent(
-    name="Game_Tester",
-    system_message="Game Tester: Playtest the game and provide feedback on gameplay mechanics and user experience. Report any bugs or glitches. Collaborate with the Programmer for any necessary adjustments.",
-    code_execution_config={
-        "work_dir": working_directory,
-        "use_docker": False,
-        "timeout": 120,
-        "last_n_messages": 3,
-    },
-    human_input_mode="ALWAYS",
-)
-
-# Initialize the Code Executor agent, responsible for executing the game code
-code_executor = UserProxyAgent(
-    name="Code_Executor",
-    system_message="Code Executor: Execute the provided code from the Programmer in the designated environment. Report outcomes and potential issues. Ensure the code follows best practices and recommend enhancements to the Programmer.",
-    code_execution_config={
-        "work_dir": working_directory,
-        "use_docker": False,
-        "timeout": 120,
-        "last_n_messages": 3,
-    },
-    human_input_mode="NEVER",
-)
-
-# Set up the group chat with all the agents
-groupchat = GroupChat(
-    agents=[player, game_tester, game_designer, programmer, code_executor],
-    messages=[],
-    max_round=150
-)
-
-# Create a manager for the group chat using the GPT-4 configuration
-manager = GroupChatManager(groupchat=groupchat, llm_config=gpt4_config)
-
-# Start the conversation with the Player's message
-player.initiate_chat(
-    manager,
-    message="Let's design and implement a snake game. I aim for it to be entertaining and challenging."
-)
diff --git a/spaces/NATSpeech/DiffSpeech/data_gen/tts/wav_processors/common_processors.py b/spaces/NATSpeech/DiffSpeech/data_gen/tts/wav_processors/common_processors.py
deleted file mode 100644
index ee5dd78c1b37710f0e5079fed7fdd092bf68eba1..0000000000000000000000000000000000000000
--- a/spaces/NATSpeech/DiffSpeech/data_gen/tts/wav_processors/common_processors.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import os
-import subprocess
-import librosa
-import numpy as np
-from data_gen.tts.wav_processors.base_processor import BaseWavProcessor, register_wav_processors
-from utils.audio import trim_long_silences
-from utils.audio.io import save_wav
-from utils.audio.rnnoise import rnnoise
-from utils.commons.hparams import hparams
-
-
-@register_wav_processors(name='sox_to_wav')
-class ConvertToWavProcessor(BaseWavProcessor):
-    @property
-    def name(self):
-        return 'ToWav'
-
-    def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
-        if input_fn[-4:] == '.wav':
-            return input_fn, sr
-        else:
-            output_fn = self.output_fn(input_fn)
-            subprocess.check_call(f'sox -v 0.95 "{input_fn}" -t wav "{output_fn}"', shell=True)
-            return output_fn, sr
-
-
-@register_wav_processors(name='sox_resample')
-class ResampleProcessor(BaseWavProcessor):
-    @property
-    def name(self):
-        return 'Resample'
-
-    def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
-        output_fn = self.output_fn(input_fn)
-        sr_file = librosa.core.get_samplerate(input_fn)
-        if sr != sr_file:
-            subprocess.check_call(f'sox -v 0.95 "{input_fn}" -r{sr} "{output_fn}"', shell=True)
-            y, _ = librosa.core.load(input_fn, sr=sr)
-            y, _ = librosa.effects.trim(y)
-            save_wav(y, output_fn, sr)
-            return output_fn, sr
-        else:
-            return input_fn, sr
-
-
-@register_wav_processors(name='trim_sil')
-class TrimSILProcessor(BaseWavProcessor):
-    @property
-    def name(self):
-        return 'TrimSIL'
-
-    def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
-        output_fn = self.output_fn(input_fn)
-        y, _ = librosa.core.load(input_fn, sr=sr)
-        y, _ = librosa.effects.trim(y)
-        save_wav(y, output_fn, sr)
-        return output_fn, sr
-
-
-@register_wav_processors(name='trim_all_sil')
-class TrimAllSILProcessor(BaseWavProcessor):
-    @property
-    def name(self):
-        return 'TrimAllSIL'
-
-    def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
-        output_fn = self.output_fn(input_fn)
-        y, audio_mask, _ = trim_long_silences(
-            input_fn, vad_max_silence_length=preprocess_args.get('vad_max_silence_length', 12))
-        save_wav(y, output_fn, sr)
-        if preprocess_args['save_sil_mask']:
-            os.makedirs(f'{processed_dir}/sil_mask', exist_ok=True)
-            np.save(f'{processed_dir}/sil_mask/{item_name}.npy', audio_mask)
-        return output_fn, sr
-
-
-@register_wav_processors(name='denoise')
-class DenoiseProcessor(BaseWavProcessor):
-    @property
-    def name(self):
-        return 'Denoise'
-
-    def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
-        output_fn = self.output_fn(input_fn)
-        rnnoise(input_fn, output_fn, out_sample_rate=sr)
-        return output_fn, sr
diff --git a/spaces/NCTCMumbai/NCTC/models/README.md b/spaces/NCTCMumbai/NCTC/models/README.md
deleted file mode 100644
index 5b52e4a5cf41f949c2cf85744ea297ec3c324004..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-![Logo](https://storage.googleapis.com/model_garden_artifacts/TF_Model_Garden.png)
-
-# Welcome to the Model
Garden for TensorFlow - -The TensorFlow Model Garden is a repository with a number of different implementations of state-of-the-art (SOTA) models and modeling solutions for TensorFlow users. We aim to demonstrate the best practices for modeling so that TensorFlow users -can take full advantage of TensorFlow for their research and product development. - -| Directory | Description | -|-----------|-------------| -| [official](official) | • A collection of example implementations for SOTA models using the latest TensorFlow 2's high-level APIs
      • Officially maintained, supported, and kept up to date with the latest TensorFlow 2 APIs by TensorFlow
      • Reasonably optimized for fast performance while still being easy to read | -| [research](research) | • A collection of research model implementations in TensorFlow 1 or 2 by researchers
      • Maintained and supported by researchers | -| [community](community) | • A curated list of the GitHub repositories with machine learning models and implementations powered by TensorFlow 2 | - -## [Announcements](https://github.com/tensorflow/models/wiki/Announcements) - -| Date | News | -|------|------| -| June 17, 2020 | [Context R-CNN: Long Term Temporal Context for Per-Camera Object Detection](https://github.com/tensorflow/models/tree/master/research/object_detection#june-17th-2020) released -| May 21, 2020 | [Unifying Deep Local and Global Features for Image Search (DELG)](https://github.com/tensorflow/models/tree/master/research/delf#delg) code released -| May 19, 2020 | [MobileDets: Searching for Object Detection Architectures for Mobile Accelerators](https://github.com/tensorflow/models/tree/master/research/object_detection#may-19th-2020) released -| May 7, 2020 | [MnasFPN with MobileNet-V2 backbone](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md#mobile-models) released for object detection -| May 1, 2020 | [DELF: DEep Local Features](https://github.com/tensorflow/models/tree/master/research/delf) updated to support TensorFlow 2.1 -| March 31, 2020 | [Introducing the Model Garden for TensorFlow 2](https://blog.tensorflow.org/2020/03/introducing-model-garden-for-tensorflow-2.html) ([Tweet](https://twitter.com/TensorFlow/status/1245029834633297921)) | - -## [Milestones](https://github.com/tensorflow/models/milestones) - -| Date | Milestone | -|------|-----------| -| July 7, 2020 | [![GitHub milestone](https://img.shields.io/github/milestones/progress/tensorflow/models/1)](https://github.com/tensorflow/models/milestone/1) | - -## Contributions - -[![help wanted:paper implementation](https://img.shields.io/github/issues/tensorflow/models/help%20wanted%3Apaper%20implementation)](https://github.com/tensorflow/models/labels/help%20wanted%3Apaper%20implementation) - -If you want to contribute, please review the [contribution guidelines](https://github.com/tensorflow/models/wiki/How-to-contribute). 
- -## License - -[Apache License 2.0](LICENSE) diff --git a/spaces/Nultx/VITS-TTS/attentions.py b/spaces/Nultx/VITS-TTS/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/Nultx/VITS-TTS/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = 
self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." 
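                # (Editorial note, not part of the original file.)
                # ones.triu(-block_length).tril(block_length) keeps a band of ones
                # of width 2*block_length + 1 around the diagonal, so the
                # masked_fill below restricts each query position to keys at most
                # block_length steps away.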
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/OAOA/DifFace/basicsr/test.py b/spaces/OAOA/DifFace/basicsr/test.py deleted file mode 100644 index 53cb3b7aa860c90518e15ba76e1a55fdf404bcc2..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/test.py +++ /dev/null @@ -1,45 +0,0 @@ -import logging -import torch -from os import path as osp - -from basicsr.data import build_dataloader, build_dataset -from basicsr.models import build_model -from basicsr.utils import get_env_info, get_root_logger, get_time_str, make_exp_dirs -from basicsr.utils.options import dict2str, parse_options - - -def test_pipeline(root_path): - # parse options, set distributed setting, set ramdom seed - opt, _ = parse_options(root_path, is_train=False) - - torch.backends.cudnn.benchmark = True - # torch.backends.cudnn.deterministic = True - - # mkdir and initialize loggers - make_exp_dirs(opt) - log_file = osp.join(opt['path']['log'], f"test_{opt['name']}_{get_time_str()}.log") - logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file) - logger.info(get_env_info()) - logger.info(dict2str(opt)) - - # create test dataset and dataloader - test_loaders = [] - for _, dataset_opt in sorted(opt['datasets'].items()): - test_set = build_dataset(dataset_opt) - test_loader = build_dataloader( - test_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed']) - logger.info(f"Number of test images in {dataset_opt['name']}: {len(test_set)}") - test_loaders.append(test_loader) - - # create model - model = build_model(opt) - - for test_loader in test_loaders: - test_set_name = test_loader.dataset.opt['name'] - logger.info(f'Testing {test_set_name}...') - model.validation(test_loader, current_iter=opt['name'], tb_logger=None, save_img=opt['val']['save_img']) - - -if __name__ == '__main__': - root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) - 
test_pipeline(root_path) diff --git a/spaces/OAOA/DifFace/utils/util_common.py b/spaces/OAOA/DifFace/utils/util_common.py deleted file mode 100644 index 4a9464d867c007f114d2b07574737f2f8e0c1ec6..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/utils/util_common.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python -# -*- coding:utf-8 -*- -# Powered by Zongsheng Yue 2022-02-06 10:34:59 - -import argparse -import importlib -from pathlib import Path - -def mkdir(dir_path, delete=False, parents=True): - import shutil - if not isinstance(dir_path, Path): - dir_path = Path(dir_path) - if delete: - if dir_path.exists(): - shutil.rmtree(str(dir_path)) - if not dir_path.exists(): - dir_path.mkdir(parents=parents) - -def get_obj_from_str(string, reload=False): - module, cls = string.rsplit(".", 1) - if reload: - module_imp = importlib.import_module(module) - importlib.reload(module_imp) - return getattr(importlib.import_module(module, package=None), cls) - -def instantiate_from_config(config): - if "target" not in config: - raise KeyError("Expected key `target` to instantiate.") - return get_obj_from_str(config["target"])(**config.get("params", dict())) - -def str2bool(v): - if isinstance(v, bool): - return v - if v.lower() in ("yes", "true", "t", "y", "1"): - return True - elif v.lower() in ("no", "false", "f", "n", "0"): - return False - else: - raise argparse.ArgumentTypeError("Boolean value expected.") - -def get_filenames(dir_path, exts=['png', 'jpg'], recursive=True): - ''' - Get the file paths in the given folder. - param exts: list, e.g., ['png',] - return: list - ''' - if not isinstance(dir_path, Path): - dir_path = Path(dir_path) - - file_paths = [] - for current_ext in exts: - if recursive: - file_paths.extend([str(x) for x in dir_path.glob('**/*.'+current_ext)]) - else: - file_paths.extend([str(x) for x in dir_path.glob('*.'+current_ext)]) - - return file_paths - -def readline_txt(txt_file): - if txt_file is None: - out = [] - else: - with open(txt_file, 'r') as ff: - out = [x[:-1] for x in ff.readlines()] - return out diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/README.md b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/README.md deleted file mode 100644 index 17030bf0fd50bb843a508e13e97ed436eae33287..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/README.md +++ /dev/null @@ -1,83 +0,0 @@ -### 2021 Update: We are merging this example into the [S2T framework](../speech_to_text), which supports more generic speech-to-text tasks (e.g. speech translation) and more flexible data processing pipelines. Please stay tuned. - -# Speech Recognition -`examples/speech_recognition` implements the ASR task in fairseq, along with the features, datasets, models and loss functions needed to train and run inference with the model described in [Transformers with convolutional context for ASR (Abdelrahman Mohamed et al., 2019)](https://arxiv.org/abs/1904.11660). - - -## Additional dependencies -On top of the main fairseq dependencies, there are a couple of additional requirements. - -1) Please follow the instructions to install [torchaudio](https://github.com/pytorch/audio). This is required to compute audio fbank features. -2) [Sclite](http://www1.icsi.berkeley.edu/Speech/docs/sctk-1.2/sclite.htm#sclite_name_0) is used to measure WER. Sclite can be downloaded and installed from source from the sctk package [here](http://www.openslr.org/4/).
Training and inference do not require the Sclite dependency. -3) [sentencepiece](https://github.com/google/sentencepiece) is required to create a dataset with word-piece targets. - -## Preparing librispeech data -``` -./examples/speech_recognition/datasets/prepare-librispeech.sh $DIR_TO_SAVE_RAW_DATA $DIR_FOR_PREPROCESSED_DATA -``` - -## Training librispeech data -``` -python train.py $DIR_FOR_PREPROCESSED_DATA --save-dir $MODEL_PATH --max-epoch 80 --task speech_recognition --arch vggtransformer_2 --optimizer adadelta --lr 1.0 --adadelta-eps 1e-8 --adadelta-rho 0.95 --clip-norm 10.0 --max-tokens 5000 --log-format json --log-interval 1 --criterion cross_entropy_acc --user-dir examples/speech_recognition/ -``` - -## Inference for librispeech -`$SET` can be `test_clean` or `test_other`. -Any checkpoint in `$MODEL_PATH` can be selected. In this example we are working with `checkpoint_last.pt`. -``` -python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --max-tokens 25000 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --beam 20 --results-path $RES_DIR --batch-size 40 --gen-subset $SET --user-dir examples/speech_recognition/ -``` - -## Scoring WER with sclite -``` -sclite -r ${RES_DIR}/ref.word-checkpoint_last.pt-${SET}.txt -h ${RES_DIR}/hypo.word-checkpoint_last.pt-${SET}.txt -i rm -o all stdout > $RES_REPORT -``` -The `Sum/Avg` row in the first table of the report contains the WER. - -## Using flashlight (previously called [wav2letter](https://github.com/facebookresearch/wav2letter)) components -[flashlight](https://github.com/facebookresearch/flashlight) now has integration with fairseq. Currently this includes: - -* AutoSegmentationCriterion (ASG) -* flashlight-style Conv/GLU model -* flashlight's beam search decoder - -To use these, follow the instructions on [this page](https://github.com/facebookresearch/flashlight/tree/master/bindings/python) to install python bindings. - -## Training librispeech data (flashlight style, Conv/GLU + ASG loss) -Training command: -``` -python train.py $DIR_FOR_PREPROCESSED_DATA --save-dir $MODEL_PATH --max-epoch 100 --task speech_recognition --arch w2l_conv_glu_enc --batch-size 4 --optimizer sgd --lr 0.3,0.8 --momentum 0.8 --clip-norm 0.2 --max-tokens 50000 --log-format json --log-interval 100 --num-workers 0 --sentence-avg --criterion asg_loss --asg-transitions-init 5 --max-replabel 2 --linseg-updates 8789 --user-dir examples/speech_recognition -``` - -Note that the ASG loss currently doesn't do well with word-pieces. You should prepare a dataset with character targets by setting `nbpe=31` in `prepare-librispeech.sh`. - -## Inference for librispeech (flashlight decoder, n-gram LM) -Inference command: -``` -python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --seed 1 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --gen-subset $SET --results-path $RES_DIR --w2l-decoder kenlm --kenlm-model $KENLM_MODEL_PATH --lexicon $LEXICON_PATH --beam 200 --beam-threshold 15 --lm-weight 1.5 --word-score 1.5 --sil-weight -0.3 --criterion asg_loss --max-replabel 2 --user-dir examples/speech_recognition -``` - -`$KENLM_MODEL_PATH` should be a standard n-gram language model file. `$LEXICON_PATH` should be a flashlight-style lexicon (a list of known words and their spellings).
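If you do not already have an n-gram model, one can be built from plain text with the standard KenLM command-line tools; a rough sketch (`corpus.txt` and the output names are placeholders): -``` -# train a 4-gram ARPA model, then binarize it for faster loading -lmplz -o 4 < corpus.txt > lm.arpa -build_binary lm.arpa lm.bin -``` -The binarized model (or the raw ARPA file) can then be passed as `$KENLM_MODEL_PATH`.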
For ASG inference, a lexicon line should look like this (note the repetition labels): -``` -doorbell D O 1 R B E L 1 ▁ -``` -For CTC inference with word-pieces, repetition labels are not used and the lexicon should have most common spellings for each word (one can use sentencepiece's `NBestEncodeAsPieces` for this): -``` -doorbell ▁DOOR BE LL -doorbell ▁DOOR B E LL -doorbell ▁DO OR BE LL -doorbell ▁DOOR B EL L -doorbell ▁DOOR BE L L -doorbell ▁DO OR B E LL -doorbell ▁DOOR B E L L -doorbell ▁DO OR B EL L -doorbell ▁DO O R BE LL -doorbell ▁DO OR BE L L -``` -Lowercase vs. uppercase matters: the *word* should match the case of the n-gram language model (i.e. `$KENLM_MODEL_PATH`), while the *spelling* should match the case of the token dictionary (i.e. `$DIR_FOR_PREPROCESSED_DATA/dict.txt`). - -## Inference for librispeech (flashlight decoder, viterbi only) -Inference command: -``` -python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --seed 1 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --gen-subset $SET --results-path $RES_DIR --w2l-decoder viterbi --criterion asg_loss --max-replabel 2 --user-dir examples/speech_recognition -``` diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/m2m_100/process_data/clean_histogram.py b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/m2m_100/process_data/clean_histogram.py deleted file mode 100644 index e24e073dc0eb43c76e2ce717f52bb848c5b026b8..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/m2m_100/process_data/clean_histogram.py +++ /dev/null @@ -1,52 +0,0 @@ -import argparse - -parser = argparse.ArgumentParser() -parser.add_argument('--src', type=str, help='Source language') -parser.add_argument('--tgt', type=str, help='Target language') -parser.add_argument('--src-file', type=str, help='Input source file') -parser.add_argument('--tgt-file', type=str, help='Input target file') -parser.add_argument('--src-output-file', type=str, help='Output source file') -parser.add_argument('--tgt-output-file', type=str, help='Output target file') -parser.add_argument('--threshold', type=float, default=0.5, help='Threshold') -parser.add_argument('--threshold-character', type=str, default=']', help='Threshold character') -parser.add_argument('--histograms', type=str, help='Path to histograms') - -args = parser.parse_args() - - -def read_hist(f): - ch = [] - for line in f: - c = line[0] - if c == args.threshold_character: - break - ch.append(c) - return ch - - -with(open("{}/{}".format(args.histograms, args.src), 'r', encoding='utf8')) as f: - ch1 = read_hist(f) - -with(open("{}/{}".format(args.histograms, args.tgt), 'r', encoding='utf8')) as f: - ch2 = read_hist(f) - -print("Accepted characters for {}: {}".format(args.src, ch1)) -print("Accepted characters for {}: {}".format(args.tgt, ch2)) - -with open(args.src_file, 'r', encoding='utf8') as fs1, open(args.tgt_file, 'r', encoding='utf8') as fs2, open(args.src_output_file, 'w', encoding='utf8') as fos1, open(args.tgt_output_file, 'w', encoding='utf8') as fos2: - ls1 = fs1.readline() - ls2 = fs2.readline() - - while ls1 or ls2: - cnt1 = len([c for c in ls1.strip() if c in ch1]) - cnt2 = len([c for c in ls2.strip() if c in ch2]) - - if cnt1 / len(ls1) > args.threshold and cnt2 / len(ls2) > args.threshold: - fos1.write(ls1) - fos2.write(ls2) - else: - print("{} {} {} \n{} {} {}".format(args.src, cnt1 / len(ls1), ls1.strip(), args.tgt, cnt2 / len(ls2), ls2.strip())) - - ls1 = fs1.readline() - ls2 = 
fs2.readline() - \ No newline at end of file diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/pointer_generator/preprocess.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/pointer_generator/preprocess.py deleted file mode 100644 index f72ca7d3d97e12ab7b405dcff314bdb6c0a78755..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/pointer_generator/preprocess.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -from itertools import zip_longest - - -def replace_oovs(source_in, target_in, vocabulary, source_out, target_out): - """Replaces out-of-vocabulary words in source and target text with <unk-N>, - where N is the position of the word in the source sequence. - """ - - def format_unk(pos): - return "<unk-{}>".format(pos) - - if target_in is None: - target_in = [] - - for seq_num, (source_seq, target_seq) in enumerate( - zip_longest(source_in, target_in) - ): - source_seq_out = [] - target_seq_out = [] - - word_to_pos = dict() - for position, token in enumerate(source_seq.strip().split()): - if token in vocabulary: - token_out = token - else: - if token in word_to_pos: - oov_pos = word_to_pos[token] - else: - word_to_pos[token] = position - oov_pos = position - token_out = format_unk(oov_pos) - source_seq_out.append(token_out) - source_out.write(" ".join(source_seq_out) + "\n") - - if target_seq is not None: - for token in target_seq.strip().split(): - if token in word_to_pos: - token_out = format_unk(word_to_pos[token]) - else: - token_out = token - target_seq_out.append(token_out) - if target_out is not None: - target_out.write(" ".join(target_seq_out) + "\n") - - -def main(): - parser = argparse.ArgumentParser( - description="Replaces out-of-vocabulary words in both source and target " - "sequences with <unk-N> tokens that indicate the position of the word " - "in the source sequence."
- ) - parser.add_argument( - "--source", type=str, help="text file with source sequences", required=True - ) - parser.add_argument( - "--target", type=str, help="text file with target sequences", default=None - ) - parser.add_argument("--vocab", type=str, help="vocabulary file", required=True) - parser.add_argument( - "--source-out", - type=str, - help="where to write source sequences with <unk-N> entries", - required=True, - ) - parser.add_argument( - "--target-out", - type=str, - help="where to write target sequences with <unk-N> entries", - default=None, - ) - args = parser.parse_args() - - with open(args.vocab, encoding="utf-8") as vocab: - vocabulary = vocab.read().splitlines() - - target_in = ( - open(args.target, "r", encoding="utf-8") if args.target is not None else None - ) - target_out = ( - open(args.target_out, "w", encoding="utf-8") - if args.target_out is not None - else None - ) - with open(args.source, "r", encoding="utf-8") as source_in, open( - args.source_out, "w", encoding="utf-8" - ) as source_out: - replace_oovs(source_in, target_in, vocabulary, source_out, target_out) - if target_in is not None: - target_in.close() - if target_out is not None: - target_out.close() - - -if __name__ == "__main__": - main() diff --git a/spaces/OpenGVLab/DragGAN/stylegan2/op/upfirdn2d.cpp b/spaces/OpenGVLab/DragGAN/stylegan2/op/upfirdn2d.cpp deleted file mode 100644 index 73928ece8150f847d98af65a95685a29fcceecde..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/DragGAN/stylegan2/op/upfirdn2d.cpp +++ /dev/null @@ -1,31 +0,0 @@ -#include <ATen/ATen.h> -#include <torch/extension.h> - -torch::Tensor upfirdn2d_op(const torch::Tensor &input, - const torch::Tensor &kernel, int up_x, int up_y, - int down_x, int down_y, int pad_x0, int pad_x1, - int pad_y0, int pad_y1); - -#define CHECK_CUDA(x) \ - TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) \ - TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -torch::Tensor upfirdn2d(const torch::Tensor &input, const torch::Tensor &kernel, - int up_x, int up_y, int down_x, int down_y, int pad_x0, - int pad_x1, int pad_y0, int pad_y1) { - CHECK_INPUT(input); - CHECK_INPUT(kernel); - - at::DeviceGuard guard(input.device()); - - return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, - pad_y0, pad_y1); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)"); -} \ No newline at end of file diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py deleted file mode 100644 index b1eedeebf8e3bde80722fc4acf51be6ca212cb3d..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates.
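-# Convention used throughout this file: rotated boxes carry a fifth angle dimension, -# (x_center, y_center, width, height, angle), matching detectron2's `RotatedBoxes`; box regression -# uses the 5-d (dx, dy, dw, dh, da) deltas described in the module docstring below.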
-import logging -import numpy as np -import torch - -from detectron2.config import configurable -from detectron2.layers import ShapeSpec, batched_nms_rotated -from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated -from detectron2.utils.events import get_event_storage - -from ..box_regression import Box2BoxTransformRotated -from ..poolers import ROIPooler -from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals -from .box_head import build_box_head -from .fast_rcnn import FastRCNNOutputLayers -from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads - -logger = logging.getLogger(__name__) - -""" -Shape shorthand in this module: - - N: number of images in the minibatch - R: number of ROIs, combined over all images, in the minibatch - Ri: number of ROIs in image i - K: number of foreground classes. E.g.,there are 80 foreground classes in COCO. - -Naming convention: - - deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box - transform (see :class:`box_regression.Box2BoxTransformRotated`). - - pred_class_logits: predicted class scores in [-inf, +inf]; use - softmax(pred_class_logits) to estimate P(class). - - gt_classes: ground-truth classification labels in [0, K], where [0, K) represent - foreground object classes and K represents the background class. - - pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals - to detection box predictions. - - gt_proposal_deltas: ground-truth rotated box2box transform deltas -""" - - -def fast_rcnn_inference_rotated( - boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image -): - """ - Call `fast_rcnn_inference_single_image_rotated` for all images. - - Args: - boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic - boxes for each image. Element i has shape (Ri, K * 5) if doing - class-specific regression, or (Ri, 5) if doing class-agnostic - regression, where Ri is the number of predicted objects for image i. - This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. - scores (list[Tensor]): A list of Tensors of predicted class scores for each image. - Element i has shape (Ri, K + 1), where Ri is the number of predicted objects - for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. - image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. - score_thresh (float): Only return detections with a confidence score exceeding this - threshold. - nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. - topk_per_image (int): The number of top scoring detections to return. Set < 0 to return - all detections. - - Returns: - instances: (list[Instances]): A list of N instances, one for each image in the batch, - that stores the topk most confidence detections. - kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates - the corresponding boxes/scores index in [0, Ri) from the input, for image i. 
- """ - result_per_image = [ - fast_rcnn_inference_single_image_rotated( - boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image - ) - for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) - ] - return [x[0] for x in result_per_image], [x[1] for x in result_per_image] - - -def fast_rcnn_inference_single_image_rotated( - boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image -): - """ - Single-image inference. Return rotated bounding-box detection results by thresholding - on scores and applying rotated non-maximum suppression (Rotated NMS). - - Args: - Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes - per image. - - Returns: - Same as `fast_rcnn_inference_rotated`, but for only one image. - """ - valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) - if not valid_mask.all(): - boxes = boxes[valid_mask] - scores = scores[valid_mask] - - B = 5 # box dimension - scores = scores[:, :-1] - num_bbox_reg_classes = boxes.shape[1] // B - # Convert to Boxes to use the `clip` function ... - boxes = RotatedBoxes(boxes.reshape(-1, B)) - boxes.clip(image_shape) - boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B - # Filter results based on detection scores - filter_mask = scores > score_thresh # R x K - # R' x 2. First column contains indices of the R predictions; - # Second column contains indices of classes. - filter_inds = filter_mask.nonzero() - if num_bbox_reg_classes == 1: - boxes = boxes[filter_inds[:, 0], 0] - else: - boxes = boxes[filter_mask] - scores = scores[filter_mask] - - # Apply per-class Rotated NMS - keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh) - if topk_per_image >= 0: - keep = keep[:topk_per_image] - boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] - - result = Instances(image_shape) - result.pred_boxes = RotatedBoxes(boxes) - result.scores = scores - result.pred_classes = filter_inds[:, 1] - - return result, filter_inds[:, 0] - - -class RotatedFastRCNNOutputLayers(FastRCNNOutputLayers): - """ - Two linear layers for predicting Rotated Fast R-CNN outputs. - """ - - @classmethod - def from_config(cls, cfg, input_shape): - args = super().from_config(cfg, input_shape) - args["box2box_transform"] = Box2BoxTransformRotated( - weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS - ) - return args - - def inference(self, predictions, proposals): - """ - Returns: - list[Instances]: same as `fast_rcnn_inference_rotated`. - list[Tensor]: same as `fast_rcnn_inference_rotated`. - """ - boxes = self.predict_boxes(predictions, proposals) - scores = self.predict_probs(predictions, proposals) - image_shapes = [x.image_size for x in proposals] - - return fast_rcnn_inference_rotated( - boxes, - scores, - image_shapes, - self.test_score_thresh, - self.test_nms_thresh, - self.test_topk_per_image, - ) - - -@ROI_HEADS_REGISTRY.register() -class RROIHeads(StandardROIHeads): - """ - This class is used by Rotated Fast R-CNN to detect rotated boxes. - For now, it only supports box predictions but not mask or keypoints. - """ - - @configurable - def __init__(self, **kwargs): - """ - NOTE: this interface is experimental. - """ - super().__init__(**kwargs) - assert ( - not self.mask_on and not self.keypoint_on - ), "Mask/Keypoints not supported in Rotated ROIHeads." - assert not self.train_on_pred_boxes, "train_on_pred_boxes not implemented for RROIHeads!" 
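- # The box head below is built exactly as in `StandardROIHeads`, except that the pooler must be - # `ROIAlignRotated` and the box predictor is the rotated output layer (see the assert and the - # marked line in `_init_box_head`).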
- - @classmethod - def _init_box_head(cls, cfg, input_shape): - # fmt: off - in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES - pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION - pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) - sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO - pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE - # fmt: on - assert pooler_type in ["ROIAlignRotated"], pooler_type - # assume all channel counts are equal - in_channels = [input_shape[f].channels for f in in_features][0] - - box_pooler = ROIPooler( - output_size=pooler_resolution, - scales=pooler_scales, - sampling_ratio=sampling_ratio, - pooler_type=pooler_type, - ) - box_head = build_box_head( - cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) - ) - # This line is the only difference vs. StandardROIHeads - box_predictor = RotatedFastRCNNOutputLayers(cfg, box_head.output_shape) - return { - "box_in_features": in_features, - "box_pooler": box_pooler, - "box_head": box_head, - "box_predictor": box_predictor, - } - - @torch.no_grad() - def label_and_sample_proposals(self, proposals, targets): - """ - Prepare some proposals to be used to train the RROI heads. - It performs box matching between `proposals` and `targets`, and assigns - training labels to the proposals. - It returns `self.batch_size_per_image` random samples from proposals and ground-truth boxes, - with a fraction of positives that is no larger than `self.positive_sample_fraction`. - - Args: - See :meth:`StandardROIHeads.forward` - - Returns: - list[Instances]: length `N` list of `Instances`s containing the proposals - sampled for training. Each `Instances` has the following fields: - - proposal_boxes: the rotated proposal boxes - - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to - (this is only meaningful if the proposal has a label > 0; if label = 0 - then the ground-truth box is random) - - gt_classes: the ground-truth classification label for each proposal - """ - if self.proposal_append_gt: - proposals = add_ground_truth_to_proposals(targets, proposals) - - proposals_with_gt = [] - - num_fg_samples = [] - num_bg_samples = [] - for proposals_per_image, targets_per_image in zip(proposals, targets): - has_gt = len(targets_per_image) > 0 - match_quality_matrix = pairwise_iou_rotated( - targets_per_image.gt_boxes, proposals_per_image.proposal_boxes - ) - matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix) - sampled_idxs, gt_classes = self._sample_proposals( - matched_idxs, matched_labels, targets_per_image.gt_classes - ) - - proposals_per_image = proposals_per_image[sampled_idxs] - proposals_per_image.gt_classes = gt_classes - - if has_gt: - sampled_targets = matched_idxs[sampled_idxs] - proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets] - - num_bg_samples.append((gt_classes == self.num_classes).sum().item()) - num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1]) - proposals_with_gt.append(proposals_per_image) - - # Log the number of fg/bg samples that are selected for training ROI heads - storage = get_event_storage() - storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) - storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) - - return proposals_with_gt diff --git a/spaces/PaddlePaddle/ERNIE-Zeus/README.md b/spaces/PaddlePaddle/ERNIE-Zeus/README.md deleted file mode 100644 index 
97bc4b209eb34f5b9e025e606170073b48c50ee0..0000000000000000000000000000000000000000 --- a/spaces/PaddlePaddle/ERNIE-Zeus/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ERNIE Zeus -emoji: 👁 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.1.7 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/PascalLiu/FNeVR_demo/sync_batchnorm/unittest.py b/spaces/PascalLiu/FNeVR_demo/sync_batchnorm/unittest.py deleted file mode 100644 index 0675c022e4ba85d38d1f813490f6740150909524..0000000000000000000000000000000000000000 --- a/spaces/PascalLiu/FNeVR_demo/sync_batchnorm/unittest.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -# File : unittest.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import unittest - -import numpy as np -from torch.autograd import Variable - - -def as_numpy(v): - if isinstance(v, Variable): - v = v.data - return v.cpu().numpy() - - -class TorchTestCase(unittest.TestCase): - def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3): - npa, npb = as_numpy(a), as_numpy(b) - self.assertTrue( - np.allclose(npa, npb, atol=atol, rtol=rtol), - 'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max()) - ) diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/datasets/cityscapes.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/datasets/cityscapes.py deleted file mode 100644 index 81e47a914a1aa2e5458e18669d65ffb742f46fc6..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/datasets/cityscapes.py +++ /dev/null @@ -1,217 +0,0 @@ -import os.path as osp -import tempfile - -import annotator.uniformer.mmcv as mmcv -import numpy as np -from annotator.uniformer.mmcv.utils import print_log -from PIL import Image - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class CityscapesDataset(CustomDataset): - """Cityscapes dataset. - - The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is - fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset.
- """ - - CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', - 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', - 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', - 'bicycle') - - PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], - [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], - [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], - [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], - [0, 80, 100], [0, 0, 230], [119, 11, 32]] - - def __init__(self, **kwargs): - super(CityscapesDataset, self).__init__( - img_suffix='_leftImg8bit.png', - seg_map_suffix='_gtFine_labelTrainIds.png', - **kwargs) - - @staticmethod - def _convert_to_label_id(result): - """Convert trainId to id for cityscapes.""" - if isinstance(result, str): - result = np.load(result) - import cityscapesscripts.helpers.labels as CSLabels - result_copy = result.copy() - for trainId, label in CSLabels.trainId2label.items(): - result_copy[result == trainId] = label.id - - return result_copy - - def results2img(self, results, imgfile_prefix, to_label_id): - """Write the segmentation results to images. - - Args: - results (list[list | tuple | ndarray]): Testing results of the - dataset. - imgfile_prefix (str): The filename prefix of the png files. - If the prefix is "somepath/xxx", - the png files will be named "somepath/xxx.png". - to_label_id (bool): whether convert output to label_id for - submission - - Returns: - list[str: str]: result txt files which contains corresponding - semantic segmentation images. - """ - mmcv.mkdir_or_exist(imgfile_prefix) - result_files = [] - prog_bar = mmcv.ProgressBar(len(self)) - for idx in range(len(self)): - result = results[idx] - if to_label_id: - result = self._convert_to_label_id(result) - filename = self.img_infos[idx]['filename'] - basename = osp.splitext(osp.basename(filename))[0] - - png_filename = osp.join(imgfile_prefix, f'{basename}.png') - - output = Image.fromarray(result.astype(np.uint8)).convert('P') - import cityscapesscripts.helpers.labels as CSLabels - palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8) - for label_id, label in CSLabels.id2label.items(): - palette[label_id] = label.color - - output.putpalette(palette) - output.save(png_filename) - result_files.append(png_filename) - prog_bar.update() - - return result_files - - def format_results(self, results, imgfile_prefix=None, to_label_id=True): - """Format the results into dir (standard format for Cityscapes - evaluation). - - Args: - results (list): Testing results of the dataset. - imgfile_prefix (str | None): The prefix of images files. It - includes the file path and the prefix of filename, e.g., - "a/b/prefix". If not specified, a temp file will be created. - Default: None. - to_label_id (bool): whether convert output to label_id for - submission. Default: False - - Returns: - tuple: (result_files, tmp_dir), result_files is a list containing - the image paths, tmp_dir is the temporal directory created - for saving json/png files when img_prefix is not specified. 
- """ - - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: ' - f'{len(results)} != {len(self)}') - - if imgfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - imgfile_prefix = tmp_dir.name - else: - tmp_dir = None - result_files = self.results2img(results, imgfile_prefix, to_label_id) - - return result_files, tmp_dir - - def evaluate(self, - results, - metric='mIoU', - logger=None, - imgfile_prefix=None, - efficient_test=False): - """Evaluation in Cityscapes/default protocol. - - Args: - results (list): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. - logger (logging.Logger | None | str): Logger used for printing - related information during evaluation. Default: None. - imgfile_prefix (str | None): The prefix of output image file, - for cityscapes evaluation only. It includes the file path and - the prefix of filename, e.g., "a/b/prefix". - If results are evaluated with cityscapes protocol, it would be - the prefix of output png files. The output files would be - png images under folder "a/b/prefix/xxx.png", where "xxx" is - the image name of cityscapes. If not specified, a temp file - will be created for evaluation. - Default: None. - - Returns: - dict[str, float]: Cityscapes/default metrics. - """ - - eval_results = dict() - metrics = metric.copy() if isinstance(metric, list) else [metric] - if 'cityscapes' in metrics: - eval_results.update( - self._evaluate_cityscapes(results, logger, imgfile_prefix)) - metrics.remove('cityscapes') - if len(metrics) > 0: - eval_results.update( - super(CityscapesDataset, - self).evaluate(results, metrics, logger, efficient_test)) - - return eval_results - - def _evaluate_cityscapes(self, results, logger, imgfile_prefix): - """Evaluation in Cityscapes protocol. - - Args: - results (list): Testing results of the dataset. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - imgfile_prefix (str | None): The prefix of output image file - - Returns: - dict[str: float]: Cityscapes evaluation results. 
- """ - try: - import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval # noqa - except ImportError: - raise ImportError('Please run "pip install cityscapesscripts" to ' - 'install cityscapesscripts first.') - msg = 'Evaluating in Cityscapes style' - if logger is None: - msg = '\n' + msg - print_log(msg, logger=logger) - - result_files, tmp_dir = self.format_results(results, imgfile_prefix) - - if tmp_dir is None: - result_dir = imgfile_prefix - else: - result_dir = tmp_dir.name - - eval_results = dict() - print_log(f'Evaluating results under {result_dir} ...', logger=logger) - - CSEval.args.evalInstLevelScore = True - CSEval.args.predictionPath = osp.abspath(result_dir) - CSEval.args.evalPixelAccuracy = True - CSEval.args.JSONOutput = False - - seg_map_list = [] - pred_list = [] - - # when evaluating with official cityscapesscripts, - # **_gtFine_labelIds.png is used - for seg_map in mmcv.scandir( - self.ann_dir, 'gtFine_labelIds.png', recursive=True): - seg_map_list.append(osp.join(self.ann_dir, seg_map)) - pred_list.append(CSEval.getPrediction(CSEval.args, seg_map)) - - eval_results.update( - CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args)) - - if tmp_dir is not None: - tmp_dir.cleanup() - - return eval_results diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/app.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/app.py deleted file mode 100644 index 0ce9742627e9cd3fdda140e34df1dfbf912860e7..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/app.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -from datetime import datetime - -from numpy import true_divide -import gradio as gr -import warnings - -warnings.filterwarnings("ignore") - -os.system("python setup.py build develop --user") - -from maskrcnn_benchmark.config import cfg -from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo -import vqa -import cv2 -from PIL import Image -import numpy as np - -# Use this command for evaluate the GLIP-T model -config_file = "configs/glip_Swin_T_O365_GoldG.yaml" -weight_file = "checkpoints/glip_tiny_model_o365_goldg_cc_sbu.pth" - -# manual override some options -cfg.local_rank = 0 -cfg.num_gpus = 1 -cfg.merge_from_file(config_file) -cfg.merge_from_list(["MODEL.WEIGHT", weight_file]) -cfg.merge_from_list(["MODEL.DEVICE", "cuda"]) - -glip_demo = GLIPDemo( - cfg, - min_image_size=800, - confidence_threshold=0.7, - show_mask_heatmaps=False -) -blip_demo = vqa.VQA( - model_path = 'checkpoints/model_base_vqa_capfilt_large.pth') - -def predict_image(image, object, question): - now = datetime.now() - dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("TimeStamp {}".format(dt_string)) - result, _ = glip_demo.run_on_web_image(image[:, :, [2, 1, 0]], object, 0.5) - result = result[:, :, [2, 1, 0]] - answer = blip_demo.vqa_demo(image, question) - return result, answer - -def predict_video(video, object, question, frame_drop_value): - now = datetime.now() - dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print("TimeStamp {}".format(dt_string)) - vid = cv2.VideoCapture(video) - count = 0 - result = None - answer = None - while True: - ret, frame = vid.read() - if ret: - count+=1 - if count % frame_drop_value == 0: - # image = Image.fromarray(frame) - image = frame - cv2.putText( - img = image, - text = str(count), - org = (20, 20), - fontFace = cv2.FONT_HERSHEY_DUPLEX, - fontScale = 0.5, - color = (125, 246, 55), - thickness = 1) - result, _ = glip_demo.run_on_web_image(image[:, :, [2, 1, 0]], object, 0.5) - answer = 
blip_demo.vqa_demo(image, question) - yield result, answer - else: - break - - yield result, answer - -with gr.Blocks() as demo: - gr.Markdown("Text-Based Object Detection and Visual Question Answering") - with gr.Tab("Image"): - with gr.Row(): - with gr.Column(): - image_input = gr.Image(label='input image') - obj_input = gr.Textbox(label='Objects', lines=1, placeholder="Objects here..") - vqa_input = gr.Textbox(label='Question', lines=1, placeholder="Question here..") - image_button = gr.Button("Submit") - - with gr.Column(): - image_output = gr.outputs.Image(type="pil", label="grounding results") - vqa_output = gr.Textbox(label="Answer") - - with gr.Tab("Video"): - with gr.Row(): - with gr.Column(): - video_input = gr.PlayableVideo(label='input video', mirror_webcam=False) - obj_input_video = gr.Textbox(label='Objects', lines=1, placeholder="Objects here..") - vqa_input_video = gr.Textbox(label='Question', lines=1, placeholder="Question here..") - frame_drop_input = gr.Slider(label='Frames drop value', minimum=0, maximum=30, step=1, value=5) - video_button = gr.Button("Submit") - - with gr.Column(): - video_output = gr.outputs.Image(type="pil", label="grounding results") - vqa_output_video = gr.Textbox(label="Answer") - - with gr.Tab("Webcam"): - with gr.Row(): - with gr.Column(): - cam_input = gr.Video(label='input video', mirror_webcam=False, source="webcam") - obj_input_cam = gr.Textbox(label='Objects', lines=1, placeholder="Objects here..") - vqa_input_cam = gr.Textbox(label='Question', lines=1, placeholder="Question here..") - frame_drop_input_cam = gr.Slider(label='Frames drop value', minimum=0, maximum=30, step=1, value=5) - cam_button = gr.Button("Submit") - - with gr.Column(): - cam_output = gr.outputs.Image(type="pil", label="grounding results") - vqa_output_cam = gr.Textbox(label="Answer") - - image_button.click(predict_image, inputs=[image_input, obj_input, vqa_input], outputs=[image_output, vqa_output]) - video_button.click(predict_video, inputs=[video_input, obj_input_video, vqa_input_video, frame_drop_input], outputs=[video_output, vqa_output_video]) - cam_button.click(predict_video, inputs=[cam_input, obj_input_cam, vqa_input_cam, frame_drop_input_cam], outputs=[cam_output, vqa_output_cam]) -demo.queue() -demo.launch() \ No newline at end of file diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/modules/conditioners.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/modules/conditioners.py deleted file mode 100644 index d10ac8dc96466375379c883cd62f7c04a1bb0a73..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/modules/conditioners.py +++ /dev/null @@ -1,1411 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
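-# This module groups the conditioning machinery for the AudioCraft language models: dataclasses -# describing conditions (text, wav, joint wav/text embeddings), simple text tokenizers, text -# conditioners (LUT- and T5-based), and waveform conditioners such as the chroma-from-stems -# conditioner.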
- -from collections import defaultdict -from copy import deepcopy -from dataclasses import dataclass, field -from itertools import chain -import logging -import math -from pathlib import Path -import random -import re -import typing as tp -import warnings - -import einops -from num2words import num2words -import spacy -from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer # type: ignore -import torch -from torch import nn -import torch.nn.functional as F -from torch.nn.utils.rnn import pad_sequence - -from .chroma import ChromaExtractor -from .streaming import StreamingModule -from .transformer import create_sin_embedding -from ..data.audio import audio_read -from ..data.audio_dataset import SegmentInfo -from ..data.audio_utils import convert_audio -from ..environment import AudioCraftEnvironment -from ..quantization import ResidualVectorQuantizer -from ..utils.autocast import TorchAutocast -from ..utils.cache import EmbeddingCache -from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once - - -logger = logging.getLogger(__name__) -TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) -ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask - - -class WavCondition(tp.NamedTuple): - wav: torch.Tensor - length: torch.Tensor - sample_rate: tp.List[int] - path: tp.List[tp.Optional[str]] = [] - seek_time: tp.List[tp.Optional[float]] = [] - - -class JointEmbedCondition(tp.NamedTuple): - wav: torch.Tensor - text: tp.List[tp.Optional[str]] - length: torch.Tensor - sample_rate: tp.List[int] - path: tp.List[tp.Optional[str]] = [] - seek_time: tp.List[tp.Optional[float]] = [] - - -@dataclass -class ConditioningAttributes: - text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) - wav: tp.Dict[str, WavCondition] = field(default_factory=dict) - joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) - - def __getitem__(self, item): - return getattr(self, item) - - @property - def text_attributes(self): - return self.text.keys() - - @property - def wav_attributes(self): - return self.wav.keys() - - @property - def joint_embed_attributes(self): - return self.joint_embed.keys() - - @property - def attributes(self): - return { - "text": self.text_attributes, - "wav": self.wav_attributes, - "joint_embed": self.joint_embed_attributes, - } - - def to_flat_dict(self): - return { - **{f"text.{k}": v for k, v in self.text.items()}, - **{f"wav.{k}": v for k, v in self.wav.items()}, - **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} - } - - @classmethod - def from_flat_dict(cls, x): - out = cls() - for k, v in x.items(): - kind, att = k.split(".") - out[kind][att] = v - return out - - -class SegmentWithAttributes(SegmentInfo): - """Base class for all dataclasses that are used for conditioning. - All child classes should implement `to_condition_attributes` that converts - the existing attributes to a dataclass of type ConditioningAttributes. - """ - def to_condition_attributes(self) -> ConditioningAttributes: - raise NotImplementedError() - - -def nullify_condition(condition: ConditionType, dim: int = 1): - """Transform an input condition to a null condition. - The way it is done by converting it to a single zero vector similarly - to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. 
- - Args: - condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) - dim (int): The dimension that will be truncated (should be the time dimension) - WARNING!: dim should not be the batch dimension! - Returns: - ConditionType: A tuple of null condition and mask - """ - assert dim != 0, "dim cannot be the batch dimension!" - assert isinstance(condition, tuple) and \ - isinstance(condition[0], torch.Tensor) and \ - isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" - cond, mask = condition - B = cond.shape[0] - last_dim = cond.dim() - 1 - out = cond.transpose(dim, last_dim) - out = 0. * out[..., :1] - out = out.transpose(dim, last_dim) - mask = torch.zeros((B, 1), device=out.device).int() - assert cond.dim() == out.dim() - return out, mask - - -def nullify_wav(cond: WavCondition) -> WavCondition: - """Transform a WavCondition to a nullified WavCondition. - It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. - - Args: - cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. - Returns: - WavCondition: Nullified wav condition. - """ - null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) - return WavCondition( - wav=null_wav, - length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), - sample_rate=cond.sample_rate, - path=[None] * cond.wav.shape[0], - seek_time=[None] * cond.wav.shape[0], - ) - - -def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: - """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, - and replacing metadata by dummy attributes. - - Args: - cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. - """ - null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) - return JointEmbedCondition( - wav=null_wav, text=[None] * len(embed.text), - length=torch.LongTensor([0]).to(embed.wav.device), - sample_rate=embed.sample_rate, - path=[None] * embed.wav.shape[0], - seek_time=[0] * embed.wav.shape[0], - ) - - -class Tokenizer: - """Base tokenizer implementation - (in case we want to introduce more advances tokenizers in the future). - """ - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - raise NotImplementedError() - - -class WhiteSpaceTokenizer(Tokenizer): - """This tokenizer should be used for natural language descriptions. - For example: - ["he didn't, know he's going home.", 'shorter sentence'] => - [[78, 62, 31, 4, 78, 25, 19, 34], - [59, 77, 0, 0, 0, 0, 0, 0]] - """ - PUNCTUATION = "?:!.,;" - - def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", - lemma: bool = True, stopwords: bool = True) -> None: - self.n_bins = n_bins - self.pad_idx = pad_idx - self.lemma = lemma - self.stopwords = stopwords - try: - self.nlp = spacy.load(language) - except IOError: - spacy.cli.download(language) # type: ignore - self.nlp = spacy.load(language) - - @tp.no_type_check - def __call__(self, texts: tp.List[tp.Optional[str]], - return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Take a list of strings and convert them to a tensor of indices. - - Args: - texts (list[str]): List of strings. - return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. 
- Returns: - tuple[torch.Tensor, torch.Tensor]: - - Indices of words in the LUT. - - And a mask indicating where the padding tokens are - """ - output, lengths = [], [] - texts = deepcopy(texts) - for i, text in enumerate(texts): - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(torch.Tensor([self.pad_idx])) - lengths.append(0) - continue - - # convert numbers to words - text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore - # normalize text - text = self.nlp(text) # type: ignore - # remove stopwords - if self.stopwords: - text = [w for w in text if not w.is_stop] # type: ignore - # remove punctuation - text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore - # lemmatize if needed - text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore - - texts[i] = " ".join(text) - lengths.append(len(text)) - # convert to tensor - tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) - output.append(tokens) - - mask = length_to_mask(torch.IntTensor(lengths)).int() - padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() - if return_text: - return padded_output, mask, texts # type: ignore - return padded_output, mask - - -class NoopTokenizer(Tokenizer): - """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. - The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split - strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will - split it to ["Jeff", "Buckley"] and return an index per word. - - For example: - ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] - ["Metal", "Rock", "Classical"] => [0, 223, 51] - """ - def __init__(self, n_bins: int, pad_idx: int = 0): - self.n_bins = n_bins - self.pad_idx = pad_idx - - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - output, lengths = [], [] - for text in texts: - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(self.pad_idx) - lengths.append(0) - else: - output.append(hash_trick(text, self.n_bins)) - lengths.append(1) - - tokens = torch.LongTensor(output).unsqueeze(1) - mask = length_to_mask(torch.IntTensor(lengths)).int() - return tokens, mask - - -class BaseConditioner(nn.Module): - """Base model for all conditioner modules. - We allow the output dim to be different than the hidden dim for two reasons: - 1) keep our LUTs small when the vocab is large; - 2) make all condition dims consistent. - - Args: - dim (int): Hidden dim of the model. - output_dim (int): Output dim of the conditioner. - """ - def __init__(self, dim: int, output_dim: int): - super().__init__() - self.dim = dim - self.output_dim = output_dim - self.output_proj = nn.Linear(dim, output_dim) - - def tokenize(self, *args, **kwargs) -> tp.Any: - """Should be any part of the processing that will lead to a synchronization - point, e.g. BPE tokenization with transfer to the GPU. - - The returned value will be saved and return later when calling forward(). - """ - raise NotImplementedError() - - def forward(self, inputs: tp.Any) -> ConditionType: - """Gets input that should be used as conditioning (e.g, genre, description or a waveform). - Outputs a ConditionType, after the input data was embedded as a dense vector. 
- - Returns: - ConditionType: - - A tensor of size [B, T, D] where B is the batch size, T is the length of the - output embedding and D is the dimension of the embedding. - - And a mask indicating where the padding tokens are. - """ - raise NotImplementedError() - - -class TextConditioner(BaseConditioner): - ... - - -class LUTConditioner(TextConditioner): - """Lookup table TextConditioner. - - Args: - n_bins (int): Number of bins. - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - tokenizer (str): Name of the tokenizer. - pad_idx (int, optional): Index for padding token. Defaults to 0. - """ - def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): - super().__init__(dim, output_dim) - self.embed = nn.Embedding(n_bins, dim) - self.tokenizer: Tokenizer - if tokenizer == 'whitespace': - self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) - elif tokenizer == 'noop': - self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) - else: - raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - device = self.embed.weight.device - tokens, mask = self.tokenizer(x) - tokens, mask = tokens.to(device), mask.to(device) - return tokens, mask - - def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: - tokens, mask = inputs - embeds = self.embed(tokens) - embeds = self.output_proj(embeds) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class T5Conditioner(TextConditioner): - """T5-based TextConditioner. - - Args: - name (str): Name of the T5 model. - output_dim (int): Output dim of the conditioner. - finetune (bool): Whether to fine-tune T5 at train time. - device (str): Device for T5 Conditioner. - autocast_dtype (tp.Optional[str], optional): Autocast dtype. - word_dropout (float, optional): Word dropout probability. - normalize_text (bool, optional): Whether to apply text normalization. - """ - MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", - "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", - "google/flan-t5-xl", "google/flan-t5-xxl"] - MODELS_DIMS = { - "t5-small": 512, - "t5-base": 768, - "t5-large": 1024, - "t5-3b": 1024, - "t5-11b": 1024, - "google/flan-t5-small": 512, - "google/flan-t5-base": 768, - "google/flan-t5-large": 1024, - "google/flan-t5-xl": 2048, - "google/flan-t5-xxl": 4096, - } - - def __init__(self, name: str, output_dim: int, finetune: bool, device: str, - autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., - normalize_text: bool = False): - assert name in self.MODELS, f"Unrecognized t5 model name (should be in {self.MODELS})" - super().__init__(self.MODELS_DIMS[name], output_dim) - self.device = device - self.name = name - self.finetune = finetune - self.word_dropout = word_dropout - if autocast_dtype is None or self.device == 'cpu': - self.autocast = TorchAutocast(enabled=False) - if self.device != 'cpu': - logger.warning("T5 has no autocast, this might lead to NaN") - else: - dtype = getattr(torch, autocast_dtype) - assert isinstance(dtype, torch.dtype) - logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") - self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) - # Let's disable logging temporarily because T5 will vomit some errors otherwise.
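- # (Pattern: save the module-wide disable level, raise it to ERROR while the tokenizer and - # model load, then restore the saved level in the `finally` block below.)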
- # thanks https://gist.github.com/simon-weber/7853144
- previous_level = logging.root.manager.disable
- logging.disable(logging.ERROR)
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- try:
- self.t5_tokenizer = T5Tokenizer.from_pretrained(name)
- t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune)
- finally:
- logging.disable(previous_level)
- if finetune:
- self.t5 = t5
- else:
- # this makes sure that the t5 model is not part
- # of the saved checkpoint
- self.__dict__['t5'] = t5.to(device)
-
- self.normalize_text = normalize_text
- if normalize_text:
- self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True)
-
- def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]:
- # if current sample doesn't have a certain attribute, replace with empty string
- entries: tp.List[str] = [xi if xi is not None else "" for xi in x]
- if self.normalize_text:
- _, _, entries = self.text_normalizer(entries, return_text=True)
- if self.word_dropout > 0. and self.training:
- new_entries = []
- for entry in entries:
- words = [word for word in entry.split(" ") if random.random() >= self.word_dropout]
- new_entries.append(" ".join(words))
- entries = new_entries
-
- empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""])
-
- inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device)
- mask = inputs['attention_mask']
- mask[empty_idx, :] = 0 # zero-out index where the input is non-existent
- return inputs
-
- def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType:
- mask = inputs['attention_mask']
- with torch.set_grad_enabled(self.finetune), self.autocast:
- embeds = self.t5(**inputs).last_hidden_state
- embeds = self.output_proj(embeds.to(self.output_proj.weight))
- embeds = (embeds * mask.unsqueeze(-1))
- return embeds, mask
-
-
-class WaveformConditioner(BaseConditioner):
- """Base class for all conditioners that take a waveform as input.
- Classes that inherit must implement `_get_wav_embedding` that outputs
- a continuous tensor, and `_downsampling_factor` that returns the down-sampling
- factor of the embedding model.
-
- Args:
- dim (int): The internal representation dimension.
- output_dim (int): Output dimension.
- device (tp.Union[torch.device, str]): Device.
- """
- def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]):
- super().__init__(dim, output_dim)
- self.device = device
-
- def tokenize(self, x: WavCondition) -> WavCondition:
- wav, length, sample_rate, path, seek_time = x
- assert length is not None
- return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time)
-
- def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor:
- """Gets as input a WavCondition and returns a dense embedding."""
- raise NotImplementedError()
-
- def _downsampling_factor(self):
- """Returns the downsampling factor of the embedding model."""
- raise NotImplementedError()
-
- def forward(self, x: WavCondition) -> ConditionType:
- """Extract condition embedding and mask from a waveform and its metadata.
- Args:
- x (WavCondition): Waveform condition containing raw waveform and metadata.
- Returns: - ConditionType: a dense vector representing the conditioning along with its mask - """ - wav, lengths, *_ = x - with torch.no_grad(): - embeds = self._get_wav_embedding(x) - embeds = embeds.to(self.output_proj.weight) - embeds = self.output_proj(embeds) - - if lengths is not None: - lengths = lengths / self._downsampling_factor() - mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore - else: - mask = torch.ones_like(embeds) - embeds = (embeds * mask.unsqueeze(2).to(self.device)) - - return embeds, mask - - -class ChromaStemConditioner(WaveformConditioner): - """Chroma conditioner based on stems. - The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as - the drums and bass often dominate the chroma leading to the chroma features - not containing information about the melody. - - Args: - output_dim (int): Output dimension for the conditioner. - sample_rate (int): Sample rate for the chroma extractor. - n_chroma (int): Number of chroma bins for the chroma extractor. - radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). - duration (int): duration used during training. This is later used for correct padding - in case we are using chroma as prefix. - match_len_on_eval (bool, optional): if True then all chromas are padded to the training - duration. Defaults to False. - eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as - conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). - Defaults to None. - n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. - device (tp.Union[torch.device, str], optional): Device for the conditioner. - **kwargs: Additional parameters for the chroma extractor. - """ - def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, - duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, - n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, - device: tp.Union[torch.device, str] = 'cpu', **kwargs): - from demucs import pretrained - super().__init__(dim=n_chroma, output_dim=output_dim, device=device) - self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) - self.sample_rate = sample_rate - self.match_len_on_eval = match_len_on_eval - self.duration = duration - self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) - stem_sources: list = self.demucs.sources # type: ignore - self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) - self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, - radix2_exp=radix2_exp, **kwargs).to(device) - self.chroma_len = self._get_chroma_len() - self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) - self.cache = None - if cache_path is not None: - self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, - compute_embed_fn=self._get_full_chroma_for_cache, - extract_embed_fn=self._extract_chroma_chunk) - - def _downsampling_factor(self) -> int: - return self.chroma.winhop - - def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: - """Load pre-defined waveforms from a json. - These waveforms will be used for chroma extraction during evaluation. 
- This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). - """ - if path is None: - return None - - logger.info(f"Loading evaluation wavs from {path}") - from audiocraft.data.audio_dataset import AudioDataset - dataset: AudioDataset = AudioDataset.from_meta( - path, segment_duration=self.duration, min_audio_duration=self.duration, - sample_rate=self.sample_rate, channels=1) - - if len(dataset) > 0: - eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) - logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") - return eval_wavs - else: - raise ValueError("Could not find evaluation wavs, check lengths of wavs") - - def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: - self.eval_wavs = eval_wavs - - def has_eval_wavs(self) -> bool: - return self.eval_wavs is not None - - def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: - """Sample wavs from a predefined list.""" - assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." - total_eval_wavs = len(self.eval_wavs) - out = self.eval_wavs - if num_samples > total_eval_wavs: - out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) - return out[torch.randperm(len(out))][:num_samples] - - def _get_chroma_len(self) -> int: - """Get length of chroma during training.""" - dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) - dummy_chr = self.chroma(dummy_wav) - return dummy_chr.shape[1] - - @torch.no_grad() - def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: - """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" - from demucs.apply import apply_model - from demucs.audio import convert_audio - with self.autocast: - wav = convert_audio( - wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore - stems = apply_model(self.demucs, wav, device=self.device) - stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning - mix_wav = stems.sum(1) # merge extracted stems to single waveform - mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore - return mix_wav - - @torch.no_grad() - def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: - """Extract chroma features from the waveform.""" - with self.autocast: - return self.chroma(wav) - - @torch.no_grad() - def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: - """Compute wav embedding, applying stem and chroma extraction.""" - # avoid 0-size tensors when we are working with null conds - if wav.shape[-1] == 1: - return self._extract_chroma(wav) - stems = self._get_stemmed_wav(wav, sample_rate) - chroma = self._extract_chroma(stems) - return chroma - - @torch.no_grad() - def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: - """Extract chroma from the whole audio waveform at the given path.""" - wav, sr = audio_read(path) - wav = wav[None].to(self.device) - wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) - chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] - return chroma - - def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: - """Extract a chunk of chroma from the full chroma derived from the full waveform.""" - wav_length = x.wav.shape[-1] - seek_time = x.seek_time[idx] 
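To make the chunk arithmetic on the next lines concrete, a worked example under assumed values (a 32 kHz sample rate and a chroma hop of 4096 are illustrative, not values taken from this file):

    frame_rate = 32000 / 4096                          # ~7.81 chroma frames per second
    index = int(frame_rate * 5.0)                      # seek_time of 5.0 s -> start frame 39
    target_length = int(frame_rate * 320000 / 32000)   # a 10 s excerpt -> 78 frames
    chunk = full_chroma[index: index + target_length]  # then zero-padded if too short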
- assert seek_time is not None, ( - "WavCondition seek_time is required " - "when extracting chroma chunks from pre-computed chroma.") - full_chroma = full_chroma.float() - frame_rate = self.sample_rate / self._downsampling_factor() - target_length = int(frame_rate * wav_length / self.sample_rate) - index = int(frame_rate * seek_time) - out = full_chroma[index: index + target_length] - out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] - return out.to(self.device) - - @torch.no_grad() - def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: - """Get the wav embedding from the WavCondition. - The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly - or will rely on the embedding cache to load the pre-computed embedding if relevant. - """ - sampled_wav: tp.Optional[torch.Tensor] = None - if not self.training and self.eval_wavs is not None: - warn_once(logger, "Using precomputed evaluation wavs!") - sampled_wav = self._sample_eval_wavs(len(x.wav)) - - no_undefined_paths = all(p is not None for p in x.path) - no_nullified_cond = x.wav.shape[-1] > 1 - if sampled_wav is not None: - chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) - elif self.cache is not None and no_undefined_paths and no_nullified_cond: - paths = [Path(p) for p in x.path if p is not None] - chroma = self.cache.get_embed_from_cache(paths, x) - else: - assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." - chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) - - if self.match_len_on_eval: - B, T, C = chroma.shape - if T > self.chroma_len: - chroma = chroma[:, :self.chroma_len] - logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") - elif T < self.chroma_len: - n_repeat = int(math.ceil(self.chroma_len / T)) - chroma = chroma.repeat(1, n_repeat, 1) - chroma = chroma[:, :self.chroma_len] - logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") - - return chroma - - def tokenize(self, x: WavCondition) -> WavCondition: - """Apply WavConditioner tokenization and populate cache if needed.""" - x = super().tokenize(x) - no_undefined_paths = all(p is not None for p in x.path) - if self.cache is not None and no_undefined_paths: - paths = [Path(p) for p in x.path if p is not None] - self.cache.populate_embed_cache(paths, x) - return x - - -class JointEmbeddingConditioner(BaseConditioner): - """Joint embedding conditioning supporting both audio or text conditioning. - - Args: - dim (int): Dimension. - output_dim (int): Output dimension. - device (str): Device. - attribute (str): Attribute used by the conditioner. - autocast_dtype (str): Autocast for the conditioner. - quantize (bool): Whether to quantize the CLAP embedding. - n_q (int): Number of residual quantizers (used if quantize is true). - bins (int): Quantizers' codebooks size (used if quantize is true). - kwargs: Additional parameters for residual vector quantizer. 
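A sketch of the shape flow through `forward` when `quantize` is enabled (batch size and dims are illustrative):

    embed, empty_idx = self._get_embed(x)       # [B, dim], e.g. [4, 512]
    embed = embed.view(-1, self.dim, 1)         # [4, 512, 1]; the RVQ expects [B, C, T]
    out = self.quantizer(embed, frame_rate=1).x.view(-1, self.dim)   # [4, 512]
    out = self.output_proj(out).view(-1, 1, self.output_dim)         # [4, 1, output_dim]
    # mask: [4, 1], zeroed at the rows listed in empty_idx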
- """ - def __init__(self, dim: int, output_dim: int, device: str, attribute: str, - autocast_dtype: tp.Optional[str] = 'float32', quantize: bool = True, - n_q: int = 12, bins: int = 1024, **kwargs): - super().__init__(dim=dim, output_dim=output_dim) - self.device = device - self.attribute = attribute - if autocast_dtype is None or device == 'cpu': - self.autocast = TorchAutocast(enabled=False) - logger.warning("JointEmbeddingConditioner has no autocast, this might lead to NaN.") - else: - dtype = getattr(torch, autocast_dtype) - assert isinstance(dtype, torch.dtype) - logger.info(f"JointEmbeddingConditioner will be evaluated with autocast as {autocast_dtype}.") - self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) - # residual vector quantizer to discretize the conditioned embedding - self.quantizer: tp.Optional[ResidualVectorQuantizer] = None - if quantize: - self.quantizer = ResidualVectorQuantizer(dim, n_q=n_q, bins=bins, **kwargs) - - def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Get joint embedding in latent space from the inputs. - - Returns: - tuple[torch.Tensor, torch.Tensor]: Tensor for the latent embedding - and corresponding empty indexes. - """ - raise NotImplementedError() - - def forward(self, x: JointEmbedCondition) -> ConditionType: - with self.autocast: - embed, empty_idx = self._get_embed(x) - if self.quantizer is not None: - embed = embed.view(-1, self.dim, 1) - q_res = self.quantizer(embed, frame_rate=1) - out_embed = q_res.x.view(-1, self.dim) - else: - out_embed = embed - out_embed = self.output_proj(out_embed).view(-1, 1, self.output_dim) - mask = torch.ones(*out_embed.shape[:2], device=out_embed.device) - mask[empty_idx, :] = 0 # zero-out index where the input is non-existant - out_embed = (out_embed * mask.unsqueeze(-1)) - return out_embed, mask - - def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: - return x - - -class CLAPEmbeddingConditioner(JointEmbeddingConditioner): - """Joint Embedding conditioner based on pre-trained CLAP model. - - This CLAP-based conditioner supports a caching mechanism - over the computed embeddings for faster training. - - Args: - dim (int): Dimension. - output_dim (int): Output dimension. - device (str): Device. - attribute (str): Attribute used by the conditioner. - quantize (bool): Whether to quantize the CLAP embedding. - n_q (int): Number of residual quantizers (used if quantize is true). - bins (int): Quantizers' codebooks size (used if quantize is true). - checkpoint (str): Path to CLAP checkpoint. - model_arch (str): CLAP model architecture. - enable_fusion (bool): Enable fusion for CLAP model. - sample_rate (int): Sample rate used by CLAP model. - max_audio_length (float): Maximum audio length for CLAP model. - audio_stride (float): Stride to use for getting a CLAP embedding on the full sequence. - normalize (bool): Whether to normalize the CLAP embedding. - text_p (float): Probability of using text representation instead of audio at train time. - batch_size (Optional[int]): Batch size for CLAP embedding computation. - autocast_dtype (str): Autocast for the conditioner. - cache_path (Optional[str]): Path for pre-computed embeddings caching. - kwargs: Additional parameters for residual vector quantizer. 
- """ - def __init__(self, dim: int, output_dim: int, device: str, attribute: str, - quantize: bool, n_q: int, bins: int, checkpoint: tp.Union[str, Path], model_arch: str, - enable_fusion: bool, sample_rate: int, max_audio_length: int, audio_stride: int, - normalize: bool, text_p: bool, batch_size: tp.Optional[int] = None, - autocast_dtype: tp.Optional[str] = 'float32', cache_path: tp.Optional[str] = None, **kwargs): - try: - import laion_clap # type: ignore - except ImportError: - raise ImportError("Please install CLAP to use the CLAPEmbeddingConditioner: 'pip install laion_clap'") - checkpoint = AudioCraftEnvironment.resolve_reference_path(checkpoint) - clap_tokenize = RobertaTokenizer.from_pretrained('roberta-base') - clap_model = laion_clap.CLAP_Module(enable_fusion=enable_fusion, amodel=model_arch) - load_clap_state_dict(clap_model, checkpoint) - clap_model.eval() - clap_model.to(device) - super().__init__(dim=dim, output_dim=output_dim, device=device, attribute=attribute, - autocast_dtype=autocast_dtype, quantize=quantize, n_q=n_q, bins=bins, - **kwargs) - self.checkpoint = checkpoint - self.enable_fusion = enable_fusion - self.model_arch = model_arch - self.clap: laion_clap.CLAP_Module - self.clap_tokenize: RobertaTokenizer - self.clap_sample_rate = sample_rate - self.clap_max_frames = int(self.clap_sample_rate * max_audio_length) - self.clap_stride = int(self.clap_sample_rate * audio_stride) - self.batch_size = batch_size or 1 - self.normalize = normalize - self.text_p = text_p - self.__dict__['clap_tokenize'] = clap_tokenize - self.__dict__['clap'] = clap_model - self.wav_cache, self.text_cache = None, None - if cache_path is not None: - self.wav_cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, - compute_embed_fn=self._get_wav_embedding_for_cache, - extract_embed_fn=self._extract_wav_embedding_chunk) - self.text_cache = EmbeddingCache(Path(cache_path) / 'text', self.device, - compute_embed_fn=self._get_text_embedding_for_cache) - - def _tokenizer(self, texts: tp.Union[str, tp.List[str]]) -> dict: - # we use the default params from CLAP module here as well - return self.clap_tokenize(texts, padding="max_length", truncation=True, max_length=77, return_tensors="pt") - - def _compute_text_embedding(self, text: tp.List[str]) -> torch.Tensor: - """Compute text embedding from CLAP model on a given a batch of text. - - Args: - text (list[str]): List of text for the batch, with B items. - Returns: - torch.Tensor: CLAP embedding derived from text, of shape [B, 1, D], with D the CLAP embedding dimension. - """ - with torch.no_grad(): - embed = self.clap.get_text_embedding(text, tokenizer=self._tokenizer, use_tensor=True) - return embed.view(embed.size(0), 1, embed.size(-1)) - - def _get_text_embedding_for_cache(self, path: tp.Union[Path, str], - x: JointEmbedCondition, idx: int) -> torch.Tensor: - """Get text embedding function for the cache.""" - text = x.text[idx] - text = text if text is not None else "" - return self._compute_text_embedding([text])[0] - - def _preprocess_wav(self, wav: torch.Tensor, length: torch.Tensor, sample_rates: tp.List[int]) -> torch.Tensor: - """Preprocess wav to expected format by CLAP model. - - Args: - wav (torch.Tensor): Audio wav, of shape [B, C, T]. - length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B]. - sample_rates (list[int]): Sample rates for each sample in the batch - Returns: - torch.Tensor: Audio wav of shape [B, T]. 
- """ - assert wav.dim() == 3, "Expecting wav to be [B, C, T]" - if sample_rates is not None: - _wav = [] - for i, audio in enumerate(wav): - sr = sample_rates[i] - audio = convert_audio(audio, from_rate=sr, to_rate=self.clap_sample_rate, to_channels=1) - _wav.append(audio) - wav = torch.stack(_wav, dim=0) - wav = wav.mean(dim=1) - return wav - - def _compute_wav_embedding(self, wav: torch.Tensor, length: torch.Tensor, - sample_rates: tp.List[int], reduce_mean: bool = False) -> torch.Tensor: - """Compute audio wave embedding from CLAP model. - - Since CLAP operates on a fixed sequence length audio inputs and we need to process longer audio sequences, - we calculate the wav embeddings on `clap_max_frames` windows with `clap_stride`-second stride and - average the resulting embeddings. - - Args: - wav (torch.Tensor): Audio wav, of shape [B, C, T]. - length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B]. - sample_rates (list[int]): Sample rates for each sample in the batch. - reduce_mean (bool): Whether to get the average tensor. - Returns: - torch.Tensor: Audio embedding of shape [B, F, D], F being the number of chunks, D the dimension. - """ - with torch.no_grad(): - wav = self._preprocess_wav(wav, length, sample_rates) - B, T = wav.shape - if T >= self.clap_max_frames: - wav = wav.unfold(-1, self.clap_max_frames, self.clap_stride) # [B, F, T] - else: - wav = wav.view(-1, 1, T) # [B, F, T] with F=1 - wav = einops.rearrange(wav, 'b f t -> (b f) t') - embed_list = [] - for i in range(0, wav.size(0), self.batch_size): - _wav = wav[i:i+self.batch_size, ...] - _embed = self.clap.get_audio_embedding_from_data(_wav, use_tensor=True) - embed_list.append(_embed) - embed = torch.cat(embed_list, dim=0) - embed = einops.rearrange(embed, '(b f) d -> b f d', b=B) - if reduce_mean: - embed = embed.mean(dim=1, keepdim=True) - return embed # [B, F, D] with F=1 if reduce_mean is True - - def _get_wav_embedding_for_cache(self, path: tp.Union[str, Path], - x: JointEmbedCondition, idx: int) -> torch.Tensor: - """Compute audio wave embedding for the cache. - The embedding is computed on a given audio read from file. - - Args: - path (str or Path): Path to the full audio file. - Returns: - torch.Tensor: Single-item tensor of shape [F, D], F being the number of chunks, D the dimension. - """ - wav, sr = audio_read(path) # [C, T] - wav = wav.unsqueeze(0).to(self.device) # [1, C, T] - wav_len = torch.LongTensor([wav.shape[-1]]).to(self.device) - embed = self._compute_wav_embedding(wav, wav_len, [sr], reduce_mean=False) # [B, F, D] - return embed.squeeze(0) # [F, D] - - def _extract_wav_embedding_chunk(self, full_embed: torch.Tensor, x: JointEmbedCondition, idx: int) -> torch.Tensor: - """Extract the chunk of embedding matching the seek_time and length from the full CLAP audio embedding. - - Args: - full_embed (torch.Tensor): CLAP embedding computed on the full wave, of shape [F, D]. - x (JointEmbedCondition): Joint embedding condition for the full batch. - idx (int): Index considered for the given embedding to extract. - Returns: - torch.Tensor: Wav embedding averaged on sliding window, of shape [1, D]. - """ - sample_rate = x.sample_rate[idx] - seek_time = x.seek_time[idx] - seek_time = 0. 
if seek_time is None else seek_time
- clap_stride = int(self.clap_stride / self.clap_sample_rate) * sample_rate
- end_seek_time = seek_time + self.clap_max_frames / self.clap_sample_rate
- start_offset = int(seek_time * sample_rate // clap_stride)
- end_offset = int(end_seek_time * sample_rate // clap_stride)
- wav_embed = full_embed[start_offset:end_offset, ...]
- wav_embed = wav_embed.mean(dim=0, keepdim=True)
- return wav_embed.to(self.device) # [F, D]
-
- def _get_text_embedding(self, x: JointEmbedCondition) -> torch.Tensor:
- """Get CLAP embedding from a batch of text descriptions."""
- no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from cache when condition dropout is applied
- if self.text_cache is not None and no_nullified_cond:
- assert all(p is not None for p in x.path), "Cache requires all JointEmbedCondition paths to be provided"
- paths = [Path(p) for p in x.path if p is not None]
- embed = self.text_cache.get_embed_from_cache(paths, x)
- else:
- text = [xi if xi is not None else "" for xi in x.text]
- embed = self._compute_text_embedding(text)
- if self.normalize:
- embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1)
- return embed
-
- def _get_wav_embedding(self, x: JointEmbedCondition) -> torch.Tensor:
- """Get CLAP embedding from a batch of audio tensors (and corresponding sample rates)."""
- no_undefined_paths = all(p is not None for p in x.path)
- no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from cache when condition dropout is applied
- if self.wav_cache is not None and no_undefined_paths and no_nullified_cond:
- paths = [Path(p) for p in x.path if p is not None]
- embed = self.wav_cache.get_embed_from_cache(paths, x)
- else:
- embed = self._compute_wav_embedding(x.wav, x.length, x.sample_rate, reduce_mean=True)
- if self.normalize:
- embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1)
- return embed
-
- def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition:
- # Try to limit sync points as much as possible when the cache is warm.
- no_undefined_paths = all(p is not None for p in x.path)
- if self.wav_cache is not None and no_undefined_paths:
- assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided"
- paths = [Path(p) for p in x.path if p is not None]
- self.wav_cache.populate_embed_cache(paths, x)
- if self.text_cache is not None and no_undefined_paths:
- assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided"
- paths = [Path(p) for p in x.path if p is not None]
- self.text_cache.populate_embed_cache(paths, x)
- return x
-
- def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]:
- """Extract shared latent representation from either the wav or the text using CLAP."""
- # decide whether to use text embedding at train time or not
- use_text_embed = random.random() < self.text_p
- if self.training and not use_text_embed:
- embed = self._get_wav_embedding(x)
- empty_idx = torch.LongTensor([]) # we assume we always have the audio wav
- else:
- embed = self._get_text_embedding(x)
- empty_idx = torch.LongTensor([i for i, xi in enumerate(x.text) if xi is None or xi == ""])
- return embed, empty_idx
-
-
-def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str) -> ConditioningAttributes:
- """Utility function for nullifying an attribute inside a ConditioningAttributes object.
- If the condition is of type "wav", then nullify it using the `nullify_wav` function.
- If the condition is of any other type, set its value to None. - Works in-place. - """ - if condition_type not in ['text', 'wav', 'joint_embed']: - raise ValueError( - "dropout_condition got an unexpected condition type!" - f" expected 'text', 'wav' or 'joint_embed' but got '{condition_type}'" - ) - - if condition not in getattr(sample, condition_type): - raise ValueError( - "dropout_condition received an unexpected condition!" - f" expected wav={sample.wav.keys()} and text={sample.text.keys()}" - f" but got '{condition}' of type '{condition_type}'!" - ) - - if condition_type == 'wav': - wav_cond = sample.wav[condition] - sample.wav[condition] = nullify_wav(wav_cond) - elif condition_type == 'joint_embed': - embed = sample.joint_embed[condition] - sample.joint_embed[condition] = nullify_joint_embed(embed) - else: - sample.text[condition] = None - - return sample - - -class DropoutModule(nn.Module): - """Base module for all dropout modules.""" - def __init__(self, seed: int = 1234): - super().__init__() - self.rng = torch.Generator() - self.rng.manual_seed(seed) - - -class AttributeDropout(DropoutModule): - """Dropout with a given probability per attribute. - This is different from the behavior of ClassifierFreeGuidanceDropout as this allows for attributes - to be dropped out separately. For example, "artist" can be dropped while "genre" remains. - This is in contrast to ClassifierFreeGuidanceDropout where if "artist" is dropped "genre" - must also be dropped. - - Args: - p (tp.Dict[str, float]): A dict mapping between attributes and dropout probability. For example: - ... - "genre": 0.1, - "artist": 0.5, - "wav": 0.25, - ... - active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False. - seed (int, optional): Random seed. - """ - def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234): - super().__init__(seed=seed) - self.active_on_eval = active_on_eval - # construct dict that return the values from p otherwise 0 - self.p = {} - for condition_type, probs in p.items(): - self.p[condition_type] = defaultdict(lambda: 0, probs) - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (list[ConditioningAttributes]): List of conditions. - Returns: - list[ConditioningAttributes]: List of conditions after certain attributes were set to None. - """ - if not self.training and not self.active_on_eval: - return samples - - samples = deepcopy(samples) - for condition_type, ps in self.p.items(): # for condition types [text, wav] - for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre]) - if torch.rand(1, generator=self.rng).item() < p: - for sample in samples: - dropout_condition(sample, condition_type, condition) - return samples - - def __repr__(self): - return f"AttributeDropout({dict(self.p)})" - - -class ClassifierFreeGuidanceDropout(DropoutModule): - """Classifier Free Guidance dropout. - All attributes are dropped with the same probability. - - Args: - p (float): Probability to apply condition dropout during training. - seed (int): Random seed. - """ - def __init__(self, p: float, seed: int = 1234): - super().__init__(seed=seed) - self.p = p - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (list[ConditioningAttributes]): List of conditions. - Returns: - list[ConditioningAttributes]: List of conditions after all attributes were set to None. 
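A usage sketch (the probability value is illustrative):

    cfg_dropout = ClassifierFreeGuidanceDropout(p=0.1)
    cfg_dropout.train()                 # the module is a no-op in eval mode
    samples = cfg_dropout(samples)      # with probability p, every wav/text condition
                                        # in the batch is nullified at once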
- """ - if not self.training: - return samples - - # decide on which attributes to drop in a batched fashion - drop = torch.rand(1, generator=self.rng).item() < self.p - if not drop: - return samples - - # nullify conditions of all attributes - samples = deepcopy(samples) - for condition_type in ["wav", "text"]: - for sample in samples: - for condition in sample.attributes[condition_type]: - dropout_condition(sample, condition_type, condition) - return samples - - def __repr__(self): - return f"ClassifierFreeGuidanceDropout(p={self.p})" - - -class ConditioningProvider(nn.Module): - """Prepare and provide conditions given all the supported conditioners. - - Args: - conditioners (dict): Dictionary of conditioners. - device (torch.device or str, optional): Device for conditioners and output condition types. - """ - def __init__(self, conditioners: tp.Dict[str, BaseConditioner], device: tp.Union[torch.device, str] = "cpu"): - super().__init__() - self.device = device - self.conditioners = nn.ModuleDict(conditioners) - - @property - def joint_embed_conditions(self): - return [m.attribute for m in self.conditioners.values() if isinstance(m, JointEmbeddingConditioner)] - - @property - def has_joint_embed_conditions(self): - return len(self.joint_embed_conditions) > 0 - - @property - def text_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)] - - @property - def wav_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)] - - @property - def has_wav_condition(self): - return len(self.wav_conditions) > 0 - - def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]: - """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly. - This should be called before starting any real GPU work to avoid synchronization points. - This will return a dict matching conditioner names to their arbitrary tokenized representations. - - Args: - inputs (list[ConditioningAttributes]): List of ConditioningAttributes objects containing - text and wav conditions. - """ - assert all([isinstance(x, ConditioningAttributes) for x in inputs]), ( - "Got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]", - f" but types were {set([type(x) for x in inputs])}" - ) - - output = {} - text = self._collate_text(inputs) - wavs = self._collate_wavs(inputs) - joint_embeds = self._collate_joint_embeds(inputs) - - assert set(text.keys() | wavs.keys() | joint_embeds.keys()).issubset(set(self.conditioners.keys())), ( - f"Got an unexpected attribute! Expected {self.conditioners.keys()}, ", - f"got {text.keys(), wavs.keys(), joint_embeds.keys()}" - ) - - for attribute, batch in chain(text.items(), wavs.items(), joint_embeds.items()): - output[attribute] = self.conditioners[attribute].tokenize(batch) - return output - - def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]: - """Compute pairs of `(embedding, mask)` using the configured conditioners and the tokenized representations. - The output is for example: - { - "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])), - "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])), - ... - } - - Args: - tokenized (dict): Dict of tokenized representations as returned by `tokenize()`. 
- """ - output = {} - for attribute, inputs in tokenized.items(): - condition, mask = self.conditioners[attribute](inputs) - output[attribute] = (condition, mask) - return output - - def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]: - """Given a list of ConditioningAttributes objects, compile a dictionary where the keys - are the attributes and the values are the aggregated input per attribute. - For example: - Input: - [ - ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...), - ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...), - ] - Output: - { - "genre": ["Rock", "Hip-hop"], - "description": ["A rock song with a guitar solo", "A hip-hop verse"] - } - - Args: - samples (list of ConditioningAttributes): List of ConditioningAttributes samples. - Returns: - dict[str, list[str, optional]]: A dictionary mapping an attribute name to text batch. - """ - out: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list) - texts = [x.text for x in samples] - for text in texts: - for condition in self.text_conditions: - out[condition].append(text[condition]) - return out - - def _collate_wavs(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, WavCondition]: - """Generate a dict where the keys are attributes by which we fetch similar wavs, - and the values are Tensors of wavs according to said attributes. - - *Note*: by the time the samples reach this function, each sample should have some waveform - inside the "wav" attribute. It should be either: - 1. A real waveform - 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset) - 3. A null waveform due to it being dropped in a dropout module (nullified by dropout) - - Args: - samples (list of ConditioningAttributes): List of ConditioningAttributes samples. - Returns: - dict[str, WavCondition]: A dictionary mapping an attribute name to wavs. - """ - wavs = defaultdict(list) - lengths = defaultdict(list) - sample_rates = defaultdict(list) - paths = defaultdict(list) - seek_times = defaultdict(list) - out: tp.Dict[str, WavCondition] = {} - - for sample in samples: - for attribute in self.wav_conditions: - wav, length, sample_rate, path, seek_time = sample.wav[attribute] - assert wav.dim() == 3, f"Got wav with dim={wav.dim()}, but expected 3 [1, C, T]" - assert wav.size(0) == 1, f"Got wav [B, C, T] with shape={wav.shape}, but expected B == 1" - # mono-channel conditioning - wav = wav.mean(1, keepdim=True) # [1, 1, T] - wavs[attribute].append(wav.flatten()) # [T] - lengths[attribute].append(length) - sample_rates[attribute].extend(sample_rate) - paths[attribute].extend(path) - seek_times[attribute].extend(seek_time) - - # stack all wavs to a single tensor - for attribute in self.wav_conditions: - stacked_wav, _ = collate(wavs[attribute], dim=0) - out[attribute] = WavCondition( - stacked_wav.unsqueeze(1), torch.cat(lengths[attribute]), sample_rates[attribute], - paths[attribute], seek_times[attribute]) - - return out - - def _collate_joint_embeds(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, JointEmbedCondition]: - """Generate a dict where the keys are attributes by which we compute joint embeddings, - and the values are Tensors of pre-computed embeddings and the corresponding text attributes. - - Args: - samples (list[ConditioningAttributes]): List of ConditioningAttributes samples. 
- Returns: - A dictionary mapping an attribute name to joint embeddings. - """ - texts = defaultdict(list) - wavs = defaultdict(list) - lengths = defaultdict(list) - sample_rates = defaultdict(list) - paths = defaultdict(list) - seek_times = defaultdict(list) - channels: int = 0 - - out = {} - for sample in samples: - for attribute in self.joint_embed_conditions: - wav, text, length, sample_rate, path, seek_time = sample.joint_embed[attribute] - assert wav.dim() == 3 - if channels == 0: - channels = wav.size(1) - else: - assert channels == wav.size(1), "not all audio has same number of channels in batch" - assert wav.size(0) == 1, "Expecting single-wav batch in the collate method" - wav = einops.rearrange(wav, "b c t -> (b c t)") # [1, C, T] => [C * T] - wavs[attribute].append(wav) - texts[attribute].extend(text) - lengths[attribute].append(length) - sample_rates[attribute].extend(sample_rate) - paths[attribute].extend(path) - seek_times[attribute].extend(seek_time) - - for attribute in self.joint_embed_conditions: - stacked_texts = texts[attribute] - stacked_paths = paths[attribute] - stacked_seek_times = seek_times[attribute] - stacked_wavs = pad_sequence(wavs[attribute]).to(self.device) - stacked_wavs = einops.rearrange(stacked_wavs, "(c t) b -> b c t", c=channels) - stacked_sample_rates = sample_rates[attribute] - stacked_lengths = torch.cat(lengths[attribute]).to(self.device) - assert stacked_lengths.size(0) == stacked_wavs.size(0) - assert len(stacked_sample_rates) == stacked_wavs.size(0) - assert len(stacked_texts) == stacked_wavs.size(0) - out[attribute] = JointEmbedCondition( - text=stacked_texts, wav=stacked_wavs, - length=stacked_lengths, sample_rate=stacked_sample_rates, - path=stacked_paths, seek_time=stacked_seek_times) - - return out - - -class ConditionFuser(StreamingModule): - """Condition fuser handles the logic to combine the different conditions - to the actual model input. - - Args: - fuse2cond (tp.Dict[str, str]): A dictionary that says how to fuse - each condition. For example: - { - "prepend": ["description"], - "sum": ["genre", "bpm"], - "cross": ["description"], - } - cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention. - cross_attention_pos_emb_scale (int): Scale for positional embeddings in cross attention if used. - """ - FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"] - - def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False, - cross_attention_pos_emb_scale: float = 1.0): - super().__init__() - assert all( - [k in self.FUSING_METHODS for k in fuse2cond.keys()] - ), f"Got invalid fuse method, allowed methods: {self.FUSING_METHODS}" - self.cross_attention_pos_emb = cross_attention_pos_emb - self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale - self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond - self.cond2fuse: tp.Dict[str, str] = {} - for fuse_method, conditions in fuse2cond.items(): - for condition in conditions: - self.cond2fuse[condition] = fuse_method - - def forward( - self, - input: torch.Tensor, - conditions: tp.Dict[str, ConditionType] - ) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]: - """Fuse the conditions to the provided model input. - - Args: - input (torch.Tensor): Transformer input. - conditions (dict[str, ConditionType]): Dict of conditions. - Returns: - tuple[torch.Tensor, torch.Tensor]: The first tensor is the transformer input - after the conditions have been fused. 
The second output tensor is the tensor - used for cross-attention or None if no cross attention inputs exist. - """ - B, T, _ = input.shape - - if 'offsets' in self._streaming_state: - first_step = False - offsets = self._streaming_state['offsets'] - else: - first_step = True - offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device) - - assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \ - f"given conditions contain unknown attributes for fuser, " \ - f"expected {self.cond2fuse.keys()}, got {conditions.keys()}" - cross_attention_output = None - for cond_type, (cond, cond_mask) in conditions.items(): - op = self.cond2fuse[cond_type] - if op == 'sum': - input += cond - elif op == 'input_interpolate': - cond = einops.rearrange(cond, "b t d -> b d t") - cond = F.interpolate(cond, size=input.shape[1]) - input += einops.rearrange(cond, "b d t -> b t d") - elif op == 'prepend': - if first_step: - input = torch.cat([cond, input], dim=1) - elif op == 'cross': - if cross_attention_output is not None: - cross_attention_output = torch.cat([cross_attention_output, cond], dim=1) - else: - cross_attention_output = cond - else: - raise ValueError(f"unknown op ({op})") - - if self.cross_attention_pos_emb and cross_attention_output is not None: - positions = torch.arange( - cross_attention_output.shape[1], - device=cross_attention_output.device - ).view(1, -1, 1) - pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1]) - cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return input, cross_attention_output diff --git a/spaces/RamAnanth1/Youtube-to-HF-Dataset/utils.py b/spaces/RamAnanth1/Youtube-to-HF-Dataset/utils.py deleted file mode 100644 index 501d32d04ad105bb8b04f9c35f3c19332a0f35d5..0000000000000000000000000000000000000000 --- a/spaces/RamAnanth1/Youtube-to-HF-Dataset/utils.py +++ /dev/null @@ -1,48 +0,0 @@ -import json -from typing import Any - -VIDEO_INFO = [ - "id", - "channel", - "channel_id", - "title", - "categories", - "tags", - "description" - ] - -TRANSCRIPT_INFO = [ - "text", - "segments" - ] - -SEGMENTS_INFO = [ - "start", - "end", - "text" - ] - -AUDIO_FILES = [ - ".webm", - ".mp3", - ".flac", - ".wav", - ".m4a" - ] - -YT_OPTIONS = { - "format": "bestaudio/best", - "extractaudio": True, - "audioformat": "mp3", - "yesplaylist": True, - "postprocessors": [{ - "key": "FFmpegExtractAudio", - "preferredcodec": "mp3", - "preferredquality": "192", - }] - } - - -def json_dump(obj: Any, save_path: str) -> None: - with open(save_path, "w") as file: - json.dump(obj, file) \ No newline at end of file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/util/connection.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/util/connection.py deleted file mode 100644 index 6af1138f260e4eaaa0aa242f7f50b918a283b49f..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/util/connection.py +++ /dev/null @@ -1,149 +0,0 @@ -from __future__ import absolute_import - -import socket - -from ..contrib import _appengine_environ -from ..exceptions import LocationParseError -from ..packages import six -from .wait import NoWayToWaitForSocketError, wait_for_read - - -def is_connection_dropped(conn): # Platform-specific - """ - Returns True if the connection is dropped and should be closed. 
- - :param conn: - :class:`http.client.HTTPConnection` object. - - Note: For platforms like AppEngine, this will always return ``False`` to - let the platform handle connection recycling transparently for us. - """ - sock = getattr(conn, "sock", False) - if sock is False: # Platform-specific: AppEngine - return False - if sock is None: # Connection already closed (such as by httplib). - return True - try: - # Returns True if readable, which here means it's been dropped - return wait_for_read(sock, timeout=0.0) - except NoWayToWaitForSocketError: # Platform-specific: AppEngine - return False - - -# This function is copied from socket.py in the Python 2.7 standard -# library test suite. Added to its signature is only `socket_options`. -# One additional modification is that we avoid binding to IPv6 servers -# discovered in DNS if the system doesn't have IPv6 functionality. -def create_connection( - address, - timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, - socket_options=None, -): - """Connect to *address* and return the socket object. - - Convenience function. Connect to *address* (a 2-tuple ``(host, - port)``) and return the socket object. Passing the optional - *timeout* parameter will set the timeout on the socket instance - before attempting to connect. If no *timeout* is supplied, the - global default timeout setting returned by :func:`socket.getdefaulttimeout` - is used. If *source_address* is set it must be a tuple of (host, port) - for the socket to bind as a source address before making the connection. - An host of '' or port 0 tells the OS to use the default. - """ - - host, port = address - if host.startswith("["): - host = host.strip("[]") - err = None - - # Using the value from allowed_gai_family() in the context of getaddrinfo lets - # us select whether to work with IPv4 DNS records, IPv6 records, or both. - # The original create_connection function always returns all records. - family = allowed_gai_family() - - try: - host.encode("idna") - except UnicodeError: - return six.raise_from( - LocationParseError(u"'%s', label empty or too long" % host), None - ) - - for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): - af, socktype, proto, canonname, sa = res - sock = None - try: - sock = socket.socket(af, socktype, proto) - - # If provided, set socket level options before connecting. - _set_socket_options(sock, socket_options) - - if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(timeout) - if source_address: - sock.bind(source_address) - sock.connect(sa) - return sock - - except socket.error as e: - err = e - if sock is not None: - sock.close() - sock = None - - if err is not None: - raise err - - raise socket.error("getaddrinfo returns an empty list") - - -def _set_socket_options(sock, options): - if options is None: - return - - for opt in options: - sock.setsockopt(*opt) - - -def allowed_gai_family(): - """This function is designed to work in the context of - getaddrinfo, where family=socket.AF_UNSPEC is the default and - will perform a DNS search for both IPv6 and IPv4 records.""" - - family = socket.AF_INET - if HAS_IPV6: - family = socket.AF_UNSPEC - return family - - -def _has_ipv6(host): - """Returns True if the system can bind an IPv6 address.""" - sock = None - has_ipv6 = False - - # App Engine doesn't support IPV6 sockets and actually has a quota on the - # number of sockets that can be used, so just early out here instead of - # creating a socket needlessly. 
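For reference, a hedged usage sketch of `create_connection` defined above (the host, port, and socket option are illustrative; `socket_options` entries are unpacked into `setsockopt` by `_set_socket_options`):

    import socket
    sock = create_connection(
        ("example.com", 80), timeout=5.0,
        socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)])
    try:
        sock.sendall(b"HEAD / HTTP/1.1\r\nHost: example.com\r\n\r\n")
    finally:
        sock.close()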
- # See https://github.com/urllib3/urllib3/issues/1446 - if _appengine_environ.is_appengine_sandbox(): - return False - - if socket.has_ipv6: - # has_ipv6 returns true if cPython was compiled with IPv6 support. - # It does not tell us if the system has IPv6 support enabled. To - # determine that we must bind to an IPv6 address. - # https://github.com/urllib3/urllib3/pull/611 - # https://bugs.python.org/issue658327 - try: - sock = socket.socket(socket.AF_INET6) - sock.bind((host, 0)) - has_ipv6 = True - except Exception: - pass - - if sock: - sock.close() - return has_ipv6 - - -HAS_IPV6 = _has_ipv6("::1") diff --git a/spaces/Realcat/image-matching-webui/third_party/lanet/evaluation/descriptor_evaluation.py b/spaces/Realcat/image-matching-webui/third_party/lanet/evaluation/descriptor_evaluation.py deleted file mode 100644 index e7e89dde3ec77a1e83317b5b91fc9b31d887b337..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/lanet/evaluation/descriptor_evaluation.py +++ /dev/null @@ -1,286 +0,0 @@ -# Copyright 2020 Toyota Research Institute. All rights reserved. -# Adapted from: https://github.com/rpautrat/SuperPoint/blob/master/superpoint/evaluations/descriptor_evaluation.py - -import random -from glob import glob -from os import path as osp - -import cv2 -import numpy as np - -from lanet_utils import warp_keypoints - - -def select_k_best(points, descriptors, k): - """Select the k most probable points (and strip their probability). - points has shape (num_points, 3) where the last coordinate is the probability. - - Parameters - ---------- - points: numpy.ndarray (N,3) - Keypoint vector, consisting of (x,y,probability). - descriptors: numpy.ndarray (N,256) - Keypoint descriptors. - k: int - Number of keypoints to select, based on probability. - Returns - ------- - - selected_points: numpy.ndarray (k,2) - k most probable keypoints. - selected_descriptors: numpy.ndarray (k,256) - Descriptors corresponding to the k most probable keypoints. - """ - sorted_prob = points[points[:, 2].argsort(), :2] - sorted_desc = descriptors[points[:, 2].argsort(), :] - start = min(k, points.shape[0]) - selected_points = sorted_prob[-start:, :] - selected_descriptors = sorted_desc[-start:, :] - return selected_points, selected_descriptors - - -def keep_shared_points(keypoints, descriptors, H, shape, keep_k_points=1000): - """ - Compute a list of keypoints from the map, filter the list of points by keeping - only the points that once mapped by H are still inside the shape of the map - and keep at most 'keep_k_points' keypoints in the image. - - Parameters - ---------- - keypoints: numpy.ndarray (N,3) - Keypoint vector, consisting of (x,y,probability). - descriptors: numpy.ndarray (N,256) - Keypoint descriptors. - H: numpy.ndarray (3,3) - Homography. - shape: tuple - Image shape. - keep_k_points: int - Number of keypoints to select, based on probability. - - Returns - ------- - selected_points: numpy.ndarray (k,2) - k most probable keypoints. - selected_descriptors: numpy.ndarray (k,256) - Descriptors corresponding to the k most probable keypoints. 
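Example (a sketch; the image size and point count are illustrative):

    # keep at most 1000 confident keypoints that stay inside a 480x640 view under H
    pts, desc = keep_shared_points(keypoints, descriptors, H, (480, 640), keep_k_points=1000)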
- """ - - def keep_true_keypoints(points, descriptors, H, shape): - """Keep only the points whose warped coordinates by H are still inside shape.""" - warped_points = warp_keypoints(points[:, [1, 0]], H) - warped_points[:, [0, 1]] = warped_points[:, [1, 0]] - mask = ( - (warped_points[:, 0] >= 0) - & (warped_points[:, 0] < shape[0]) - & (warped_points[:, 1] >= 0) - & (warped_points[:, 1] < shape[1]) - ) - return points[mask, :], descriptors[mask, :] - - selected_keypoints, selected_descriptors = keep_true_keypoints( - keypoints, descriptors, H, shape - ) - selected_keypoints, selected_descriptors = select_k_best( - selected_keypoints, selected_descriptors, keep_k_points - ) - return selected_keypoints, selected_descriptors - - -def compute_matching_score(data, keep_k_points=1000): - """ - Compute the matching score between two sets of keypoints with associated descriptors. - - Parameters - ---------- - data: dict - Input dictionary containing: - image_shape: tuple (H,W) - Original image shape. - homography: numpy.ndarray (3,3) - Ground truth homography. - prob: numpy.ndarray (N,3) - Keypoint vector, consisting of (x,y,probability). - warped_prob: numpy.ndarray (N,3) - Warped keypoint vector, consisting of (x,y,probability). - desc: numpy.ndarray (N,256) - Keypoint descriptors. - warped_desc: numpy.ndarray (N,256) - Warped keypoint descriptors. - keep_k_points: int - Number of keypoints to select, based on probability. - - Returns - ------- - ms: float - Matching score. - """ - shape = data["image_shape"] - real_H = data["homography"] - - # Filter out predictions - keypoints = data["prob"][:, :2].T - keypoints = keypoints[::-1] - prob = data["prob"][:, 2] - keypoints = np.stack([keypoints[0], keypoints[1], prob], axis=-1) - - warped_keypoints = data["warped_prob"][:, :2].T - warped_keypoints = warped_keypoints[::-1] - warped_prob = data["warped_prob"][:, 2] - warped_keypoints = np.stack( - [warped_keypoints[0], warped_keypoints[1], warped_prob], axis=-1 - ) - - desc = data["desc"] - warped_desc = data["warped_desc"] - - # Keeps all points for the next frame. The matching for caculating M.Score shouldnt use only in view points. - keypoints, desc = select_k_best(keypoints, desc, keep_k_points) - warped_keypoints, warped_desc = select_k_best( - warped_keypoints, warped_desc, keep_k_points - ) - - # Match the keypoints with the warped_keypoints with nearest neighbor search - # This part needs to be done with crossCheck=False. - # All the matched pairs need to be evaluated without any selection. 
- bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False) - - matches = bf.match(desc, warped_desc) - matches_idx = np.array([m.queryIdx for m in matches]) - m_keypoints = keypoints[matches_idx, :] - matches_idx = np.array([m.trainIdx for m in matches]) - m_warped_keypoints = warped_keypoints[matches_idx, :] - - true_warped_keypoints = warp_keypoints( - m_warped_keypoints[:, [1, 0]], np.linalg.inv(real_H) - )[:, ::-1] - vis_warped = np.all( - (true_warped_keypoints >= 0) & (true_warped_keypoints <= (np.array(shape) - 1)), - axis=-1, - ) - norm1 = np.linalg.norm(true_warped_keypoints - m_keypoints, axis=-1) - - correct1 = norm1 < 3 - count1 = np.sum(correct1 * vis_warped) - score1 = count1 / np.maximum(np.sum(vis_warped), 1.0) - - matches = bf.match(warped_desc, desc) - matches_idx = np.array([m.queryIdx for m in matches]) - m_warped_keypoints = warped_keypoints[matches_idx, :] - matches_idx = np.array([m.trainIdx for m in matches]) - m_keypoints = keypoints[matches_idx, :] - - true_keypoints = warp_keypoints(m_keypoints[:, [1, 0]], real_H)[:, ::-1] - vis = np.all( - (true_keypoints >= 0) & (true_keypoints <= (np.array(shape) - 1)), axis=-1 - ) - norm2 = np.linalg.norm(true_keypoints - m_warped_keypoints, axis=-1) - - correct2 = norm2 < 3 - count2 = np.sum(correct2 * vis) - score2 = count2 / np.maximum(np.sum(vis), 1.0) - - ms = (score1 + score2) / 2 - - return ms - - -def compute_homography(data, keep_k_points=1000): - """ - Compute the homography between 2 sets of Keypoints and descriptors inside data. - Use the homography to compute the correctness metrics (1,3,5). - - Parameters - ---------- - data: dict - Input dictionary containing: - image_shape: tuple (H,W) - Original image shape. - homography: numpy.ndarray (3,3) - Ground truth homography. - prob: numpy.ndarray (N,3) - Keypoint vector, consisting of (x,y,probability). - warped_prob: numpy.ndarray (N,3) - Warped keypoint vector, consisting of (x,y,probability). - desc: numpy.ndarray (N,256) - Keypoint descriptors. - warped_desc: numpy.ndarray (N,256) - Warped keypoint descriptors. - keep_k_points: int - Number of keypoints to select, based on probability. - - Returns - ------- - correctness1: float - correctness1 metric. - correctness3: float - correctness3 metric. - correctness5: float - correctness5 metric. 
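Example (a usage sketch; the dict keys follow the Parameters above, the values are illustrative):

    data = dict(image_shape=(480, 640), homography=real_H,
                prob=kps, warped_prob=warped_kps,
                desc=desc, warped_desc=warped_desc)
    c1, c3, c5 = compute_homography(data, keep_k_points=1000)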
- """ - shape = data["image_shape"] - real_H = data["homography"] - - # Filter out predictions - keypoints = data["prob"][:, :2].T - keypoints = keypoints[::-1] - prob = data["prob"][:, 2] - keypoints = np.stack([keypoints[0], keypoints[1], prob], axis=-1) - - warped_keypoints = data["warped_prob"][:, :2].T - warped_keypoints = warped_keypoints[::-1] - warped_prob = data["warped_prob"][:, 2] - warped_keypoints = np.stack( - [warped_keypoints[0], warped_keypoints[1], warped_prob], axis=-1 - ) - - desc = data["desc"] - warped_desc = data["warped_desc"] - - # Keeps only the points shared between the two views - keypoints, desc = keep_shared_points(keypoints, desc, real_H, shape, keep_k_points) - warped_keypoints, warped_desc = keep_shared_points( - warped_keypoints, warped_desc, np.linalg.inv(real_H), shape, keep_k_points - ) - - bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True) - matches = bf.match(desc, warped_desc) - matches_idx = np.array([m.queryIdx for m in matches]) - m_keypoints = keypoints[matches_idx, :] - matches_idx = np.array([m.trainIdx for m in matches]) - m_warped_keypoints = warped_keypoints[matches_idx, :] - - # Estimate the homography between the matches using RANSAC - H, _ = cv2.findHomography( - m_keypoints[:, [1, 0]], - m_warped_keypoints[:, [1, 0]], - cv2.RANSAC, - 3, - maxIters=5000, - ) - - if H is None: - return 0, 0, 0 - - shape = shape[::-1] - - # Compute correctness - corners = np.array( - [ - [0, 0, 1], - [0, shape[1] - 1, 1], - [shape[0] - 1, 0, 1], - [shape[0] - 1, shape[1] - 1, 1], - ] - ) - real_warped_corners = np.dot(corners, np.transpose(real_H)) - real_warped_corners = real_warped_corners[:, :2] / real_warped_corners[:, 2:] - warped_corners = np.dot(corners, np.transpose(H)) - warped_corners = warped_corners[:, :2] / warped_corners[:, 2:] - - mean_dist = np.mean(np.linalg.norm(real_warped_corners - warped_corners, axis=1)) - correctness1 = float(mean_dist <= 1) - correctness3 = float(mean_dist <= 3) - correctness5 = float(mean_dist <= 5) - - return correctness1, correctness3, correctness5 diff --git a/spaces/Reself/StableVideo/annotator/midas/midas/midas_net_custom.py b/spaces/Reself/StableVideo/annotator/midas/midas/midas_net_custom.py deleted file mode 100644 index 50e4acb5e53d5fabefe3dde16ab49c33c2b7797c..0000000000000000000000000000000000000000 --- a/spaces/Reself/StableVideo/annotator/midas/midas/midas_net_custom.py +++ /dev/null @@ -1,128 +0,0 @@ -"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. -This file contains code that is adapted from -https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py -""" -import torch -import torch.nn as nn - -from .base_model import BaseModel -from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder - - -class MidasNet_small(BaseModel): - """Network for monocular depth estimation. - """ - - def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True, - blocks={'expand': True}): - """Init. - - Args: - path (str, optional): Path to saved model. Defaults to None. - features (int, optional): Number of features. Defaults to 256. - backbone (str, optional): Backbone network for encoder. 
Defaults to efficientnet_lite3.
-        """
-        print("Loading weights: ", path)
-
-        super(MidasNet_small, self).__init__()
-
-        use_pretrained = False if path else True
-
-        self.channels_last = channels_last
-        self.blocks = blocks
-        self.backbone = backbone
-
-        self.groups = 1
-
-        features1=features
-        features2=features
-        features3=features
-        features4=features
-        self.expand = False
-        if "expand" in self.blocks and self.blocks['expand'] == True:
-            self.expand = True
-            features1=features
-            features2=features*2
-            features3=features*4
-            features4=features*8
-
-        self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
-
-        self.scratch.activation = nn.ReLU(False)
-
-        self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
-        self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
-        self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
-        self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
-
-
-        self.scratch.output_conv = nn.Sequential(
-            nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
-            Interpolate(scale_factor=2, mode="bilinear"),
-            nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
-            self.scratch.activation,
-            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
-            nn.ReLU(True) if non_negative else nn.Identity(),
-            nn.Identity(),
-        )
-
-        if path:
-            self.load(path)
-
-
-    def forward(self, x):
-        """Forward pass.
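-        The encoder backbone yields four feature maps (layer1..layer4); the
-        scratch refinenet blocks fuse them top-down, and output_conv upsamples
-        the result to a single-channel depth prediction.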
- - Args: - x (tensor): input data (image) - - Returns: - tensor: depth - """ - if self.channels_last==True: - print("self.channels_last = ", self.channels_last) - x.contiguous(memory_format=torch.channels_last) - - - layer_1 = self.pretrained.layer1(x) - layer_2 = self.pretrained.layer2(layer_1) - layer_3 = self.pretrained.layer3(layer_2) - layer_4 = self.pretrained.layer4(layer_3) - - layer_1_rn = self.scratch.layer1_rn(layer_1) - layer_2_rn = self.scratch.layer2_rn(layer_2) - layer_3_rn = self.scratch.layer3_rn(layer_3) - layer_4_rn = self.scratch.layer4_rn(layer_4) - - - path_4 = self.scratch.refinenet4(layer_4_rn) - path_3 = self.scratch.refinenet3(path_4, layer_3_rn) - path_2 = self.scratch.refinenet2(path_3, layer_2_rn) - path_1 = self.scratch.refinenet1(path_2, layer_1_rn) - - out = self.scratch.output_conv(path_1) - - return torch.squeeze(out, dim=1) - - - -def fuse_model(m): - prev_previous_type = nn.Identity() - prev_previous_name = '' - previous_type = nn.Identity() - previous_name = '' - for name, module in m.named_modules(): - if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU: - # print("FUSED ", prev_previous_name, previous_name, name) - torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True) - elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d: - # print("FUSED ", prev_previous_name, previous_name) - torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True) - # elif previous_type == nn.Conv2d and type(module) == nn.ReLU: - # print("FUSED ", previous_name, name) - # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True) - - prev_previous_type = previous_type - prev_previous_name = previous_name - previous_type = type(module) - previous_name = name \ No newline at end of file diff --git a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/metrics/precision_recall.py b/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/metrics/precision_recall.py deleted file mode 100644 index 8200b7ef51963ae218e3b871de270a826bf10459..0000000000000000000000000000000000000000 --- a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/metrics/precision_recall.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Precision/Recall (PR) from the paper "Improved Precision and Recall -Metric for Assessing Generative Models". Matches the original implementation -by Kynkaanniemi et al. at -https://github.com/kynkaat/improved-precision-and-recall-metric/blob/master/precision_recall.py""" - -import torch -from . 
import metric_utils
-
-#----------------------------------------------------------------------------
-
-def compute_distances(row_features, col_features, num_gpus, rank, col_batch_size):
-    assert 0 <= rank < num_gpus
-    num_cols = col_features.shape[0]
-    num_batches = ((num_cols - 1) // col_batch_size // num_gpus + 1) * num_gpus
-    col_batches = torch.nn.functional.pad(col_features, [0, 0, 0, -num_cols % num_batches]).chunk(num_batches)
-    dist_batches = []
-    for col_batch in col_batches[rank :: num_gpus]:
-        dist_batch = torch.cdist(row_features.unsqueeze(0), col_batch.unsqueeze(0))[0]
-        for src in range(num_gpus):
-            dist_broadcast = dist_batch.clone()
-            if num_gpus > 1:
-                torch.distributed.broadcast(dist_broadcast, src=src)
-            dist_batches.append(dist_broadcast.cpu() if rank == 0 else None)
-    return torch.cat(dist_batches, dim=1)[:, :num_cols] if rank == 0 else None
-
-#----------------------------------------------------------------------------
-
-def compute_pr(opts, max_real, num_gen, nhood_size, row_batch_size, col_batch_size):
-    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
-    detector_kwargs = dict(return_features=True)
-
-    real_features = metric_utils.compute_feature_stats_for_dataset(
-        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
-        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all_torch().to(torch.float16).to(opts.device)
-
-    gen_features = metric_utils.compute_feature_stats_for_generator(
-        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
-        rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all_torch().to(torch.float16).to(opts.device)
-
-    results = dict()
-    for name, manifold, probes in [('precision', real_features, gen_features), ('recall', gen_features, real_features)]:
-        kth = []
-        for manifold_batch in manifold.split(row_batch_size):
-            dist = compute_distances(row_features=manifold_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
-            kth.append(dist.to(torch.float32).kthvalue(nhood_size + 1).values.to(torch.float16) if opts.rank == 0 else None)
-        kth = torch.cat(kth) if opts.rank == 0 else None
-        pred = []
-        for probes_batch in probes.split(row_batch_size):
-            dist = compute_distances(row_features=probes_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
-            pred.append((dist <= kth).any(dim=1) if opts.rank == 0 else None)
-        results[name] = float(torch.cat(pred).to(torch.float32).mean() if opts.rank == 0 else 'nan')
-    return results['precision'], results['recall']
-
-#---------------------------------------------------------------------------- diff --git a/spaces/SAAZIZI/SummarizeAV/Dockerfile b/spaces/SAAZIZI/SummarizeAV/Dockerfile deleted file mode 100644 index c5b40ea88717a405017cbec884b3183fdda5935b..0000000000000000000000000000000000000000 --- a/spaces/SAAZIZI/SummarizeAV/Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -# Use Ubuntu as the base image (Python 3 is installed below rather than starting from a Python base image)
-FROM ubuntu:latest
-
-# Set debconf to noninteractive mode (this prevents some install-time prompts)
-ENV DEBIAN_FRONTEND=noninteractive
-
-# Update and install system dependencies and Python
-RUN apt-get update && \
-    apt-get install -y g++ gdb make ninja-build rsync zip python3 python3-pip
-
-# Upgrade pip and install Python packages
-RUN pip3 install --upgrade pip setuptools wheel
-
-# Set the working directory inside the container
-WORKDIR /app
-
-# Install git
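-# (Build/run sketch; the image tag is illustrative:
-#    docker build -t summarize-av .
-#    docker run -p 8501:8501 summarize-av )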
-RUN apt-get -y update -RUN apt-get -y install git - -# Add C++ compiler and other build essentials for packages requiring compilation -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - build-essential \ - gcc \ - g++ \ - cmake \ - && rm -rf /var/lib/apt/lists/* - - -# Copy the current directory contents into the container at /app -COPY . /app - - -# Install the Python dependencies -RUN pip install --no-cache-dir -r requirements.txt - -# Make port 8501 available to the world outside this container -EXPOSE 8501 - -# Run your Streamlit app -CMD ["streamlit", "run", "app.py"] diff --git a/spaces/SLU-CSCI4750/Demo8_RegressionGradientDecentCompare/app.py b/spaces/SLU-CSCI4750/Demo8_RegressionGradientDecentCompare/app.py deleted file mode 100644 index 4c9badd045f4961ab2cbc55e60d709f20572863c..0000000000000000000000000000000000000000 --- a/spaces/SLU-CSCI4750/Demo8_RegressionGradientDecentCompare/app.py +++ /dev/null @@ -1,255 +0,0 @@ -### CSCI 4750/5750: regression models -### SLU-CS: Jie Hou - -import gradio as gr -import matplotlib -import matplotlib.pyplot as plt -import numpy as np -from sklearn.linear_model import LinearRegression - -def cal_mse(X,y,b,w): - thetas = np.array([[b], [w]]) - X_b = np.c_[np.ones((len(X), 1)), X] # add x0 = 1 to each instance - y_predict = X_b.dot(thetas) - mse = np.mean((y_predict-y)**2) - return mse - -def gradient_descent(n_samples=100, intercept=4, slope=3, intercept_random=4, slope_random=3, gradient_descent='False', gradient_descent_type = 'Batch GradientDescent' , learning_rate= 0.01, iteration=100, mini_batchsize = 32): - if n_samples < mini_batchsize: - mini_batchsize = n_samples - ### (1) generate simulated data points - X = 2 * np.random.rand(n_samples, 1) - y = intercept + slope * X + np.random.randn(n_samples, 1) - - ### (2) fit regression model - lin_reg = LinearRegression() - lin_reg.fit(X, y) - - ### (3) make a prediction on training data - y_predict = lin_reg.predict(X) - y_predict - - ### (4) Draw baseline linear Line - fig = plt.figure(figsize=(12,18)) - - plt.subplot(3,1,1) - plt.plot(X, y_predict, "r-", linewidth=2, label = "Line of best fit") - plt.plot(X, y, "b.") - - - ### (4.2) Draw random line - if intercept_random != intercept or slope_random != slope: #avoid overlap - X_new = np.array([[0], [2]]) - X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance - y_predict = X_new_b.dot(np.array([intercept_random, slope_random])) - plt.plot(X_new, y_predict, "g-", linewidth=2, label = "Random line") - - - ### (4.3) Apply gradient desc - if gradient_descent: - b = intercept_random - w = slope_random - - lr = learning_rate # learning rate - iteration = iteration - - if gradient_descent_type == 'Batch GradientDescent': - # Store initial values for plotting. - b_history = [b] - w_history = [w] - - train_mse = [] - # Iterations - for i in range(iteration): - b_grad = 0.0 - w_grad = 0.0 - for n in range(len(X)): - b_grad = b_grad - 2*(y[n,0] - b - w*X[n,0])*1.0 - w_grad = w_grad - 2*(y[n,0] - b - w*X[n,0])*X[n,0] - b_grad /= len(X) - w_grad /= len(X) - - # Update parameters. - b = b - lr * b_grad - w = w - lr * w_grad - - # Store parameters for plotting - b_history.append(b) - w_history.append(w) - - train_mse.append(cal_mse(X,y,b,w)) - elif gradient_descent_type == 'Stochastic GradientDescent': - # Store initial values for plotting. 
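-            # For one sample (x_n, y_n) the squared error (y_n - b - w*x_n)**2
-            # has partial derivatives
-            #   dE/db = -2*(y_n - b - w*x_n)
-            #   dE/dw = -2*(y_n - b - w*x_n)*x_n,
-            # which are exactly the per-sample updates applied below.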
- b_history = [b] - w_history = [w] - - train_mse = [] - # Iterations - for i in range(iteration): - for n in range(len(X)): - random_index = np.random.randint(len(X)) - b_grad = -2*(y[random_index,0] - b - w*X[random_index,0])*1.0 - w_grad = -2*(y[random_index,0] - b - w*X[random_index,0])*X[random_index,0] - - # Update parameters. - b = b - lr * b_grad - w = w - lr * w_grad - - # Store parameters for plotting - b_history.append(b) - w_history.append(w) - - train_mse.append(cal_mse(X,y,b,w)) - if gradient_descent_type == 'Mini-Batch GradientDescent': - # Store initial values for plotting. - b_history = [b] - w_history = [w] - - train_mse = [] - # Iterations - minibatch_size = mini_batchsize - for i in range(iteration): - # shuffle dataset - shuffled_indices = np.random.permutation(len(X)) - X_b_shuffled = X[shuffled_indices] - y_shuffled = y[shuffled_indices] - for k in range(0, len(X), minibatch_size): - X_mini = X_b_shuffled[k:k+minibatch_size] - y_mini = y_shuffled[k:k+minibatch_size] - - b_grad = 0.0 - w_grad = 0.0 - for n in range(len(X_mini)): - b_grad = b_grad - 2*(y_mini[n,0] - b - w*X_mini[n,0])*1.0 - w_grad = w_grad - 2*(y_mini[n,0] - b - w*X_mini[n,0])*X_mini[n,0] - b_grad /= len(X_mini) - w_grad /= len(X_mini) - - # Update parameters. - b = b - lr * b_grad - w = w - lr * w_grad - - # Store parameters for plotting - b_history.append(b) - w_history.append(w) - - train_mse.append(cal_mse(X,y,b,w)) - - plt.xlabel("$x_1$", fontsize=22) - plt.ylabel("$y$", rotation=0, fontsize=22) - plt.xticks(fontsize=18) - plt.yticks(fontsize=18) - plt.axis([np.min(X)*0.1, np.max(X)*1.1, np.min(y)*0.1, np.max(y)*1.1]) - plt.title("Linear Regression model predictions", fontsize=22) - plt.legend(fontsize=18) - plt.xlim(0,2) - plt.ylim(-10,10) - - - - - - ### (5) Visualize loss function - plt.subplot(3,1,2) - - ### (5.1) generate grid of parameters - b = np.arange(-10,10,0.1) #bias - w = np.arange(-10,10,0.1) #weight - - ### (5.2) Calculate MSE over parameters - Z = np.zeros((len(w), len(b))) - - for i in range(len(w)): - for j in range(len(b)): - w0 = w[i] - b0 = b[j] - Z[i][j] = cal_mse(X, y, b0, w0) - - - ### (5.3) Get optimal parameters - theta0_best = lin_reg.intercept_[0] - theta1_best = lin_reg.coef_[0][0] - - - ### (5.4) Draw the contour graph - plt.contourf(b,w,Z, 50, alpha=0.5, cmap=plt.get_cmap('jet')) - - ### (5.5) Add optimal loss - plt.plot(theta0_best, theta1_best, 'x', ms=12, markeredgewidth=3, color='orange') - plt.text(theta0_best, theta1_best,'MSE:'+str(np.round(cal_mse(X,y,theta0_best, theta1_best),2)), color='red', fontsize=22) - - - ### (5.6) Add loss of random lines - if intercept_random != intercept or slope_random != slope: #avoid overlap - plt.plot(intercept_random, slope_random, 'o', ms=5, markeredgewidth=3, color='orange') - plt.text(intercept_random, slope_random,'MSE:'+str(np.round(cal_mse(X,y,intercept_random, slope_random),2)), fontsize=22) - - ### (5.7) draw gradient updates - if gradient_descent: - plt.plot(b_history, w_history, 'o-', ms=3, lw=1.5, color='black') - plt.title("Visualization of Gradient Descent Process ("+gradient_descent_type+")", fontsize=22) - else: - plt.title("Visualization of Loss Function Map", fontsize=22) - else: - plt.title("Visualization of Loss Function Map", fontsize=22) - plt.xlabel("$Intercept$", fontsize=22) - plt.ylabel("$Slope$", rotation=0, fontsize=22) - plt.xticks(fontsize=18) - plt.yticks(fontsize=18) - plt.xlim(-10,10) - plt.ylim(-10,10) - - - ### 6. 
Visualize the learning curves - if gradient_descent: - plt.subplot(3,1,3) - plt.plot(train_mse,label="train_loss (lr="+str(learning_rate)+")") - plt.xlabel('Iteration',fontweight="bold",fontsize = 22) - plt.ylabel('Loss',fontweight="bold",fontsize = 22) - plt.title("Learning curve: Loss VS Epochs",fontweight="bold",fontsize = 22) - plt.legend(fontsize=18) - plt.xticks(fontsize=18) - plt.yticks(fontsize=18) - - #plt.show() - fig.tight_layout() - plt.savefig('plot_line.png', dpi=300) - return 'plot_line.png' - - -#### Define input component -input_sample = gr.inputs.Slider(1, 5000, step=50, default=100, label='N samples') -input_intercept = gr.inputs.Slider(1, 8, step=0.5, default=4, label='(Baseline) Intercept') -input_slope = gr.inputs.Slider(-8, 8, step=0.5, default=2.8, label='(Baseline) Slope') - -input_intercept_random = gr.inputs.Slider(-8, 8, step=0.5, default=-7.5, label='(Random) Intercept') -input_slope_random = gr.inputs.Slider(-8, 8, step=0.5, default=7.5, label='(Random) Slope') - -input_gradients = gr.inputs.Checkbox(label="Apply Gradient Descent") -#input_gradients_type = gr.inputs.CheckboxGroup(['Batch GradientDescient', 'Stochastic GradientDescent', 'Mini-Batch GradientDescent'],label="Type of Gradient Descent") -input_gradients_type = gr.inputs.Dropdown(['Batch GradientDescent', 'Stochastic GradientDescent', 'Mini-Batch GradientDescent'],label="Type of Gradient Descent") - - -input_batchsize = gr.inputs.Slider(1, 64, step=1, default=32, label='Batch size for Mini-BatchGD') - -input_learningrate = gr.inputs.Slider(0,2, step=0.001, default=0.001, label='Learning Rate') -input_iteration = gr.inputs.Slider(1, 1000, step=2, default=100, label='Iteration') - - -#### Define output component -output_plot1 = gr.outputs.Image(label="Regression plot") - - -### configure gradio, detailed can be found at https://www.gradio.app/docs/#i_slider -interface = gr.Interface(fn=gradient_descent, - inputs=[input_sample, input_intercept, input_slope, input_intercept_random, input_slope_random, input_gradients, input_gradients_type, input_learningrate, input_iteration, input_batchsize], - outputs=[output_plot1], - examples_per_page = 2, - #examples = [[4, 3, -7, -5, True, 0.0001, 100], [1, 2, -7, -8, False, 0.0001, 100]], - title="CSCI4750/5750: Regression models (Batch/Mini-Batch/Stochastic Gradient Descent)", - description= "Click examples to generate random dataset and select gradient descent parameters", - theme = 'huggingface', - layout = 'vertical' - ) - -interface.launch(debug=True) \ No newline at end of file diff --git a/spaces/Samhita/geolocator/gantry_callback/gantry_util.py b/spaces/Samhita/geolocator/gantry_callback/gantry_util.py deleted file mode 100644 index 6e6f9db4df1aa38cce148edafc5676c6d0276d60..0000000000000000000000000000000000000000 --- a/spaces/Samhita/geolocator/gantry_callback/gantry_util.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Class to handle flagging in Gradio to Gantry. - -Originally written by the FSDL educators at https://github.com/full-stack-deep-learning/fsdl-text-recognizer-2022/blob/main/app_gradio/flagging.py -that has been adjusted for the geolocator project. 
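-
-Flagged inputs are written to S3 (a bucket named after Gradio's flagging_dir)
-and a record with the image URI, predicted location/coordinates, and user
-feedback is sent to Gantry.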
-""" - -import os -from typing import List, Optional, Union - -import gantry -import gradio as gr -from gradio.components import Component -from smart_open import open - -from .s3_util import ( - add_access_policy, - enable_bucket_versioning, - get_or_create_bucket, - get_uri_of, - make_key, -) -from .string_img_util import read_b64_string - - -class GantryImageToTextLogger(gr.FlaggingCallback): - """ - A FlaggingCallback that logs flagged image-to-text data to Gantry via S3. - """ - - def __init__( - self, - application: str, - version: Union[int, str, None] = None, - api_key: Optional[str] = None, - ): - """Logs image-to-text data that was flagged in Gradio to Gantry. - - Images are logged to Amazon Web Services' Simple Storage Service (S3). - - The flagging_dir provided to the Gradio interface is used to set the - name of the bucket on S3 into which images are logged. - - See the following tutorial by Dan Bader for a quick overview of S3 and the AWS SDK - for Python, boto3: https://realpython.com/python-boto3-aws-s3/ - - See https://gradio.app/docs/#flagging for details on how - flagging data is handled by Gradio. - - See https://docs.gantry.io for information about logging data to Gantry. - - Parameters - ---------- - application - The name of the application on Gantry to which flagged data should be uploaded. - Gantry validates and monitors data per application. - version - The schema version to use during validation by Gantry. If not provided, Gantry - will use the latest version. A new version will be created if the provided version - does not exist yet. - api_key - Optionally, provide your Gantry API key here. Provided for convenience - when testing and developing locally or in notebooks. The API key can - alternatively be provided via the GANTRY_API_KEY environment variable. 
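-
-        Example
-        -------
-        A minimal wiring sketch (the application name and flagging_dir are
-        illustrative; assumes GANTRY_API_KEY is set in the environment):
-
-        >>> callback = GantryImageToTextLogger(application="geolocator")
-        >>> # gr.Interface(fn=..., inputs=..., outputs=...,
-        >>> #              flagging_callback=callback,
-        >>> #              flagging_dir="geolocator-flagged-images")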
- """ - self.application = application - self.version = version - gantry.init(api_key=api_key) - - def setup(self, components: List[Component], flagging_dir: str): - """Sets up the GantryImageToTextLogger by creating or attaching to an S3 Bucket.""" - self._counter = 0 - self.bucket = get_or_create_bucket(flagging_dir) - enable_bucket_versioning(self.bucket) - add_access_policy(self.bucket) - ( - self.image_component_idx, - self.text_component_idx, - self.text_component2_idx, - ) = self._find_image_video_and_text_components(components) - - def flag(self, flag_data, flag_option=None, flag_index=None, username=None) -> int: - """Sends flagged outputs and feedback to Gantry and image inputs to S3.""" - - image = flag_data[self.image_component_idx] - text = flag_data[self.text_component_idx] - text2 = flag_data[self.text_component2_idx] - - feedback = {"flag": flag_option} - if username is not None: - feedback["user"] = username - - data_type, image_buffer = read_b64_string(image, return_data_type=True) - image_url = self._to_s3(image_buffer.read(), filetype=data_type) - - self._to_gantry( - input_image_url=image_url, - pred_location=text, - pred_coordinates=text2, - feedback=feedback, - ) - self._counter += 1 - - return self._counter - - def _to_gantry(self, input_image_url, pred_location, pred_coordinates, feedback): - inputs = {"image": input_image_url} - outputs = {"location": pred_location, "coordinates": pred_coordinates} - - gantry.log_record( - self.application, - self.version, - inputs=inputs, - outputs=outputs, - feedback=feedback, - ) - - def _to_s3(self, image_bytes, key=None, filetype=None): - if key is None: - key = make_key(image_bytes, filetype=filetype) - - s3_uri = get_uri_of(self.bucket, key) - - with open(s3_uri, "wb") as s3_object: - s3_object.write(image_bytes) - - return s3_uri - - def _find_image_video_and_text_components(self, components: List[Component]): - """ - Manual indexing of images and text components - """ - - image_component_idx = 0 - text_component_idx = 1 - text_component2_idx = 2 - - return ( - image_component_idx, - text_component_idx, - text_component2_idx, - ) - - -def get_api_key() -> Optional[str]: - """Convenience method for fetching the Gantry API key.""" - api_key = os.environ.get("GANTRY_API_KEY") - return api_key diff --git a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/models/layers/Resnet.py b/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/models/layers/Resnet.py deleted file mode 100644 index 72f07db4a7b8d9395e2ac7a8ad51d7607ee21959..0000000000000000000000000000000000000000 --- a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/models/layers/Resnet.py +++ /dev/null @@ -1,82 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, stride=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = F.relu(self.bn1(self.conv1(x)), inplace=True) - out = F.relu(self.bn2(self.conv2(out)), inplace=True) - out = self.bn3(self.conv3(out)) - - 
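-        # Projection shortcut: when stride != 1 or the channel count changes,
-        # `downsample` maps the input so the residual addition below is
-        # shape-compatible with the conv branch.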
if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = F.relu(out, inplace=True) - - return out - - -class ResNet(nn.Module): - """ Resnet """ - def __init__(self, architecture): - super(ResNet, self).__init__() - assert architecture in ["resnet50", "resnet101"] - self.inplanes = 64 - self.layers = [3, 4, {"resnet50": 6, "resnet101": 23}[architecture], 3] - self.block = Bottleneck - - self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) - self.bn1 = nn.BatchNorm2d(64, eps=1e-5, momentum=0.01, affine=True) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2) - - self.layer1 = self.make_layer(self.block, 64, self.layers[0]) - self.layer2 = self.make_layer(self.block, 128, self.layers[1], stride=2) - self.layer3 = self.make_layer(self.block, 256, self.layers[2], stride=2) - - self.layer4 = self.make_layer( - self.block, 512, self.layers[3], stride=2) - - def forward(self, x): - x = self.maxpool(self.relu(self.bn1(self.conv1(x)))) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - return x - - def stages(self): - return [self.layer1, self.layer2, self.layer3, self.layer4] - - def make_layer(self, block, planes, blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes)) - - return nn.Sequential(*layers) diff --git a/spaces/SebastianEnger/AI-TextGenerator/index.html b/spaces/SebastianEnger/AI-TextGenerator/index.html deleted file mode 100644 index 25162b65fea35fcadb8944557bd9475754b33b09..0000000000000000000000000000000000000000 --- a/spaces/SebastianEnger/AI-TextGenerator/index.html +++ /dev/null @@ -1,295 +0,0 @@ - - - - - CopyWriting: Generator for Marketing Content by AI | www.unaique.net - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-      [index.html body: landing-page markup lost to extraction; the page
-       carried the tagline "AI Text Generator: Write free text, article,
-       blog, text and journal with the fancy text generator powered by
-       Artificial Intelligence.", a "Share with friends:" row of social
-       share links, and assorted script/widget markup.]
      - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/ServerX/PorcoDiaz/gui_v1.py b/spaces/ServerX/PorcoDiaz/gui_v1.py deleted file mode 100644 index becba80cdda6987c1ad70c89e68a4e3a4da44639..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/gui_v1.py +++ /dev/null @@ -1,708 +0,0 @@ -import os -import logging -import sys -from dotenv import load_dotenv - -load_dotenv() - -os.environ["OMP_NUM_THREADS"] = "4" -if sys.platform == "darwin": - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - -now_dir = os.getcwd() -sys.path.append(now_dir) -import multiprocessing - -logger = logging.getLogger(__name__) - - -class Harvest(multiprocessing.Process): - def __init__(self, inp_q, opt_q): - multiprocessing.Process.__init__(self) - self.inp_q = inp_q - self.opt_q = opt_q - - def run(self): - import numpy as np - import pyworld - - while 1: - idx, x, res_f0, n_cpu, ts = self.inp_q.get() - f0, t = pyworld.harvest( - x.astype(np.double), - fs=16000, - f0_ceil=1100, - f0_floor=50, - frame_period=10, - ) - res_f0[idx] = f0 - if len(res_f0.keys()) >= n_cpu: - self.opt_q.put(ts) - - -if __name__ == "__main__": - import json - import multiprocessing - import re - import threading - import time - import traceback - from multiprocessing import Queue, cpu_count - from queue import Empty - - import librosa - from tools.torchgate import TorchGate - import numpy as np - import PySimpleGUI as sg - import sounddevice as sd - import torch - import torch.nn.functional as F - import torchaudio.transforms as tat - - import tools.rvc_for_realtime as rvc_for_realtime - from i18n.i18n import I18nAuto - - i18n = I18nAuto() - device = rvc_for_realtime.config.device - # device = torch.device( - # "cuda" - # if torch.cuda.is_available() - # else ("mps" if torch.backends.mps.is_available() else "cpu") - # ) - current_dir = os.getcwd() - inp_q = Queue() - opt_q = Queue() - n_cpu = min(cpu_count(), 8) - for _ in range(n_cpu): - Harvest(inp_q, opt_q).start() - - class GUIConfig: - def __init__(self) -> None: - self.pth_path: str = "" - self.index_path: str = "" - self.pitch: int = 0 - self.samplerate: int = 40000 - self.block_time: float = 1.0 # s - self.buffer_num: int = 1 - self.threhold: int = -60 - self.crossfade_time: float = 0.04 - self.extra_time: float = 2.0 - self.I_noise_reduce = False - self.O_noise_reduce = False - self.rms_mix_rate = 0.0 - self.index_rate = 0.3 - self.n_cpu = min(n_cpu, 6) - self.f0method = "harvest" - self.sg_input_device = "" - self.sg_output_device = "" - - class GUI: - def __init__(self) -> None: - self.config = GUIConfig() - self.flag_vc = False - - self.launcher() - - def load(self): - input_devices, output_devices, _, _ = self.get_devices() - try: - with open("configs/config.json", "r") as j: - data = json.load(j) - data["pm"] = data["f0method"] == "pm" - data["harvest"] = data["f0method"] == "harvest" - data["crepe"] = data["f0method"] == "crepe" - data["rmvpe"] = data["f0method"] == "rmvpe" - except: - with open("configs/config.json", "w") as j: - data = { - "pth_path": " ", - "index_path": " ", - "sg_input_device": input_devices[sd.default.device[0]], - "sg_output_device": output_devices[sd.default.device[1]], - "threhold": "-60", - "pitch": "0", - "index_rate": "0", - "rms_mix_rate": "0", - "block_time": "0.25", - "crossfade_length": "0.04", - "extra_time": "2", - "f0method": "rmvpe", - } - data["pm"] = data["f0method"] == "pm" - data["harvest"] = data["f0method"] == "harvest" - data["crepe"] = data["f0method"] == "crepe" - data["rmvpe"] = 
data["f0method"] == "rmvpe" - return data - - def launcher(self): - data = self.load() - sg.theme("LightBlue3") - input_devices, output_devices, _, _ = self.get_devices() - layout = [ - [ - sg.Frame( - title=i18n("加载模型"), - layout=[ - [ - sg.Input( - default_text=data.get("pth_path", ""), - key="pth_path", - ), - sg.FileBrowse( - i18n("选择.pth文件"), - initial_folder=os.path.join( - os.getcwd(), "assets/weights" - ), - file_types=((". pth"),), - ), - ], - [ - sg.Input( - default_text=data.get("index_path", ""), - key="index_path", - ), - sg.FileBrowse( - i18n("选择.index文件"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=((". index"),), - ), - ], - ], - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("输入设备")), - sg.Combo( - input_devices, - key="sg_input_device", - default_value=data.get("sg_input_device", ""), - ), - ], - [ - sg.Text(i18n("输出设备")), - sg.Combo( - output_devices, - key="sg_output_device", - default_value=data.get("sg_output_device", ""), - ), - ], - [sg.Button(i18n("重载设备列表"), key="reload_devices")], - ], - title=i18n("音频设备(请使用同种类驱动)"), - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("响应阈值")), - sg.Slider( - range=(-60, 0), - key="threhold", - resolution=1, - orientation="h", - default_value=data.get("threhold", "-60"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("音调设置")), - sg.Slider( - range=(-24, 24), - key="pitch", - resolution=1, - orientation="h", - default_value=data.get("pitch", "0"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("Index Rate")), - sg.Slider( - range=(0.0, 1.0), - key="index_rate", - resolution=0.01, - orientation="h", - default_value=data.get("index_rate", "0"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("响度因子")), - sg.Slider( - range=(0.0, 1.0), - key="rms_mix_rate", - resolution=0.01, - orientation="h", - default_value=data.get("rms_mix_rate", "0"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("音高算法")), - sg.Radio( - "pm", - "f0method", - key="pm", - default=data.get("pm", "") == True, - enable_events=True, - ), - sg.Radio( - "harvest", - "f0method", - key="harvest", - default=data.get("harvest", "") == True, - enable_events=True, - ), - sg.Radio( - "crepe", - "f0method", - key="crepe", - default=data.get("crepe", "") == True, - enable_events=True, - ), - sg.Radio( - "rmvpe", - "f0method", - key="rmvpe", - default=data.get("rmvpe", "") == True, - enable_events=True, - ), - ], - ], - title=i18n("常规设置"), - ), - sg.Frame( - layout=[ - [ - sg.Text(i18n("采样长度")), - sg.Slider( - range=(0.05, 2.4), - key="block_time", - resolution=0.01, - orientation="h", - default_value=data.get("block_time", "0.25"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("harvest进程数")), - sg.Slider( - range=(1, n_cpu), - key="n_cpu", - resolution=1, - orientation="h", - default_value=data.get( - "n_cpu", min(self.config.n_cpu, n_cpu) - ), - enable_events=True, - ), - ], - [ - sg.Text(i18n("淡入淡出长度")), - sg.Slider( - range=(0.01, 0.15), - key="crossfade_length", - resolution=0.01, - orientation="h", - default_value=data.get("crossfade_length", "0.04"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("额外推理时长")), - sg.Slider( - range=(0.05, 5.00), - key="extra_time", - resolution=0.01, - orientation="h", - default_value=data.get("extra_time", "2.0"), - enable_events=True, - ), - ], - [ - sg.Checkbox( - i18n("输入降噪"), - key="I_noise_reduce", - enable_events=True, - ), - sg.Checkbox( - i18n("输出降噪"), - key="O_noise_reduce", - enable_events=True, - ), - ], - ], - title=i18n("性能设置"), - ), - ], - [ - sg.Button(i18n("开始音频转换"), 
key="start_vc"), - sg.Button(i18n("停止音频转换"), key="stop_vc"), - sg.Text(i18n("推理时间(ms):")), - sg.Text("0", key="infer_time"), - ], - ] - self.window = sg.Window("RVC - GUI", layout=layout, finalize=True) - self.event_handler() - - def event_handler(self): - while True: - event, values = self.window.read() - if event == sg.WINDOW_CLOSED: - self.flag_vc = False - exit() - if event == "reload_devices": - prev_input = self.window["sg_input_device"].get() - prev_output = self.window["sg_output_device"].get() - input_devices, output_devices, _, _ = self.get_devices(update=True) - if prev_input not in input_devices: - self.config.sg_input_device = input_devices[0] - else: - self.config.sg_input_device = prev_input - self.window["sg_input_device"].Update(values=input_devices) - self.window["sg_input_device"].Update( - value=self.config.sg_input_device - ) - if prev_output not in output_devices: - self.config.sg_output_device = output_devices[0] - else: - self.config.sg_output_device = prev_output - self.window["sg_output_device"].Update(values=output_devices) - self.window["sg_output_device"].Update( - value=self.config.sg_output_device - ) - if event == "start_vc" and self.flag_vc == False: - if self.set_values(values) == True: - logger.info("Use CUDA: %s", torch.cuda.is_available()) - self.start_vc() - settings = { - "pth_path": values["pth_path"], - "index_path": values["index_path"], - "sg_input_device": values["sg_input_device"], - "sg_output_device": values["sg_output_device"], - "threhold": values["threhold"], - "pitch": values["pitch"], - "rms_mix_rate": values["rms_mix_rate"], - "index_rate": values["index_rate"], - "block_time": values["block_time"], - "crossfade_length": values["crossfade_length"], - "extra_time": values["extra_time"], - "n_cpu": values["n_cpu"], - "f0method": ["pm", "harvest", "crepe", "rmvpe"][ - [ - values["pm"], - values["harvest"], - values["crepe"], - values["rmvpe"], - ].index(True) - ], - } - with open("configs/config.json", "w") as j: - json.dump(settings, j) - if event == "stop_vc" and self.flag_vc == True: - self.flag_vc = False - - # Parameter hot update - if event == "threhold": - self.config.threhold = values["threhold"] - elif event == "pitch": - self.config.pitch = values["pitch"] - if hasattr(self, "rvc"): - self.rvc.change_key(values["pitch"]) - elif event == "index_rate": - self.config.index_rate = values["index_rate"] - if hasattr(self, "rvc"): - self.rvc.change_index_rate(values["index_rate"]) - elif event == "rms_mix_rate": - self.config.rms_mix_rate = values["rms_mix_rate"] - elif event in ["pm", "harvest", "crepe", "rmvpe"]: - self.config.f0method = event - elif event == "I_noise_reduce": - self.config.I_noise_reduce = values["I_noise_reduce"] - elif event == "O_noise_reduce": - self.config.O_noise_reduce = values["O_noise_reduce"] - elif event != "start_vc" and self.flag_vc == True: - # Other parameters do not support hot update - self.flag_vc = False - - def set_values(self, values): - if len(values["pth_path"].strip()) == 0: - sg.popup(i18n("请选择pth文件")) - return False - if len(values["index_path"].strip()) == 0: - sg.popup(i18n("请选择index文件")) - return False - pattern = re.compile("[^\x00-\x7F]+") - if pattern.findall(values["pth_path"]): - sg.popup(i18n("pth文件路径不可包含中文")) - return False - if pattern.findall(values["index_path"]): - sg.popup(i18n("index文件路径不可包含中文")) - return False - self.set_devices(values["sg_input_device"], values["sg_output_device"]) - self.config.pth_path = values["pth_path"] - self.config.index_path = values["index_path"] - 
self.config.threhold = values["threhold"] - self.config.pitch = values["pitch"] - self.config.block_time = values["block_time"] - self.config.crossfade_time = values["crossfade_length"] - self.config.extra_time = values["extra_time"] - self.config.I_noise_reduce = values["I_noise_reduce"] - self.config.O_noise_reduce = values["O_noise_reduce"] - self.config.rms_mix_rate = values["rms_mix_rate"] - self.config.index_rate = values["index_rate"] - self.config.n_cpu = values["n_cpu"] - self.config.f0method = ["pm", "harvest", "crepe", "rmvpe"][ - [ - values["pm"], - values["harvest"], - values["crepe"], - values["rmvpe"], - ].index(True) - ] - return True - - def start_vc(self): - torch.cuda.empty_cache() - self.flag_vc = True - self.rvc = rvc_for_realtime.RVC( - self.config.pitch, - self.config.pth_path, - self.config.index_path, - self.config.index_rate, - self.config.n_cpu, - inp_q, - opt_q, - device, - self.rvc if hasattr(self, "rvc") else None - ) - self.config.samplerate = self.rvc.tgt_sr - self.zc = self.rvc.tgt_sr // 100 - self.block_frame = int(np.round(self.config.block_time * self.config.samplerate / self.zc)) * self.zc - self.block_frame_16k = 160 * self.block_frame // self.zc - self.crossfade_frame = int(np.round(self.config.crossfade_time * self.config.samplerate / self.zc)) * self.zc - self.sola_search_frame = self.zc - self.extra_frame = int(np.round(self.config.extra_time * self.config.samplerate / self.zc)) * self.zc - self.input_wav: torch.Tensor = torch.zeros( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame, - device=device, - dtype=torch.float32, - ) - self.input_wav_res: torch.Tensor= torch.zeros(160 * self.input_wav.shape[0] // self.zc, device=device,dtype=torch.float32) - self.pitch: np.ndarray = np.zeros( - self.input_wav.shape[0] // self.zc, - dtype="int32", - ) - self.pitchf: np.ndarray = np.zeros( - self.input_wav.shape[0] // self.zc, - dtype="float64", - ) - self.sola_buffer: torch.Tensor = torch.zeros( - self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.nr_buffer: torch.Tensor = self.sola_buffer.clone() - self.output_buffer: torch.Tensor = self.input_wav.clone() - self.res_buffer: torch.Tensor = torch.zeros(2 * self.zc, device=device,dtype=torch.float32) - self.valid_rate = 1 - (self.extra_frame - 1) / self.input_wav.shape[0] - self.fade_in_window: torch.Tensor = ( - torch.sin( - 0.5 - * np.pi - * torch.linspace( - 0.0, - 1.0, - steps=self.crossfade_frame, - device=device, - dtype=torch.float32, - ) - ) - ** 2 - ) - self.fade_out_window: torch.Tensor = 1 - self.fade_in_window - self.resampler = tat.Resample( - orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32 - ).to(device) - self.tg = TorchGate(sr=self.config.samplerate, n_fft=4*self.zc, prop_decrease=0.9).to(device) - thread_vc = threading.Thread(target=self.soundinput) - thread_vc.start() - - def soundinput(self): - """ - 接受音频输入 - """ - channels = 1 if sys.platform == "darwin" else 2 - with sd.Stream( - channels=channels, - callback=self.audio_callback, - blocksize=self.block_frame, - samplerate=self.config.samplerate, - dtype="float32", - ): - while self.flag_vc: - time.sleep(self.config.block_time) - logger.debug("Audio block passed.") - logger.debug("ENDing VC") - - def audio_callback( - self, indata: np.ndarray, outdata: np.ndarray, frames, times, status - ): - """ - 音频处理 - """ - start_time = time.perf_counter() - indata = librosa.to_mono(indata.T) - if self.config.threhold > -60: - rms = librosa.feature.rms( - y=indata, 
frame_length=4*self.zc, hop_length=self.zc - ) - db_threhold = ( - librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold - ) - for i in range(db_threhold.shape[0]): - if db_threhold[i]: - indata[i * self.zc : (i + 1) * self.zc] = 0 - self.input_wav[: -self.block_frame] = self.input_wav[self.block_frame :].clone() - self.input_wav[-self.block_frame: ] = torch.from_numpy(indata).to(device) - self.input_wav_res[ : -self.block_frame_16k] = self.input_wav_res[self.block_frame_16k :].clone() - # input noise reduction and resampling - if self.config.I_noise_reduce: - input_wav = self.input_wav[-self.crossfade_frame -self.block_frame-2*self.zc: ] - input_wav = self.tg(input_wav.unsqueeze(0), self.input_wav.unsqueeze(0))[0, 2*self.zc:] - input_wav[: self.crossfade_frame] *= self.fade_in_window - input_wav[: self.crossfade_frame] += self.nr_buffer * self.fade_out_window - self.nr_buffer[:] = input_wav[-self.crossfade_frame: ] - input_wav = torch.cat((self.res_buffer[:], input_wav[: self.block_frame])) - self.res_buffer[:] = input_wav[-2*self.zc: ] - self.input_wav_res[-self.block_frame_16k-160: ] = self.resampler(input_wav)[160: ] - else: - self.input_wav_res[-self.block_frame_16k-160: ] = self.resampler(self.input_wav[-self.block_frame-2*self.zc: ])[160: ] - # infer - f0_extractor_frame = self.block_frame_16k + 800 - if self.config.f0method == 'rmvpe': - f0_extractor_frame = 5120 * ((f0_extractor_frame - 1) // 5120 + 1) - infer_wav = self.rvc.infer( - self.input_wav_res, - self.input_wav_res[-f0_extractor_frame :].cpu().numpy(), - self.block_frame_16k, - self.valid_rate, - self.pitch, - self.pitchf, - self.config.f0method, - ) - infer_wav = infer_wav[ - -self.crossfade_frame - self.sola_search_frame - self.block_frame : - ] - # output noise reduction - if self.config.O_noise_reduce: - self.output_buffer[: -self.block_frame] = self.output_buffer[self.block_frame :].clone() - self.output_buffer[-self.block_frame: ] = infer_wav[-self.block_frame:] - infer_wav = self.tg(infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0)).squeeze(0) - # volume envelop mixing - if self.config.rms_mix_rate < 1: - rms1 = librosa.feature.rms( - y=self.input_wav_res[-160*infer_wav.shape[0]//self.zc :].cpu().numpy(), - frame_length=640, - hop_length=160, - ) - rms1 = torch.from_numpy(rms1).to(device) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear",align_corners=True, - )[0,0,:-1] - rms2 = librosa.feature.rms( - y=infer_wav[:].cpu().numpy(), frame_length=4*self.zc, hop_length=self.zc - ) - rms2 = torch.from_numpy(rms2).to(device) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear",align_corners=True, - )[0,0,:-1] - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3) - infer_wav *= torch.pow(rms1 / rms2, torch.tensor(1 - self.config.rms_mix_rate)) - # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC - conv_input = infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame] - cor_nom = F.conv1d(conv_input, self.sola_buffer[None, None, :]) - cor_den = torch.sqrt( - F.conv1d(conv_input ** 2, torch.ones(1, 1, self.crossfade_frame, device=device)) + 1e-8) - if sys.platform == "darwin": - _, sola_offset = torch.max(cor_nom[0, 0] / cor_den[0, 0]) - sola_offset = sola_offset.item() - else: - sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0]) - logger.debug("sola_offset = %d", int(sola_offset)) - infer_wav = infer_wav[sola_offset: sola_offset + self.block_frame + self.crossfade_frame] - infer_wav[: self.crossfade_frame] 
*= self.fade_in_window - infer_wav[: self.crossfade_frame] += self.sola_buffer *self.fade_out_window - self.sola_buffer[:] = infer_wav[-self.crossfade_frame:] - if sys.platform == "darwin": - outdata[:] = infer_wav[:-self.crossfade_frame].cpu().numpy()[:, np.newaxis] - else: - outdata[:] = infer_wav[:-self.crossfade_frame].repeat(2, 1).t().cpu().numpy() - total_time = time.perf_counter() - start_time - self.window["infer_time"].update(int(total_time * 1000)) - logger.info("Infer time: %.2f", total_time) - - def get_devices(self, update: bool = True): - """获取设备列表""" - if update: - sd._terminate() - sd._initialize() - devices = sd.query_devices() - hostapis = sd.query_hostapis() - for hostapi in hostapis: - for device_idx in hostapi["devices"]: - devices[device_idx]["hostapi_name"] = hostapi["name"] - input_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_output_channels"] > 0 - ] - input_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_output_channels"] > 0 - ] - return ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) - - def set_devices(self, input_device, output_device): - """设置输出设备""" - ( - input_devices, - output_devices, - input_device_indices, - output_device_indices, - ) = self.get_devices() - sd.default.device[0] = input_device_indices[ - input_devices.index(input_device) - ] - sd.default.device[1] = output_device_indices[ - output_devices.index(output_device) - ] - logger.info( - "Input device: %s:%s", str(sd.default.device[0]), input_device - ) - logger.info( - "Output device: %s:%s", str(sd.default.device[1]), output_device - ) - - gui = GUI() \ No newline at end of file diff --git a/spaces/ServerX/PorcoDiaz/utils/README.md b/spaces/ServerX/PorcoDiaz/utils/README.md deleted file mode 100644 index fb45a36b5909585aa964f2033762ee59b55526b0..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/utils/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# External Colab Code -Code used to make Google Colab work correctly -- Repo link: https://github.com/IAHispano/Applio-RVC-Fork/ - -Thanks to https://github.com/kalomaze/externalcolabcode - diff --git a/spaces/Shawn37/UTR_LM/esm/modules.py b/spaces/Shawn37/UTR_LM/esm/modules.py deleted file mode 100644 index 5d7ee4741b23b6b88ead11c56ff20e978db678d3..0000000000000000000000000000000000000000 --- a/spaces/Shawn37/UTR_LM/esm/modules.py +++ /dev/null @@ -1,419 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from typing import Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .multihead_attention import MultiheadAttention # noqa -from .axial_attention import ColumnSelfAttention, RowSelfAttention - - -def gelu(x): - """Implementation of the gelu activation function. 
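-    Here the exact form x * 0.5 * (1 + erf(x / sqrt(2))) is used (see the
-    return statement below).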
- For information: OpenAI GPT's gelu is slightly different - (and gives slightly different results): - 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) - """ - return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) - - -def symmetrize(x): - "Make layer symmetric in final two dimensions, used for contact prediction." - return x + x.transpose(-1, -2) - - -def apc(x): - "Perform average product correct, used for contact prediction." - a1 = x.sum(-1, keepdims=True) - a2 = x.sum(-2, keepdims=True) - a12 = x.sum((-1, -2), keepdims=True) - - avg = a1 * a2 - avg.div_(a12) # in-place to reduce memory - normalized = x - avg - return normalized - - -class ESM1LayerNorm(nn.Module): - def __init__(self, hidden_size, eps=1e-12, affine=True): - """Construct a layernorm layer in the TF style (eps inside the sqrt).""" - super().__init__() - self.hidden_size = (hidden_size,) if isinstance(hidden_size, int) else tuple(hidden_size) - self.eps = eps - self.affine = bool(affine) - if self.affine: - self.weight = nn.Parameter(torch.ones(hidden_size)) - self.bias = nn.Parameter(torch.zeros(hidden_size)) - else: - self.weight, self.bias = None, None - - def forward(self, x): - dims = tuple(-(i + 1) for i in range(len(self.hidden_size))) - means = x.mean(dims, keepdim=True) - x_zeromean = x - means - variances = x_zeromean.pow(2).mean(dims, keepdim=True) - x = x_zeromean / torch.sqrt(variances + self.eps) - if self.affine: - x = (self.weight * x) + self.bias - return x - - -try: - from apex.normalization import FusedLayerNorm as _FusedLayerNorm - - class ESM1bLayerNorm(_FusedLayerNorm): - @torch.jit.unused - def forward(self, x): - if not x.is_cuda: - return super().forward(x) - else: - with torch.cuda.device(x.device): - return super().forward(x) - -except ImportError: - from torch.nn import LayerNorm as ESM1bLayerNorm - - -class TransformerLayer(nn.Module): - """Transformer layer block.""" - - def __init__( - self, - embed_dim, - ffn_embed_dim, - attention_heads, - add_bias_kv=True, - use_esm1b_layer_norm=False, - use_rotary_embeddings: bool = False, - ): - super().__init__() - self.embed_dim = embed_dim - self.ffn_embed_dim = ffn_embed_dim - self.attention_heads = attention_heads - self.use_rotary_embeddings = use_rotary_embeddings - self._init_submodules(add_bias_kv, use_esm1b_layer_norm) - - def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm): - BertLayerNorm = ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm - - self.self_attn = MultiheadAttention( - self.embed_dim, - self.attention_heads, - add_bias_kv=add_bias_kv, - add_zero_attn=False, - use_rotary_embeddings=self.use_rotary_embeddings, - ) - self.self_attn_layer_norm = BertLayerNorm(self.embed_dim) - - self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim) - self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim) - - self.final_layer_norm = BertLayerNorm(self.embed_dim) - - def forward( - self, x, self_attn_mask=None, self_attn_padding_mask=None, need_head_weights=False - ): - residual = x - x = self.self_attn_layer_norm(x) - x, attn = self.self_attn( - query=x, - key=x, - value=x, - key_padding_mask=self_attn_padding_mask, - need_weights=True, - need_head_weights=need_head_weights, - attn_mask=self_attn_mask, - ) - x = residual + x - - residual = x - x = self.final_layer_norm(x) - x = gelu(self.fc1(x)) - x = self.fc2(x) - x = residual + x - #print(f'------{attn.half().dtype}-----') - - return x, attn#.half() ### - - -class AxialTransformerLayer(nn.Module): - """Implements an Axial MSA Transformer 
block.""" - - def __init__( - self, - embedding_dim: int = 768, - ffn_embedding_dim: int = 3072, - num_attention_heads: int = 8, - dropout: float = 0.1, - attention_dropout: float = 0.1, - activation_dropout: float = 0.1, - max_tokens_per_msa: int = 2**14, - ) -> None: - super().__init__() - - # Initialize parameters - self.embedding_dim = embedding_dim - self.dropout_prob = dropout - - row_self_attention = RowSelfAttention( - embedding_dim, - num_attention_heads, - dropout=dropout, - max_tokens_per_msa=max_tokens_per_msa, - ) - - column_self_attention = ColumnSelfAttention( - embedding_dim, - num_attention_heads, - dropout=dropout, - max_tokens_per_msa=max_tokens_per_msa, - ) - - feed_forward_layer = FeedForwardNetwork( - embedding_dim, - ffn_embedding_dim, - activation_dropout=activation_dropout, - max_tokens_per_msa=max_tokens_per_msa, - ) - - self.row_self_attention = self.build_residual(row_self_attention) - self.column_self_attention = self.build_residual(column_self_attention) - self.feed_forward_layer = self.build_residual(feed_forward_layer) - - def build_residual(self, layer: nn.Module): - return NormalizedResidualBlock( - layer, - self.embedding_dim, - self.dropout_prob, - ) - - def forward( - self, - x: torch.Tensor, - self_attn_mask: Optional[torch.Tensor] = None, - self_attn_padding_mask: Optional[torch.Tensor] = None, - need_head_weights: bool = False, - ): - """ - LayerNorm is applied either before or after the self-attention/ffn - modules similar to the original Transformer implementation. - """ - x, row_attn = self.row_self_attention( - x, - self_attn_mask=self_attn_mask, - self_attn_padding_mask=self_attn_padding_mask, - ) - x, column_attn = self.column_self_attention( - x, - self_attn_mask=self_attn_mask, - self_attn_padding_mask=self_attn_padding_mask, - ) - x = self.feed_forward_layer(x) - if need_head_weights: - return x, column_attn, row_attn - else: - return x - - -class LearnedPositionalEmbedding(nn.Embedding): - """ - This module learns positional embeddings up to a fixed maximum size. - Padding ids are ignored by either offsetting based on padding_idx - or by setting padding_idx to None and ensuring that the appropriate - position ids are passed to the forward function. 
- """ - - def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): - if padding_idx is not None: - num_embeddings_ = num_embeddings + padding_idx + 1 - else: - num_embeddings_ = num_embeddings - super().__init__(num_embeddings_, embedding_dim, padding_idx) - self.max_positions = num_embeddings - - def forward(self, input: torch.Tensor): - """Input is expected to be of size [bsz x seqlen].""" - if input.size(1) > self.max_positions: - raise ValueError( - f"Sequence length {input.size(1)} above maximum " - f" sequence length of {self.max_positions}" - ) - mask = input.ne(self.padding_idx).int() - positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + self.padding_idx - return F.embedding( - positions, - self.weight, - self.padding_idx, - self.max_norm, - self.norm_type, - self.scale_grad_by_freq, - self.sparse, - ) - - -class SinusoidalPositionalEmbedding(nn.Module): - def __init__(self, embed_dim, padding_idx, learned=False): - super().__init__() - self.embed_dim = embed_dim - self.padding_idx = padding_idx - self.register_buffer("_float_tensor", torch.FloatTensor(1)) - self.weights = None - - def forward(self, x): - bsz, seq_len = x.shape - max_pos = self.padding_idx + 1 + seq_len - if self.weights is None or max_pos > self.weights.size(0): - self.weights = self.get_embedding(max_pos) - self.weights = self.weights.type_as(self._float_tensor) - - positions = self.make_positions(x) - return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach() - - def make_positions(self, x): - mask = x.ne(self.padding_idx) - range_buf = torch.arange(x.size(1), device=x.device).expand_as(x) + self.padding_idx + 1 - positions = range_buf.expand_as(x) - return positions * mask.long() + self.padding_idx * (1 - mask.long()) - - def get_embedding(self, num_embeddings): - half_dim = self.embed_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) - emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) - if self.embed_dim % 2 == 1: - # zero pad - emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) - if self.padding_idx is not None: - emb[self.padding_idx, :] = 0 - return emb - - -class RobertaLMHead(nn.Module): - """Head for masked language modeling.""" - - def __init__(self, embed_dim, output_dim, weight): - super().__init__() - self.dense = nn.Linear(embed_dim, embed_dim) - self.layer_norm = ESM1bLayerNorm(embed_dim) - self.weight = weight - self.bias = nn.Parameter(torch.zeros(output_dim)) - - def forward(self, features): - x = self.dense(features) - x = gelu(x) - x = self.layer_norm(x) - # project back to size of vocabulary with bias - x = F.linear(x, self.weight) + self.bias - return x - - -class ContactPredictionHead(nn.Module): - """Performs symmetrization, apc, and computes a logistic regression on the output features""" - - def __init__( - self, - in_features: int, - prepend_bos: bool, - append_eos: bool, - bias=True, - eos_idx: Optional[int] = None, - ): - super().__init__() - self.in_features = in_features - self.prepend_bos = prepend_bos - self.append_eos = append_eos - if append_eos and eos_idx is None: - raise ValueError("Using an alphabet with eos token, but no eos token was passed in.") - self.eos_idx = eos_idx - self.regression = nn.Linear(in_features, 1, bias) - self.activation = nn.Sigmoid() - - def forward(self, tokens, attentions): - # remove 
eos token attentions - if self.append_eos: - eos_mask = tokens.ne(self.eos_idx).to(attentions) - eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) - attentions = attentions * eos_mask[:, None, None, :, :] - attentions = attentions[..., :-1, :-1] - # remove cls token attentions - if self.prepend_bos: - attentions = attentions[..., 1:, 1:] - batch_size, layers, heads, seqlen, _ = attentions.size() - attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen) - - # features: B x C x T x T - attentions = attentions.to( - self.regression.weight.device - ) # attentions always float32, may need to convert to float16 - attentions = apc(symmetrize(attentions)) - attentions = attentions.permute(0, 2, 3, 1) - #print(f'----------{attentions.dtype, attentions.float().dtype}----') - return attentions.sum(dim=-1), self.activation(self.regression(attentions).squeeze(3))#float().to(self.regression.weight.device)).squeeze(3)) - - -class NormalizedResidualBlock(nn.Module): - def __init__( - self, - layer: nn.Module, - embedding_dim: int, - dropout: float = 0.1, - ): - super().__init__() - self.embedding_dim = embedding_dim - - self.layer = layer - self.dropout_module = nn.Dropout( - dropout, - ) - self.layer_norm = ESM1bLayerNorm(self.embedding_dim) - - def forward(self, x, *args, **kwargs): - residual = x - x = self.layer_norm(x) - outputs = self.layer(x, *args, **kwargs) - if isinstance(outputs, tuple): - x, *out = outputs - else: - x = outputs - out = None - - x = self.dropout_module(x) - x = residual + x - - if out is not None: - return (x,) + tuple(out) - else: - return x - - -class FeedForwardNetwork(nn.Module): - def __init__( - self, - embedding_dim: int, - ffn_embedding_dim: int, - activation_dropout: float = 0.1, - max_tokens_per_msa: int = 2**14, - ): - super().__init__() - self.embedding_dim = embedding_dim - self.ffn_embedding_dim = ffn_embedding_dim - self.max_tokens_per_msa = max_tokens_per_msa - self.activation_fn = nn.GELU() - self.activation_dropout_module = nn.Dropout( - activation_dropout, - ) - self.fc1 = nn.Linear(embedding_dim, ffn_embedding_dim) - self.fc2 = nn.Linear(ffn_embedding_dim, embedding_dim) - - def forward(self, x): - x = self.activation_fn(self.fc1(x)) - x = self.activation_dropout_module(x) - x = self.fc2(x) - return x diff --git a/spaces/ShilongLiu/Grounding_DINO_demo/groundingdino/datasets/transforms.py b/spaces/ShilongLiu/Grounding_DINO_demo/groundingdino/datasets/transforms.py deleted file mode 100644 index 91cf9269e4b31008a3ddca34a19b038a9b399991..0000000000000000000000000000000000000000 --- a/spaces/ShilongLiu/Grounding_DINO_demo/groundingdino/datasets/transforms.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Transforms and data augmentation for both image + bbox. -""" -import os -import random - -import PIL -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as F - -from groundingdino.util.box_ops import box_xyxy_to_cxcywh -from groundingdino.util.misc import interpolate - - -def crop(image, target, region): - cropped_image = F.crop(image, *region) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? 
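The contact head above collapses the stacked attention maps with `apc(symmetrize(attentions))` before the logistic regression. Neither helper is defined in this excerpt; the sketch below shows the standard definitions and is an assumption about what the imported versions do:

```python
import torch

def symmetrize(x):
    # make each attention map symmetric in its last two (residue) dimensions
    return x + x.transpose(-1, -2)

def apc(x):
    # average product correction: subtract the outer product of row/column
    # marginals to remove background coupling between positions
    a1 = x.sum(-1, keepdim=True)
    a2 = x.sum(-2, keepdim=True)
    a12 = x.sum((-1, -2), keepdim=True)
    return x - (a1 * a2) / a12
```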
- target["size"] = torch.tensor([h, w]) - - fields = ["labels", "area", "iscrowd", "positive_map"] - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? - target["masks"] = target["masks"][:, i : i + h, j : j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target["boxes"].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target["masks"].flatten(1).any(1) - - for field in fields: - if field in target: - target[field] = target[field][keep] - - if os.environ.get("IPDB_SHILONG_DEBUG", None) == "INFO": - # for debug and visualization only. - if "strings_positive" in target: - target["strings_positive"] = [ - _i for _i, _j in zip(target["strings_positive"], keep) if _j - ] - - return cropped_image, target - - -def hflip(image, target): - flipped_image = F.hflip(image) - - w, h = image.size - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor( - [w, 0, w, 0] - ) - target["boxes"] = boxes - - if "masks" in target: - target["masks"] = target["masks"].flip(-1) - - return flipped_image, target - - -def resize(image, target, size, max_size=None): - # size can be min_size (scalar) or (w, h) tuple - - def get_size_with_aspect_ratio(image_size, size, max_size=None): - w, h = image_size - if max_size is not None: - min_original_size = float(min((w, h))) - max_original_size = float(max((w, h))) - if max_original_size / min_original_size * size > max_size: - size = int(round(max_size * min_original_size / max_original_size)) - - if (w <= h and w == size) or (h <= w and h == size): - return (h, w) - - if w < h: - ow = size - oh = int(size * h / w) - else: - oh = size - ow = int(size * w / h) - - return (oh, ow) - - def get_size(image_size, size, max_size=None): - if isinstance(size, (list, tuple)): - return size[::-1] - else: - return get_size_with_aspect_ratio(image_size, size, max_size) - - size = get_size(image.size, size, max_size) - rescaled_image = F.resize(image, size) - - if target is None: - return rescaled_image, None - - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) - ratio_width, ratio_height = ratios - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor( - [ratio_width, ratio_height, ratio_width, ratio_height] - ) - target["boxes"] = scaled_boxes - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - h, w = size - target["size"] = torch.tensor([h, w]) - - if "masks" in target: - target["masks"] = ( - interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5 - ) - - return rescaled_image, 
target - - -def pad(image, target, padding): - # assumes that we only pad on the bottom right corners - padded_image = F.pad(image, (0, 0, padding[0], padding[1])) - if target is None: - return padded_image, None - target = target.copy() - # should we do something wrt the original size? - target["size"] = torch.tensor(padded_image.size[::-1]) - if "masks" in target: - target["masks"] = torch.nn.functional.pad(target["masks"], (0, padding[0], 0, padding[1])) - return padded_image, target - - -class ResizeDebug(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - return resize(img, target, self.size) - - -class RandomCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - region = T.RandomCrop.get_params(img, self.size) - return crop(img, target, region) - - -class RandomSizeCrop(object): - def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False): - # respect_boxes: True to keep all boxes - # False to tolerence box filter - self.min_size = min_size - self.max_size = max_size - self.respect_boxes = respect_boxes - - def __call__(self, img: PIL.Image.Image, target: dict): - init_boxes = len(target["boxes"]) - max_patience = 10 - for i in range(max_patience): - w = random.randint(self.min_size, min(img.width, self.max_size)) - h = random.randint(self.min_size, min(img.height, self.max_size)) - region = T.RandomCrop.get_params(img, [h, w]) - result_img, result_target = crop(img, target, region) - if ( - not self.respect_boxes - or len(result_target["boxes"]) == init_boxes - or i == max_patience - 1 - ): - return result_img, result_target - return result_img, result_target - - -class CenterCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - image_width, image_height = img.size - crop_height, crop_width = self.size - crop_top = int(round((image_height - crop_height) / 2.0)) - crop_left = int(round((image_width - crop_width) / 2.0)) - return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) - - -class RandomHorizontalFlip(object): - def __init__(self, p=0.5): - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return hflip(img, target) - return img, target - - -class RandomResize(object): - def __init__(self, sizes, max_size=None): - assert isinstance(sizes, (list, tuple)) - self.sizes = sizes - self.max_size = max_size - - def __call__(self, img, target=None): - size = random.choice(self.sizes) - return resize(img, target, size, self.max_size) - - -class RandomPad(object): - def __init__(self, max_pad): - self.max_pad = max_pad - - def __call__(self, img, target): - pad_x = random.randint(0, self.max_pad) - pad_y = random.randint(0, self.max_pad) - return pad(img, target, (pad_x, pad_y)) - - -class RandomSelect(object): - """ - Randomly selects between transforms1 and transforms2, - with probability p for transforms1 and (1 - p) for transforms2 - """ - - def __init__(self, transforms1, transforms2, p=0.5): - self.transforms1 = transforms1 - self.transforms2 = transforms2 - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return self.transforms1(img, target) - return self.transforms2(img, target) - - -class ToTensor(object): - def __call__(self, img, target): - return F.to_tensor(img), target - - -class RandomErasing(object): - def __init__(self, *args, **kwargs): - self.eraser = T.RandomErasing(*args, **kwargs) - - def __call__(self, img, target): - return 
self.eraser(img), target - - -class Normalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, image, target=None): - image = F.normalize(image, mean=self.mean, std=self.std) - if target is None: - return image, None - target = target.copy() - h, w = image.shape[-2:] - if "boxes" in target: - boxes = target["boxes"] - boxes = box_xyxy_to_cxcywh(boxes) - boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) - target["boxes"] = boxes - return image, target - - -class Compose(object): - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, image, target): - for t in self.transforms: - image, target = t(image, target) - return image, target - - def __repr__(self): - format_string = self.__class__.__name__ + "(" - for t in self.transforms: - format_string += "\n" - format_string += " {0}".format(t) - format_string += "\n)" - return format_string diff --git a/spaces/SmilingWolf/wd-v1-4-tags/app.py b/spaces/SmilingWolf/wd-v1-4-tags/app.py deleted file mode 100644 index 33fa06d229e5bdad6268136cc0fb55c64909cbfd..0000000000000000000000000000000000000000 --- a/spaces/SmilingWolf/wd-v1-4-tags/app.py +++ /dev/null @@ -1,285 +0,0 @@ -from __future__ import annotations - -import argparse -import functools -import html -import os - -import gradio as gr -import huggingface_hub -import numpy as np -import onnxruntime as rt -import pandas as pd -import piexif -import piexif.helper -import PIL.Image - -from Utils import dbimutils - -TITLE = "WaifuDiffusion v1.4 Tags" -DESCRIPTION = """ -Demo for: -- [SmilingWolf/wd-v1-4-moat-tagger-v2](https://huggingface.co/SmilingWolf/wd-v1-4-moat-tagger-v2) -- [SmilingWolf/wd-v1-4-swinv2-tagger-v2](https://huggingface.co/SmilingWolf/wd-v1-4-convnext-tagger-v2) -- [SmilingWolf/wd-v1-4-convnext-tagger-v2](https://huggingface.co/SmilingWolf/wd-v1-4-convnext-tagger-v2) -- [SmilingWolf/wd-v1-4-convnextv2-tagger-v2](https://huggingface.co/SmilingWolf/wd-v1-4-convnextv2-tagger-v2) -- [SmilingWolf/wd-v1-4-vit-tagger-v2](https://huggingface.co/SmilingWolf/wd-v1-4-vit-tagger-v2) - -Includes "ready to copy" prompt and a prompt analyzer. 
- -Modified from [NoCrypt/DeepDanbooru_string](https://huggingface.co/spaces/NoCrypt/DeepDanbooru_string) -Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru) - -PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - -Example image by [ほし☆☆☆](https://www.pixiv.net/en/users/43565085) -""" - -HF_TOKEN = os.environ["HF_TOKEN"] -MOAT_MODEL_REPO = "SmilingWolf/wd-v1-4-moat-tagger-v2" -SWIN_MODEL_REPO = "SmilingWolf/wd-v1-4-swinv2-tagger-v2" -CONV_MODEL_REPO = "SmilingWolf/wd-v1-4-convnext-tagger-v2" -CONV2_MODEL_REPO = "SmilingWolf/wd-v1-4-convnextv2-tagger-v2" -VIT_MODEL_REPO = "SmilingWolf/wd-v1-4-vit-tagger-v2" -MODEL_FILENAME = "model.onnx" -LABEL_FILENAME = "selected_tags.csv" - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument("--score-slider-step", type=float, default=0.05) - parser.add_argument("--score-general-threshold", type=float, default=0.35) - parser.add_argument("--score-character-threshold", type=float, default=0.85) - parser.add_argument("--share", action="store_true") - return parser.parse_args() - - -def load_model(model_repo: str, model_filename: str) -> rt.InferenceSession: - path = huggingface_hub.hf_hub_download( - model_repo, model_filename, use_auth_token=HF_TOKEN - ) - model = rt.InferenceSession(path) - return model - - -def change_model(model_name): - global loaded_models - - if model_name == "MOAT": - model = load_model(MOAT_MODEL_REPO, MODEL_FILENAME) - elif model_name == "SwinV2": - model = load_model(SWIN_MODEL_REPO, MODEL_FILENAME) - elif model_name == "ConvNext": - model = load_model(CONV_MODEL_REPO, MODEL_FILENAME) - elif model_name == "ConvNextV2": - model = load_model(CONV2_MODEL_REPO, MODEL_FILENAME) - elif model_name == "ViT": - model = load_model(VIT_MODEL_REPO, MODEL_FILENAME) - - loaded_models[model_name] = model - return loaded_models[model_name] - - -def load_labels() -> list[str]: - path = huggingface_hub.hf_hub_download( - MOAT_MODEL_REPO, LABEL_FILENAME, use_auth_token=HF_TOKEN - ) - df = pd.read_csv(path) - - tag_names = df["name"].tolist() - rating_indexes = list(np.where(df["category"] == 9)[0]) - general_indexes = list(np.where(df["category"] == 0)[0]) - character_indexes = list(np.where(df["category"] == 4)[0]) - return tag_names, rating_indexes, general_indexes, character_indexes - - -def plaintext_to_html(text): - text = ( - "
<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split("\n")]) + "</p>
      " - ) - return text - - -def predict( - image: PIL.Image.Image, - model_name: str, - general_threshold: float, - character_threshold: float, - tag_names: list[str], - rating_indexes: list[np.int64], - general_indexes: list[np.int64], - character_indexes: list[np.int64], -): - global loaded_models - - rawimage = image - - model = loaded_models[model_name] - if model is None: - model = change_model(model_name) - - _, height, width, _ = model.get_inputs()[0].shape - - # Alpha to white - image = image.convert("RGBA") - new_image = PIL.Image.new("RGBA", image.size, "WHITE") - new_image.paste(image, mask=image) - image = new_image.convert("RGB") - image = np.asarray(image) - - # PIL RGB to OpenCV BGR - image = image[:, :, ::-1] - - image = dbimutils.make_square(image, height) - image = dbimutils.smart_resize(image, height) - image = image.astype(np.float32) - image = np.expand_dims(image, 0) - - input_name = model.get_inputs()[0].name - label_name = model.get_outputs()[0].name - probs = model.run([label_name], {input_name: image})[0] - - labels = list(zip(tag_names, probs[0].astype(float))) - - # First 4 labels are actually ratings: pick one with argmax - ratings_names = [labels[i] for i in rating_indexes] - rating = dict(ratings_names) - - # Then we have general tags: pick any where prediction confidence > threshold - general_names = [labels[i] for i in general_indexes] - general_res = [x for x in general_names if x[1] > general_threshold] - general_res = dict(general_res) - - # Everything else is characters: pick any where prediction confidence > threshold - character_names = [labels[i] for i in character_indexes] - character_res = [x for x in character_names if x[1] > character_threshold] - character_res = dict(character_res) - - b = dict(sorted(general_res.items(), key=lambda item: item[1], reverse=True)) - a = ( - ", ".join(list(b.keys())) - .replace("_", " ") - .replace("(", "\(") - .replace(")", "\)") - ) - c = ", ".join(list(b.keys())) - - items = rawimage.info - geninfo = "" - - if "exif" in rawimage.info: - exif = piexif.load(rawimage.info["exif"]) - exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b"") - try: - exif_comment = piexif.helper.UserComment.load(exif_comment) - except ValueError: - exif_comment = exif_comment.decode("utf8", errors="ignore") - - items["exif comment"] = exif_comment - geninfo = exif_comment - - for field in [ - "jfif", - "jfif_version", - "jfif_unit", - "jfif_density", - "dpi", - "exif", - "loop", - "background", - "timestamp", - "duration", - ]: - items.pop(field, None) - - geninfo = items.get("parameters", geninfo) - - info = f""" -
<p><h4>PNG Info</h4></p>
      -""" - for key, text in items.items(): - info += ( - f""" -
<div>
-<p><b>{plaintext_to_html(str(key))}</b></p>
-<p>{plaintext_to_html(str(text))}</p>
-</div>
      -""".strip() - + "\n" - ) - - if len(info) == 0: - message = "Nothing found in the image." - info = f"
<div><p>{message}</p></div>
      " - - return (a, c, rating, character_res, general_res, info) - - -def main(): - global loaded_models - loaded_models = { - "MOAT": None, - "SwinV2": None, - "ConvNext": None, - "ConvNextV2": None, - "ViT": None, - } - - args = parse_args() - - change_model("MOAT") - - tag_names, rating_indexes, general_indexes, character_indexes = load_labels() - - func = functools.partial( - predict, - tag_names=tag_names, - rating_indexes=rating_indexes, - general_indexes=general_indexes, - character_indexes=character_indexes, - ) - - gr.Interface( - fn=func, - inputs=[ - gr.Image(type="pil", label="Input"), - gr.Radio( - ["MOAT", "SwinV2", "ConvNext", "ConvNextV2", "ViT"], - value="MOAT", - label="Model", - ), - gr.Slider( - 0, - 1, - step=args.score_slider_step, - value=args.score_general_threshold, - label="General Tags Threshold", - ), - gr.Slider( - 0, - 1, - step=args.score_slider_step, - value=args.score_character_threshold, - label="Character Tags Threshold", - ), - ], - outputs=[ - gr.Textbox(label="Output (string)"), - gr.Textbox(label="Output (raw string)"), - gr.Label(label="Rating"), - gr.Label(label="Output (characters)"), - gr.Label(label="Output (tags)"), - gr.HTML(), - ], - examples=[["power.jpg", "MOAT", 0.35, 0.85]], - title=TITLE, - description=DESCRIPTION, - allow_flagging="never", - ).launch( - enable_queue=True, - share=args.share, - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/dateutil/relativedelta.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/dateutil/relativedelta.py deleted file mode 100644 index a9e85f7e6cd7488e6b2f4b249d5cf6af314c3859..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/dateutil/relativedelta.py +++ /dev/null @@ -1,599 +0,0 @@ -# -*- coding: utf-8 -*- -import datetime -import calendar - -import operator -from math import copysign - -from six import integer_types -from warnings import warn - -from ._common import weekday - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) - -__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - - -class relativedelta(object): - """ - The relativedelta type is designed to be applied to an existing datetime and - can replace specific components of that datetime, or represents an interval - of time. - - It is based on the specification of the excellent work done by M.-A. Lemburg - in his - `mx.DateTime `_ extension. - However, notice that this type does *NOT* implement the same algorithm as - his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. - - There are two different ways to build a relativedelta instance. The - first one is passing it two date/datetime classes:: - - relativedelta(datetime1, datetime2) - - The second one is passing it any number of the following keyword arguments:: - - relativedelta(arg1=x,arg2=y,arg3=z...) - - year, month, day, hour, minute, second, microsecond: - Absolute information (argument is singular); adding or subtracting a - relativedelta with absolute information does not perform an arithmetic - operation, but rather REPLACES the corresponding value in the - original datetime with the value(s) in relativedelta. 
- - years, months, weeks, days, hours, minutes, seconds, microseconds: - Relative information, may be negative (argument is plural); adding - or subtracting a relativedelta with relative information performs - the corresponding arithmetic operation on the original datetime value - with the information in the relativedelta. - - weekday: - One of the weekday instances (MO, TU, etc) available in the - relativedelta module. These instances may receive a parameter N, - specifying the Nth weekday, which could be positive or negative - (like MO(+1) or MO(-2)). Not specifying it is the same as specifying - +1. You can also use an integer, where 0=MO. This argument is always - relative e.g. if the calculated date is already Monday, using MO(1) - or MO(-1) won't change the day. To effectively make it absolute, use - it in combination with the day argument (e.g. day=1, MO(1) for first - Monday of the month). - - leapdays: - Will add given days to the date found, if year is a leap - year, and the date found is post 28 of february. - - yearday, nlyearday: - Set the yearday or the non-leap year day (jump leap days). - These are converted to day/month/leapdays information. - - There are relative and absolute forms of the keyword - arguments. The plural is relative, and the singular is - absolute. For each argument in the order below, the absolute form - is applied first (by setting each attribute to that value) and - then the relative form (by adding the value to the attribute). - - The order of attributes considered when this relativedelta is - added to a datetime is: - - 1. Year - 2. Month - 3. Day - 4. Hours - 5. Minutes - 6. Seconds - 7. Microseconds - - Finally, weekday is applied, using the rule described above. - - For example - - >>> from datetime import datetime - >>> from dateutil.relativedelta import relativedelta, MO - >>> dt = datetime(2018, 4, 9, 13, 37, 0) - >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) - >>> dt + delta - datetime.datetime(2018, 4, 2, 14, 37) - - First, the day is set to 1 (the first of the month), then 25 hours - are added, to get to the 2nd day and 14th hour, finally the - weekday is applied, but since the 2nd is already a Monday there is - no effect. - - """ - - def __init__(self, dt1=None, dt2=None, - years=0, months=0, days=0, leapdays=0, weeks=0, - hours=0, minutes=0, seconds=0, microseconds=0, - year=None, month=None, day=None, weekday=None, - yearday=None, nlyearday=None, - hour=None, minute=None, second=None, microsecond=None): - - if dt1 and dt2: - # datetime is a subclass of date. 
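A worked illustration of the constructor forms described in the docstring above (results checked by hand):

```python
from datetime import datetime
from dateutil.relativedelta import relativedelta

dt = datetime(2018, 4, 9, 13, 37)
dt + relativedelta(month=1)   # singular = absolute: the month is replaced -> 2018-01-09 13:37
dt + relativedelta(months=1)  # plural = relative: one month is added      -> 2018-05-09 13:37

# the two-datetime form yields the delta that carries the second date onto the first
relativedelta(datetime(2018, 5, 9), datetime(2018, 4, 2))  # relativedelta(months=+1, days=+7)
```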
So both must be date - if not (isinstance(dt1, datetime.date) and - isinstance(dt2, datetime.date)): - raise TypeError("relativedelta only diffs datetime/date") - - # We allow two dates, or two datetimes, so we coerce them to be - # of the same type - if (isinstance(dt1, datetime.datetime) != - isinstance(dt2, datetime.datetime)): - if not isinstance(dt1, datetime.datetime): - dt1 = datetime.datetime.fromordinal(dt1.toordinal()) - elif not isinstance(dt2, datetime.datetime): - dt2 = datetime.datetime.fromordinal(dt2.toordinal()) - - self.years = 0 - self.months = 0 - self.days = 0 - self.leapdays = 0 - self.hours = 0 - self.minutes = 0 - self.seconds = 0 - self.microseconds = 0 - self.year = None - self.month = None - self.day = None - self.weekday = None - self.hour = None - self.minute = None - self.second = None - self.microsecond = None - self._has_time = 0 - - # Get year / month delta between the two - months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) - self._set_months(months) - - # Remove the year/month delta so the timedelta is just well-defined - # time units (seconds, days and microseconds) - dtm = self.__radd__(dt2) - - # If we've overshot our target, make an adjustment - if dt1 < dt2: - compare = operator.gt - increment = 1 - else: - compare = operator.lt - increment = -1 - - while compare(dt1, dtm): - months += increment - self._set_months(months) - dtm = self.__radd__(dt2) - - # Get the timedelta between the "months-adjusted" date and dt1 - delta = dt1 - dtm - self.seconds = delta.seconds + delta.days * 86400 - self.microseconds = delta.microseconds - else: - # Check for non-integer values in integer-only quantities - if any(x is not None and x != int(x) for x in (years, months)): - raise ValueError("Non-integer years and months are " - "ambiguous and not currently supported.") - - # Relative information - self.years = int(years) - self.months = int(months) - self.days = days + weeks * 7 - self.leapdays = leapdays - self.hours = hours - self.minutes = minutes - self.seconds = seconds - self.microseconds = microseconds - - # Absolute information - self.year = year - self.month = month - self.day = day - self.hour = hour - self.minute = minute - self.second = second - self.microsecond = microsecond - - if any(x is not None and int(x) != x - for x in (year, month, day, hour, - minute, second, microsecond)): - # For now we'll deprecate floats - later it'll be an error. - warn("Non-integer value passed as absolute information. 
" + - "This is not a well-defined condition and will raise " + - "errors in future versions.", DeprecationWarning) - - if isinstance(weekday, integer_types): - self.weekday = weekdays[weekday] - else: - self.weekday = weekday - - yday = 0 - if nlyearday: - yday = nlyearday - elif yearday: - yday = yearday - if yearday > 59: - self.leapdays = -1 - if yday: - ydayidx = [31, 59, 90, 120, 151, 181, 212, - 243, 273, 304, 334, 366] - for idx, ydays in enumerate(ydayidx): - if yday <= ydays: - self.month = idx+1 - if idx == 0: - self.day = yday - else: - self.day = yday-ydayidx[idx-1] - break - else: - raise ValueError("invalid year day (%d)" % yday) - - self._fix() - - def _fix(self): - if abs(self.microseconds) > 999999: - s = _sign(self.microseconds) - div, mod = divmod(self.microseconds * s, 1000000) - self.microseconds = mod * s - self.seconds += div * s - if abs(self.seconds) > 59: - s = _sign(self.seconds) - div, mod = divmod(self.seconds * s, 60) - self.seconds = mod * s - self.minutes += div * s - if abs(self.minutes) > 59: - s = _sign(self.minutes) - div, mod = divmod(self.minutes * s, 60) - self.minutes = mod * s - self.hours += div * s - if abs(self.hours) > 23: - s = _sign(self.hours) - div, mod = divmod(self.hours * s, 24) - self.hours = mod * s - self.days += div * s - if abs(self.months) > 11: - s = _sign(self.months) - div, mod = divmod(self.months * s, 12) - self.months = mod * s - self.years += div * s - if (self.hours or self.minutes or self.seconds or self.microseconds - or self.hour is not None or self.minute is not None or - self.second is not None or self.microsecond is not None): - self._has_time = 1 - else: - self._has_time = 0 - - @property - def weeks(self): - return int(self.days / 7.0) - - @weeks.setter - def weeks(self, value): - self.days = self.days - (self.weeks * 7) + value * 7 - - def _set_months(self, months): - self.months = months - if abs(self.months) > 11: - s = _sign(self.months) - div, mod = divmod(self.months * s, 12) - self.months = mod * s - self.years = div * s - else: - self.years = 0 - - def normalized(self): - """ - Return a version of this object represented entirely using integer - values for the relative attributes. - - >>> relativedelta(days=1.5, hours=2).normalized() - relativedelta(days=+1, hours=+14) - - :return: - Returns a :class:`dateutil.relativedelta.relativedelta` object. 
- """ - # Cascade remainders down (rounding each to roughly nearest microsecond) - days = int(self.days) - - hours_f = round(self.hours + 24 * (self.days - days), 11) - hours = int(hours_f) - - minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) - minutes = int(minutes_f) - - seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) - seconds = int(seconds_f) - - microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) - - # Constructor carries overflow back up with call to _fix() - return self.__class__(years=self.years, months=self.months, - days=days, hours=hours, minutes=minutes, - seconds=seconds, microseconds=microseconds, - leapdays=self.leapdays, year=self.year, - month=self.month, day=self.day, - weekday=self.weekday, hour=self.hour, - minute=self.minute, second=self.second, - microsecond=self.microsecond) - - def __add__(self, other): - if isinstance(other, relativedelta): - return self.__class__(years=other.years + self.years, - months=other.months + self.months, - days=other.days + self.days, - hours=other.hours + self.hours, - minutes=other.minutes + self.minutes, - seconds=other.seconds + self.seconds, - microseconds=(other.microseconds + - self.microseconds), - leapdays=other.leapdays or self.leapdays, - year=(other.year if other.year is not None - else self.year), - month=(other.month if other.month is not None - else self.month), - day=(other.day if other.day is not None - else self.day), - weekday=(other.weekday if other.weekday is not None - else self.weekday), - hour=(other.hour if other.hour is not None - else self.hour), - minute=(other.minute if other.minute is not None - else self.minute), - second=(other.second if other.second is not None - else self.second), - microsecond=(other.microsecond if other.microsecond - is not None else - self.microsecond)) - if isinstance(other, datetime.timedelta): - return self.__class__(years=self.years, - months=self.months, - days=self.days + other.days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds + other.seconds, - microseconds=self.microseconds + other.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - if not isinstance(other, datetime.date): - return NotImplemented - elif self._has_time and not isinstance(other, datetime.datetime): - other = datetime.datetime.fromordinal(other.toordinal()) - year = (self.year or other.year)+self.years - month = self.month or other.month - if self.months: - assert 1 <= abs(self.months) <= 12 - month += self.months - if month > 12: - year += 1 - month -= 12 - elif month < 1: - year -= 1 - month += 12 - day = min(calendar.monthrange(year, month)[1], - self.day or other.day) - repl = {"year": year, "month": month, "day": day} - for attr in ["hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - repl[attr] = value - days = self.days - if self.leapdays and month > 2 and calendar.isleap(year): - days += self.leapdays - ret = (other.replace(**repl) - + datetime.timedelta(days=days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds, - microseconds=self.microseconds)) - if self.weekday: - weekday, nth = self.weekday.weekday, self.weekday.n or 1 - jumpdays = (abs(nth) - 1) * 7 - if nth > 0: - jumpdays += (7 - ret.weekday() + weekday) % 7 - else: - jumpdays += (ret.weekday() - weekday) % 7 - jumpdays *= -1 - ret += 
datetime.timedelta(days=jumpdays) - return ret - - def __radd__(self, other): - return self.__add__(other) - - def __rsub__(self, other): - return self.__neg__().__radd__(other) - - def __sub__(self, other): - if not isinstance(other, relativedelta): - return NotImplemented # In case the other object defines __rsub__ - return self.__class__(years=self.years - other.years, - months=self.months - other.months, - days=self.days - other.days, - hours=self.hours - other.hours, - minutes=self.minutes - other.minutes, - seconds=self.seconds - other.seconds, - microseconds=self.microseconds - other.microseconds, - leapdays=self.leapdays or other.leapdays, - year=(self.year if self.year is not None - else other.year), - month=(self.month if self.month is not None else - other.month), - day=(self.day if self.day is not None else - other.day), - weekday=(self.weekday if self.weekday is not None else - other.weekday), - hour=(self.hour if self.hour is not None else - other.hour), - minute=(self.minute if self.minute is not None else - other.minute), - second=(self.second if self.second is not None else - other.second), - microsecond=(self.microsecond if self.microsecond - is not None else - other.microsecond)) - - def __abs__(self): - return self.__class__(years=abs(self.years), - months=abs(self.months), - days=abs(self.days), - hours=abs(self.hours), - minutes=abs(self.minutes), - seconds=abs(self.seconds), - microseconds=abs(self.microseconds), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __neg__(self): - return self.__class__(years=-self.years, - months=-self.months, - days=-self.days, - hours=-self.hours, - minutes=-self.minutes, - seconds=-self.seconds, - microseconds=-self.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __bool__(self): - return not (not self.years and - not self.months and - not self.days and - not self.hours and - not self.minutes and - not self.seconds and - not self.microseconds and - not self.leapdays and - self.year is None and - self.month is None and - self.day is None and - self.weekday is None and - self.hour is None and - self.minute is None and - self.second is None and - self.microsecond is None) - # Compatibility with Python 2.x - __nonzero__ = __bool__ - - def __mul__(self, other): - try: - f = float(other) - except TypeError: - return NotImplemented - - return self.__class__(years=int(self.years * f), - months=int(self.months * f), - days=int(self.days * f), - hours=int(self.hours * f), - minutes=int(self.minutes * f), - seconds=int(self.seconds * f), - microseconds=int(self.microseconds * f), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - __rmul__ = __mul__ - - def __eq__(self, other): - if not isinstance(other, relativedelta): - return NotImplemented - if self.weekday or other.weekday: - if not self.weekday or not other.weekday: - return False - if self.weekday.weekday != other.weekday.weekday: - return False - n1, n2 = self.weekday.n, other.weekday.n - if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): - return False - return (self.years == other.years and - 
self.months == other.months and - self.days == other.days and - self.hours == other.hours and - self.minutes == other.minutes and - self.seconds == other.seconds and - self.microseconds == other.microseconds and - self.leapdays == other.leapdays and - self.year == other.year and - self.month == other.month and - self.day == other.day and - self.hour == other.hour and - self.minute == other.minute and - self.second == other.second and - self.microsecond == other.microsecond) - - def __hash__(self): - return hash(( - self.weekday, - self.years, - self.months, - self.days, - self.hours, - self.minutes, - self.seconds, - self.microseconds, - self.leapdays, - self.year, - self.month, - self.day, - self.hour, - self.minute, - self.second, - self.microsecond, - )) - - def __ne__(self, other): - return not self.__eq__(other) - - def __div__(self, other): - try: - reciprocal = 1 / float(other) - except TypeError: - return NotImplemented - - return self.__mul__(reciprocal) - - __truediv__ = __div__ - - def __repr__(self): - l = [] - for attr in ["years", "months", "days", "leapdays", - "hours", "minutes", "seconds", "microseconds"]: - value = getattr(self, attr) - if value: - l.append("{attr}={value:+g}".format(attr=attr, value=value)) - for attr in ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - l.append("{attr}={value}".format(attr=attr, value=repr(value))) - return "{classname}({attrs})".format(classname=self.__class__.__name__, - attrs=", ".join(l)) - - -def _sign(x): - return int(copysign(1, x)) - -# vim:ts=4:sw=4:et diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/group_points.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/group_points.py deleted file mode 100644 index 6c3ec9d758ebe4e1c2205882af4be154008253a5..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/group_points.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Tuple - -import torch -from torch import nn as nn -from torch.autograd import Function - -from ..utils import ext_loader -from .ball_query import ball_query -from .knn import knn - -ext_module = ext_loader.load_ext( - '_ext', ['group_points_forward', 'group_points_backward']) - - -class QueryAndGroup(nn.Module): - """Groups points with a ball query of radius. - - Args: - max_radius (float): The maximum radius of the balls. - If None is given, we will use kNN sampling instead of ball query. - sample_num (int): Maximum number of features to gather in the ball. - min_radius (float, optional): The minimum radius of the balls. - Default: 0. - use_xyz (bool, optional): Whether to use xyz. - Default: True. - return_grouped_xyz (bool, optional): Whether to return grouped xyz. - Default: False. - normalize_xyz (bool, optional): Whether to normalize xyz. - Default: False. - uniform_sample (bool, optional): Whether to sample uniformly. - Default: False - return_unique_cnt (bool, optional): Whether to return the count of - unique samples. Default: False. - return_grouped_idx (bool, optional): Whether to return grouped idx. - Default: False. 
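A shape-level sketch of driving `QueryAndGroup` (the sizes are illustrative; the underlying ball-query and grouping kernels are compiled extensions, so CUDA tensors are assumed):

```python
import torch

grouper = QueryAndGroup(max_radius=0.2, sample_num=32)  # class defined below
points_xyz = torch.rand(2, 1024, 3).cuda()  # (B, N, 3)
center_xyz = torch.rand(2, 128, 3).cuda()   # (B, npoint, 3)
features = torch.rand(2, 16, 1024).cuda()   # (B, C, N)
new_features = grouper(points_xyz, center_xyz, features)
# with use_xyz=True: (B, 3 + C, npoint, sample_num) = (2, 19, 128, 32)
```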
- """ - - def __init__(self, - max_radius, - sample_num, - min_radius=0, - use_xyz=True, - return_grouped_xyz=False, - normalize_xyz=False, - uniform_sample=False, - return_unique_cnt=False, - return_grouped_idx=False): - super().__init__() - self.max_radius = max_radius - self.min_radius = min_radius - self.sample_num = sample_num - self.use_xyz = use_xyz - self.return_grouped_xyz = return_grouped_xyz - self.normalize_xyz = normalize_xyz - self.uniform_sample = uniform_sample - self.return_unique_cnt = return_unique_cnt - self.return_grouped_idx = return_grouped_idx - if self.return_unique_cnt: - assert self.uniform_sample, \ - 'uniform_sample should be True when ' \ - 'returning the count of unique samples' - if self.max_radius is None: - assert not self.normalize_xyz, \ - 'can not normalize grouped xyz when max_radius is None' - - def forward(self, points_xyz, center_xyz, features=None): - """ - Args: - points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. - center_xyz (Tensor): (B, npoint, 3) coordinates of the centriods. - features (Tensor): (B, C, N) Descriptors of the features. - - Returns: - Tensor: (B, 3 + C, npoint, sample_num) Grouped feature. - """ - # if self.max_radius is None, we will perform kNN instead of ball query - # idx is of shape [B, npoint, sample_num] - if self.max_radius is None: - idx = knn(self.sample_num, points_xyz, center_xyz, False) - idx = idx.transpose(1, 2).contiguous() - else: - idx = ball_query(self.min_radius, self.max_radius, self.sample_num, - points_xyz, center_xyz) - - if self.uniform_sample: - unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) - for i_batch in range(idx.shape[0]): - for i_region in range(idx.shape[1]): - unique_ind = torch.unique(idx[i_batch, i_region, :]) - num_unique = unique_ind.shape[0] - unique_cnt[i_batch, i_region] = num_unique - sample_ind = torch.randint( - 0, - num_unique, (self.sample_num - num_unique, ), - dtype=torch.long) - all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) - idx[i_batch, i_region, :] = all_ind - - xyz_trans = points_xyz.transpose(1, 2).contiguous() - # (B, 3, npoint, sample_num) - grouped_xyz = grouping_operation(xyz_trans, idx) - grouped_xyz_diff = grouped_xyz - \ - center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets - if self.normalize_xyz: - grouped_xyz_diff /= self.max_radius - - if features is not None: - grouped_features = grouping_operation(features, idx) - if self.use_xyz: - # (B, C + 3, npoint, sample_num) - new_features = torch.cat([grouped_xyz_diff, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - assert (self.use_xyz - ), 'Cannot have not features and not use xyz as a feature!' - new_features = grouped_xyz_diff - - ret = [new_features] - if self.return_grouped_xyz: - ret.append(grouped_xyz) - if self.return_unique_cnt: - ret.append(unique_cnt) - if self.return_grouped_idx: - ret.append(idx) - if len(ret) == 1: - return ret[0] - else: - return tuple(ret) - - -class GroupAll(nn.Module): - """Group xyz with feature. - - Args: - use_xyz (bool): Whether to use xyz. - """ - - def __init__(self, use_xyz: bool = True): - super().__init__() - self.use_xyz = use_xyz - - def forward(self, - xyz: torch.Tensor, - new_xyz: torch.Tensor, - features: torch.Tensor = None): - """ - Args: - xyz (Tensor): (B, N, 3) xyz coordinates of the features. - new_xyz (Tensor): new xyz coordinates of the features. - features (Tensor): (B, C, N) features to group. - - Returns: - Tensor: (B, C + 3, 1, N) Grouped feature. 
- """ - grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) - if features is not None: - grouped_features = features.unsqueeze(2) - if self.use_xyz: - # (B, 3 + C, 1, N) - new_features = torch.cat([grouped_xyz, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - new_features = grouped_xyz - - return new_features - - -class GroupingOperation(Function): - """Group feature with given index.""" - - @staticmethod - def forward(ctx, features: torch.Tensor, - indices: torch.Tensor) -> torch.Tensor: - """ - Args: - features (Tensor): (B, C, N) tensor of features to group. - indices (Tensor): (B, npoint, nsample) the indices of - features to group with. - - Returns: - Tensor: (B, C, npoint, nsample) Grouped features. - """ - features = features.contiguous() - indices = indices.contiguous() - - B, nfeatures, nsample = indices.size() - _, C, N = features.size() - output = torch.cuda.FloatTensor(B, C, nfeatures, nsample) - - ext_module.group_points_forward(B, C, N, nfeatures, nsample, features, - indices, output) - - ctx.for_backwards = (indices, N) - return output - - @staticmethod - def backward(ctx, - grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients - of the output from forward. - - Returns: - Tensor: (B, C, N) gradient of the features. - """ - idx, N = ctx.for_backwards - - B, C, npoint, nsample = grad_out.size() - grad_features = torch.cuda.FloatTensor(B, C, N).zero_() - - grad_out_data = grad_out.data.contiguous() - ext_module.group_points_backward(B, C, N, npoint, nsample, - grad_out_data, idx, - grad_features.data) - return grad_features, None - - -grouping_operation = GroupingOperation.apply diff --git a/spaces/TEnngal/bingo/src/app/layout.tsx b/spaces/TEnngal/bingo/src/app/layout.tsx deleted file mode 100644 index 8b5122759987177b8dc4e4356d1d06cea25c15ea..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/app/layout.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import { Metadata } from 'next' -import { Toaster } from 'react-hot-toast' -import { TailwindIndicator } from '@/components/tailwind-indicator' -import { Providers } from '@/components/providers' -import { Header } from '@/components/header' - -import '@/app/globals.scss' - - -export const metadata: Metadata = { - title: { - default: 'Bing AI Chatbot', - template: `%s - Bing AI Chatbot` - }, - description: 'Bing AI Chatbot Web App.', - themeColor: [ - { media: '(prefers-color-scheme: light)', color: 'white' }, - { media: '(prefers-color-scheme: dark)', color: 'dark' } - ], - icons: { - icon: '/favicon.ico', - shortcut: '../assets/images/logo.svg', - apple: '../assets/images/logo.svg' - } -} - -interface RootLayoutProps { - children: React.ReactNode -} - -export default function RootLayout({ children }: RootLayoutProps) { - return ( - - - - -
{/* @ts-ignore */}
-<Header />
-{children}
-<TailwindIndicator />
      - - - ) -} diff --git a/spaces/TNR-5/Image-Semantic-Searchj/README.md b/spaces/TNR-5/Image-Semantic-Searchj/README.md deleted file mode 100644 index a7ce5651f873bbf28a6526cbd4767dd8234e8963..0000000000000000000000000000000000000000 --- a/spaces/TNR-5/Image-Semantic-Searchj/README.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: 🖼 ImgLib -emoji: ✨️🖼 -colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.2.0 -app_file: imglib.py -pinned: true -license: mit ---- - -![ImgLib](https://huggingface.co/spaces/TNR-5/Image-Semantic-Searchj/resolve/main/img/ImgLib.png) - -
Immerse yourself in the world of beautiful images of everything, here you have a whale, and a house, and even a landscape, and all this is generated by AI and is completely unique!
      diff --git a/spaces/TYH71/gradio-ml-skeleton/launch_docker.sh b/spaces/TYH71/gradio-ml-skeleton/launch_docker.sh deleted file mode 100644 index 9101b7349f5f4dae30902a84eea32d40f3b3d59c..0000000000000000000000000000000000000000 --- a/spaces/TYH71/gradio-ml-skeleton/launch_docker.sh +++ /dev/null @@ -1,7 +0,0 @@ -if [ ! -n "$docker_repo_name" ] || [ ! -n "$docker_tag" ]; then - echo "Error: Parameters not defined." - exit 1 -fi - -docker run -it -p 7860:7860 \ - $docker_repo_name:$docker_tag \ No newline at end of file diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py deleted file mode 100644 index 09a6c66cf6f4b21c38a7829b029f0ab5deda1f9e..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import torch -import torch.distributed as dist -from fvcore.nn.distributed import differentiable_all_reduce -from torch import nn -from torch.nn import functional as F - -from detectron2.utils import comm, env - -from .wrappers import BatchNorm2d - - -class FrozenBatchNorm2d(nn.Module): - """ - BatchNorm2d where the batch statistics and the affine parameters are fixed. - - It contains non-trainable buffers called - "weight" and "bias", "running_mean", "running_var", - initialized to perform identity transformation. - - The pre-trained backbone models from Caffe2 only contain "weight" and "bias", - which are computed from the original four parameters of BN. - The affine transform `x * weight + bias` will perform the equivalent - computation of `(x - running_mean) / sqrt(running_var) * weight + bias`. - When loading a backbone model from Caffe2, "running_mean" and "running_var" - will be left unchanged as identity transformation. - - Other pre-trained backbone models may contain all 4 parameters. - - The forward is implemented by `F.batch_norm(..., training=False)`. - """ - - _version = 3 - - def __init__(self, num_features, eps=1e-5): - super().__init__() - self.num_features = num_features - self.eps = eps - self.register_buffer("weight", torch.ones(num_features)) - self.register_buffer("bias", torch.zeros(num_features)) - self.register_buffer("running_mean", torch.zeros(num_features)) - self.register_buffer("running_var", torch.ones(num_features) - eps) - - def forward(self, x): - if x.requires_grad: - # When gradients are needed, F.batch_norm will use extra memory - # because its backward op computes gradients for weight/bias as well. - scale = self.weight * (self.running_var + self.eps).rsqrt() - bias = self.bias - self.running_mean * scale - scale = scale.reshape(1, -1, 1, 1) - bias = bias.reshape(1, -1, 1, 1) - out_dtype = x.dtype # may be half - return x * scale.to(out_dtype) + bias.to(out_dtype) - else: - # When gradients are not needed, F.batch_norm is a single fused op - # and provide more optimization opportunities. 
- return F.batch_norm( - x, - self.running_mean, - self.running_var, - self.weight, - self.bias, - training=False, - eps=self.eps, - ) - - def _load_from_state_dict( - self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs - ): - version = local_metadata.get("version", None) - - if version is None or version < 2: - # No running_mean/var in early versions - # This will silent the warnings - if prefix + "running_mean" not in state_dict: - state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean) - if prefix + "running_var" not in state_dict: - state_dict[prefix + "running_var"] = torch.ones_like(self.running_var) - - super()._load_from_state_dict( - state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs - ) - - def __repr__(self): - return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps) - - @classmethod - def convert_frozen_batchnorm(cls, module): - """ - Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm. - - Args: - module (torch.nn.Module): - - Returns: - If module is BatchNorm/SyncBatchNorm, returns a new module. - Otherwise, in-place convert module and return it. - - Similar to convert_sync_batchnorm in - https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py - """ - bn_module = nn.modules.batchnorm - bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm) - res = module - if isinstance(module, bn_module): - res = cls(module.num_features) - if module.affine: - res.weight.data = module.weight.data.clone().detach() - res.bias.data = module.bias.data.clone().detach() - res.running_mean.data = module.running_mean.data - res.running_var.data = module.running_var.data - res.eps = module.eps - else: - for name, child in module.named_children(): - new_child = cls.convert_frozen_batchnorm(child) - if new_child is not child: - res.add_module(name, new_child) - return res - - -def get_norm(norm, out_channels): - """ - Args: - norm (str or callable): either one of BN, SyncBN, FrozenBN, GN; - or a callable that takes a channel number and returns - the normalization layer as a nn.Module. - - Returns: - nn.Module or None: the normalization layer - """ - if norm is None: - return None - if isinstance(norm, str): - if len(norm) == 0: - return None - norm = { - "BN": BatchNorm2d, - # Fixed in https://github.com/pytorch/pytorch/pull/36382 - "SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm, - "FrozenBN": FrozenBatchNorm2d, - "GN": lambda channels: nn.GroupNorm(32, channels), - # for debugging: - "nnSyncBN": nn.SyncBatchNorm, - "naiveSyncBN": NaiveSyncBatchNorm, - # expose stats_mode N as an option to caller, required for zero-len inputs - "naiveSyncBN_N": lambda channels: NaiveSyncBatchNorm(channels, stats_mode="N"), - }[norm] - return norm(out_channels) - - -class NaiveSyncBatchNorm(BatchNorm2d): - """ - In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient - when the batch size on each worker is different. - (e.g., when scale augmentation is used, or when it is applied to mask head). - - This is a slower but correct alternative to `nn.SyncBatchNorm`. - - Note: - There isn't a single definition of Sync BatchNorm. - - When ``stats_mode==""``, this module computes overall statistics by using - statistics of each worker with equal weight. The result is true statistics - of all samples (as if they are all on one worker) only when all workers - have the same (N, H, W). 
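A typical use of the `convert_frozen_batchnorm` classmethod defined above; the torchvision backbone is only an example model:

```python
import torchvision

model = torchvision.models.resnet18()
model = FrozenBatchNorm2d.convert_frozen_batchnorm(model)
# every BatchNorm2d / SyncBatchNorm submodule is now a fixed-statistics FrozenBatchNorm2d
```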
This mode does not support inputs with zero batch size. - - When ``stats_mode=="N"``, this module computes overall statistics by weighting - the statistics of each worker by their ``N``. The result is true statistics - of all samples (as if they are all on one worker) only when all workers - have the same (H, W). It is slower than ``stats_mode==""``. - - Even though the result of this module may not be the true statistics of all samples, - it may still be reasonable because it might be preferrable to assign equal weights - to all workers, regardless of their (H, W) dimension, instead of putting larger weight - on larger images. From preliminary experiments, little difference is found between such - a simplified implementation and an accurate computation of overall mean & variance. - """ - - def __init__(self, *args, stats_mode="", **kwargs): - super().__init__(*args, **kwargs) - assert stats_mode in ["", "N"] - self._stats_mode = stats_mode - - def forward(self, input): - if comm.get_world_size() == 1 or not self.training: - return super().forward(input) - - B, C = input.shape[0], input.shape[1] - - half_input = input.dtype == torch.float16 - if half_input: - # fp16 does not have good enough numerics for the reduction here - input = input.float() - mean = torch.mean(input, dim=[0, 2, 3]) - meansqr = torch.mean(input * input, dim=[0, 2, 3]) - - if self._stats_mode == "": - assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.' - vec = torch.cat([mean, meansqr], dim=0) - vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size()) - mean, meansqr = torch.split(vec, C) - momentum = self.momentum - else: - if B == 0: - vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype) - vec = vec + input.sum() # make sure there is gradient w.r.t input - else: - vec = torch.cat( - [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0 - ) - vec = differentiable_all_reduce(vec * B) - - total_batch = vec[-1].detach() - momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0 - mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero - - var = meansqr - mean * mean - invstd = torch.rsqrt(var + self.eps) - scale = self.weight * invstd - bias = self.bias - mean * scale - scale = scale.reshape(1, -1, 1, 1) - bias = bias.reshape(1, -1, 1, 1) - - self.running_mean += momentum * (mean.detach() - self.running_mean) - self.running_var += momentum * (var.detach() - self.running_var) - ret = input * scale + bias - if half_input: - ret = ret.half() - return ret - - -class CycleBatchNormList(nn.ModuleList): - """ - Implement domain-specific BatchNorm by cycling. - - When a BatchNorm layer is used for multiple input domains or input - features, it might need to maintain a separate test-time statistics - for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`. - - This module implements it by using N separate BN layers - and it cycles through them every time a forward() is called. - - NOTE: The caller of this module MUST guarantee to always call - this module by multiple of N times. Otherwise its test-time statistics - will be incorrect. - """ - - def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs): - """ - Args: - length: number of BatchNorm layers to cycle. - bn_class: the BatchNorm class to use - kwargs: arguments of the BatchNorm class, such as num_features. 
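A sketch of the cycling contract: with `length=2` the caller must present the two domains in the same order on every step (shapes are made up):

```python
import torch

bn = CycleBatchNormList(length=2, num_features=64)
feat_a = torch.rand(4, 64, 8, 8)  # domain A batch
feat_b = torch.rand(4, 64, 8, 8)  # domain B batch
out_a = bn(feat_a)  # served by BN #0
out_b = bn(feat_b)  # served by BN #1; the next call wraps back to BN #0
```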
- """ - self._affine = kwargs.pop("affine", True) - super().__init__([bn_class(**kwargs, affine=False) for k in range(length)]) - if self._affine: - # shared affine, domain-specific BN - channels = self[0].num_features - self.weight = nn.Parameter(torch.ones(channels)) - self.bias = nn.Parameter(torch.zeros(channels)) - self._pos = 0 - - def forward(self, x): - ret = self[self._pos](x) - self._pos = (self._pos + 1) % len(self) - - if self._affine: - w = self.weight.reshape(1, -1, 1, 1) - b = self.bias.reshape(1, -1, 1, 1) - return ret * w + b - else: - return ret - - def extra_repr(self): - return f"affine={self._affine}" diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py deleted file mode 100644 index b6d95690c381798d6af54087f050105791e94fe3..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# Part of the code is from https://github.com/tztztztztz/eql.detectron2/blob/master/projects/EQL/eql/fast_rcnn.py -import logging -import math -import json -from typing import Dict, Union -import torch -from fvcore.nn import giou_loss, smooth_l1_loss -from torch import nn -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.layers import Linear, ShapeSpec, batched_nms, cat, nonzero_tuple -from detectron2.modeling.box_regression import Box2BoxTransform -from detectron2.structures import Boxes, Instances -from detectron2.utils.events import get_event_storage -from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers -from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference -from detectron2.modeling.roi_heads.fast_rcnn import _log_classification_stats -from detectron2.utils.comm import get_world_size -from .fed_loss import load_class_freq, get_fed_loss_inds - -__all__ = ["CustomFastRCNNOutputLayers"] - -class CustomFastRCNNOutputLayers(FastRCNNOutputLayers): - def __init__( - self, - cfg, - input_shape: ShapeSpec, - **kwargs - ): - super().__init__(cfg, input_shape, **kwargs) - - self.cfg = cfg - - def losses(self, predictions, proposals): - """ - enable advanced loss - """ - scores, proposal_deltas = predictions - gt_classes = ( - cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0) - ) - num_classes = self.num_classes - _log_classification_stats(scores, gt_classes) - - if len(proposals): - proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4 - assert not proposal_boxes.requires_grad, "Proposals should not require gradients!" - gt_boxes = cat( - [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals], - dim=0, - ) - else: - proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device) - - loss_cls = self.softmax_cross_entropy_loss(scores, gt_classes) - return { - "loss_cls": loss_cls, - "loss_box_reg": self.box_reg_loss( - proposal_boxes, gt_boxes, proposal_deltas, gt_classes) - } - - - def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes): - if pred_class_logits.numel() == 0: - return pred_class_logits.new_zeros([1])[0] # This is more robust than .sum() * 0. 
- - B = pred_class_logits.shape[0] - C = pred_class_logits.shape[1] - 1 - - target = pred_class_logits.new_zeros(B, C + 1) - target[range(len(gt_classes)), gt_classes] = 1 # B x (C + 1) - target = target[:, :C] # B x C - - weight = 1 - - cls_loss = F.binary_cross_entropy_with_logits( - pred_class_logits[:, :-1], target, reduction='none') # B x C - loss = torch.sum(cls_loss * weight) / B - return loss - - - def softmax_cross_entropy_loss(self, pred_class_logits, gt_classes): - """ - Cross-entropy loss with changed handling of the no-instance (empty logits) case. - """ - if pred_class_logits.numel() == 0: - return pred_class_logits.new_zeros([1])[0] - - loss = F.cross_entropy( - pred_class_logits, gt_classes, reduction="mean") - return loss - - - def inference(self, predictions, proposals): - """ - Inference that can additionally fold the proposal (objectness) scores into the class scores. - """ - boxes = self.predict_boxes(predictions, proposals) - scores = self.predict_probs(predictions, proposals) - if self.cfg.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE: - proposal_scores = [p.get('objectness_logits') for p in proposals] - scores = [(s * ps[:, None]) ** 0.5 \ - for s, ps in zip(scores, proposal_scores)] - image_shapes = [x.image_size for x in proposals] - return fast_rcnn_inference( - boxes, - scores, - image_shapes, - self.test_score_thresh, - self.test_nms_thresh, - self.test_topk_per_image, - ) - - - def predict_probs(self, predictions, proposals): - """ - Predict per-class probabilities; this variant applies softmax over the scores. - """ - scores, _ = predictions - num_inst_per_image = [len(p) for p in proposals] - probs = F.softmax(scores, dim=-1) - return probs.split(num_inst_per_image, dim=0) diff --git a/spaces/Tomoniai/Demo_Mistral_Chat/README.md b/spaces/Tomoniai/Demo_Mistral_Chat/README.md deleted file mode 100644 index 24a95b18b750c653411ec70c0bfa700b5aa766cb..0000000000000000000000000000000000000000 --- a/spaces/Tomoniai/Demo_Mistral_Chat/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Demo Mistral Chat -emoji: 🏆 -colorFrom: purple -colorTo: red -sdk: gradio -sdk_version: 3.45.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Txandim/stabilityai-stable-diffusion-2-1-base/README.md b/spaces/Txandim/stabilityai-stable-diffusion-2-1-base/README.md deleted file mode 100644 index 7198e03b18c9c0ae693d87775fdc83d6cf9df054..0000000000000000000000000000000000000000 --- a/spaces/Txandim/stabilityai-stable-diffusion-2-1-base/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stabilityai Stable Diffusion 2 1 Base -emoji: 🚀 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/UGK/UGK/README.md b/spaces/UGK/UGK/README.md deleted file mode 100644 index afb44018b2cb1cf75d0059ce35eead91e0f49b66..0000000000000000000000000000000000000000 --- a/spaces/UGK/UGK/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: UGK -emoji: 📚 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/User1342/WatchTower/Pinpoint/Aggregator_Word2Vec.py b/spaces/User1342/WatchTower/Pinpoint/Aggregator_Word2Vec.py deleted file mode 100644 index 9be57952774f125cb8ac53dc60b8a0afb32b6256..0000000000000000000000000000000000000000 --- a/spaces/User1342/WatchTower/Pinpoint/Aggregator_Word2Vec.py +++ /dev/null @@ -1,32 +0,0 @@ -from 
gensim.models import Word2Vec - - -class word_2_vec_aggregator(): - """ - A wrapper class around gensim used for creating a word2vec model - """ - - def get_model(self, list_of_sentences): - """ - Used to retrieve the model - :param list_of_sentences: the corpus sentences, one string each - :return: the trained model - """ - - list_of_sentences_in_nested_list = [] - - for sentence in list_of_sentences: - - # Skip unigrams - if " " not in sentence: - continue - - list_of_sentences_in_nested_list.append(sentence.split(" ")) - - model = Word2Vec(min_count=1, window=5) # default vector size of 100, window size of 5 - model.build_vocab(list_of_sentences_in_nested_list) # prepare the model vocabulary - model.model_trimmed_post_training = False - model.train(list_of_sentences_in_nested_list, total_examples=model.corpus_count, - epochs=model.epochs) # train word vectors - - return model diff --git a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/cppipc/waiter.h b/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/cppipc/waiter.h deleted file mode 100644 index ee45fe3517be95ac1688a3e3540189edeb0d860c..0000000000000000000000000000000000000000 --- a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/cppipc/waiter.h +++ /dev/null @@ -1,83 +0,0 @@ -#pragma once - -#include <utility> -#include <string> -#include <mutex> -#include <atomic> - -#include "libipc/def.h" -#include "libipc/mutex.h" -#include "libipc/condition.h" -#include "libipc/platform/detail.h" - -namespace ipc { -namespace detail { - -class waiter { - ipc::sync::condition cond_; - ipc::sync::mutex lock_; - std::atomic<bool> quit_ {false}; - -public: - static void init(); - - waiter() = default; - waiter(char const *name) { - open(name); - } - - ~waiter() { - close(); - } - - bool valid() const noexcept { - return cond_.valid() && lock_.valid(); - } - - bool open(char const *name) noexcept { - quit_.store(false, std::memory_order_relaxed); - if (!cond_.open((std::string{"_waiter_cond_"} + name).c_str())) { - return false; - } - if (!lock_.open((std::string{"_waiter_lock_"} + name).c_str())) { - cond_.close(); - return false; - } - return valid(); - } - - void close() noexcept { - cond_.close(); - lock_.close(); - } - - template <typename F> - bool wait_if(F &&pred, std::uint64_t tm = ipc::invalid_value) noexcept { - IPC_UNUSED_ std::lock_guard guard {lock_}; - while ([this, &pred] { - return !quit_.load(std::memory_order_relaxed) - && std::forward<F>(pred)(); - }()) { - if (!cond_.wait(lock_, tm)) return false; - } - return true; - } - - bool notify() noexcept { - std::lock_guard{lock_}; // barrier - return cond_.notify(lock_); - } - - bool broadcast() noexcept { - std::lock_guard{lock_}; // barrier - return cond_.broadcast(lock_); - } - - bool quit_waiting() { - quit_.store(true, std::memory_order_release); - return broadcast(); - } -}; - -} // namespace detail -} // namespace ipc diff --git a/spaces/XzJosh/Ava2-Bert-VITS2/resample.py b/spaces/XzJosh/Ava2-Bert-VITS2/resample.py deleted file mode 100644 index 2ed1685654a371c5722168e9987809b05b1cb224..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Ava2-Bert-VITS2/resample.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count - -import soundfile -from scipy.io import wavfile -from tqdm import tqdm - - -def process(item): - spkdir, wav_name, args = item - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - 
os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, sr=args.sr) - soundfile.write( - os.path.join(args.out_dir, speaker, wav_name), - wav, - sr - ) - - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--sr", type=int, default=44100, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./raw", help="path to source dir") - parser.add_argument("--out_dir", type=str, default="./dataset", help="path to target dir") - args = parser.parse_args() - # processes = 8 - processes = cpu_count() - 2 if cpu_count() > 4 else 1 - pool = Pool(processes=processes) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])): - pass diff --git a/spaces/XzJosh/Carol-Bert-VITS2/monotonic_align/__init__.py b/spaces/XzJosh/Carol-Bert-VITS2/monotonic_align/__init__.py deleted file mode 100644 index 75603d26cf2b8d6196f5a68a89f9e49d8e519bc8..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Carol-Bert-VITS2/monotonic_align/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - -def maximum_path(neg_cent, mask): - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) diff --git a/spaces/XzJosh/maimai-Bert-VITS2/text/symbols.py b/spaces/XzJosh/maimai-Bert-VITS2/text/symbols.py deleted file mode 100644 index 9dfae4e633829f20c4fd767b1c7a9198911ed801..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/maimai-Bert-VITS2/text/symbols.py +++ /dev/null @@ -1,51 +0,0 @@ -punctuation = ['!', '?', '…', ",", ".", "'", '-'] -pu_symbols = punctuation + ["SP", "UNK"] -pad = '_' - -# chinese -zh_symbols = ['E', 'En', 'a', 'ai', 'an', 'ang', 'ao', 'b', 'c', 'ch', 'd', 'e', 'ei', 'en', 'eng', 'er', 'f', 'g', 'h', - 'i', 'i0', 'ia', 'ian', 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'ir', 'iu', 'j', 'k', 'l', 'm', 'n', 'o', - 'ong', - 'ou', 'p', 'q', 'r', 's', 'sh', 't', 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', 'vn', - 'w', 'x', 'y', 'z', 'zh', - "AA", "EE", "OO"] -num_zh_tones = 6 - -# japanese -ja_symbols = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 'h', 'hy', 'i', 'j', 'k', 'ky', - 'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z'] -num_ja_tones = 1 - -# English -en_symbols = ['aa', 'ae', 'ah', 'ao', 'aw', 'ay', 'b', 'ch', 'd', 'dh', 'eh', 'er', 'ey', 'f', 'g', 'hh', 'ih', 'iy', - 'jh', 'k', 'l', 'm', 'n', 'ng', 'ow', 'oy', 'p', 'r', 's', - 'sh', 't', 'th', 'uh', 'uw', 'V', 'w', 'y', 'z', 'zh'] -num_en_tones = 4 - -# combine all symbols -normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols)) -symbols = [pad] + normal_symbols + pu_symbols -sil_phonemes_ids = [symbols.index(i) for i in pu_symbols] - -# combine all tones -num_tones = num_zh_tones + num_ja_tones + num_en_tones - -# language maps -language_id_map = { - 'ZH': 0, - "JA": 1, - "EN": 2 -} -num_languages = 
len(language_id_map.keys()) - -language_tone_start_map = { - 'ZH': 0, - "JA": num_zh_tones, - "EN": num_zh_tones + num_ja_tones -} - -if __name__ == '__main__': - a = set(zh_symbols) - b = set(en_symbols) - print(sorted(a&b)) - diff --git a/spaces/XzJosh/nine1-Bert-VITS2/text/chinese.py b/spaces/XzJosh/nine1-Bert-VITS2/text/chinese.py deleted file mode 100644 index 276753880b73de2e8889dcb2101cd98c09e0710b..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/nine1-Bert-VITS2/text/chinese.py +++ /dev/null @@ -1,193 +0,0 @@ -import os -import re - -import cn2an -from pypinyin import lazy_pinyin, Style - -from text import symbols -from text.symbols import punctuation -from text.tone_sandhi import ToneSandhi - -current_file_path = os.path.dirname(__file__) -pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in - open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()} - -import jieba.posseg as psg - - -rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - '$': '.', - '“': "'", - '”': "'", - '‘': "'", - '’': "'", - '(': "'", - ')': "'", - '(': "'", - ')': "'", - '《': "'", - '》': "'", - '【': "'", - '】': "'", - '[': "'", - ']': "'", - '—': "-", - '~': "-", - '~': "-", - '「': "'", - '」': "'", - -} - -tone_modifier = ToneSandhi() - -def replace_punctuation(text): - text = text.replace("嗯", "恩").replace("呣","母") - pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys())) - - replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) - - replaced_text = re.sub(r'[^\u4e00-\u9fa5'+"".join(punctuation)+r']+', '', replaced_text) - - return replaced_text - -def g2p(text): - pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation)) - sentences = [i for i in re.split(pattern, text) if i.strip()!=''] - phones, tones, word2ph = _g2p(sentences) - assert sum(word2ph) == len(phones) - assert len(word2ph) == len(text) # Sometimes this assertion fails; wrap it in a try/except if needed. 
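The punctuation handling above is a table-driven regex substitution. A self-contained sketch of the same technique with a trimmed-down `rep_map` (the real module additionally strips anything that is neither a CJK character nor one of the kept marks):

```python
import re

rep_map = {":": ",", ";": ",", "。": ".", "!": "!", "?": "?", "…": "…"}
pattern = re.compile("|".join(re.escape(p) for p in rep_map))

def normalize_punct(text: str) -> str:
    # Replace every mapped mark in a single pass over the string.
    return pattern.sub(lambda m: rep_map[m.group()], text)

print(normalize_punct("你好!今天怎么样?"))  # -> 你好!今天怎么样?
```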
- phones = ['_'] + phones + ["_"] - tones = [0] + tones + [0] - word2ph = [1] + word2ph + [1] - return phones, tones, word2ph - - -def _get_initials_finals(word): - initials = [] - finals = [] - orig_initials = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.INITIALS) - orig_finals = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for c, v in zip(orig_initials, orig_finals): - initials.append(c) - finals.append(v) - return initials, finals - - -def _g2p(segments): - phones_list = [] - tones_list = [] - word2ph = [] - for seg in segments: - pinyins = [] - # Remove all English words from the sentence - seg = re.sub('[a-zA-Z]+', '', seg) - seg_cut = psg.lcut(seg) - initials = [] - finals = [] - seg_cut = tone_modifier.pre_merge_for_modify(seg_cut) - for word, pos in seg_cut: - if pos == 'eng': - continue - sub_initials, sub_finals = _get_initials_finals(word) - sub_finals = tone_modifier.modified_tone(word, pos, - sub_finals) - initials.append(sub_initials) - finals.append(sub_finals) - - # assert len(sub_initials) == len(sub_finals) == len(word) - initials = sum(initials, []) - finals = sum(finals, []) - # - for c, v in zip(initials, finals): - raw_pinyin = c+v - # NOTE: post process for pypinyin outputs - # we discriminate i, ii and iii - if c == v: - assert c in punctuation - phone = [c] - tone = '0' - word2ph.append(1) - else: - v_without_tone = v[:-1] - tone = v[-1] - - pinyin = c+v_without_tone - assert tone in '12345' - - if c: - # multi-syllable: has an initial - v_rep_map = { - "uei": 'ui', - 'iou': 'iu', - 'uen': 'un', - } - if v_without_tone in v_rep_map.keys(): - pinyin = c+v_rep_map[v_without_tone] - else: - # single syllable: no initial - pinyin_rep_map = { - 'ing': 'ying', - 'i': 'yi', - 'in': 'yin', - 'u': 'wu', - } - if pinyin in pinyin_rep_map.keys(): - pinyin = pinyin_rep_map[pinyin] - else: - single_rep_map = { - 'v': 'yu', - 'e': 'e', - 'i': 'y', - 'u': 'w', - } - if pinyin[0] in single_rep_map.keys(): - pinyin = single_rep_map[pinyin[0]]+pinyin[1:] - - assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin) - phone = pinyin_to_symbol_map[pinyin].split(' ') - word2ph.append(len(phone)) - - phones_list += phone - tones_list += [int(tone)] * len(phone) - return phones_list, tones_list, word2ph - - - -def text_normalize(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - text = replace_punctuation(text) - return text - -def get_bert_feature(text, word2ph): - from text import chinese_bert - return chinese_bert.get_bert_feature(text, word2ph) - -if __name__ == '__main__': - from text.chinese_bert import get_bert_feature - text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏" - text = text_normalize(text) - print(text) - phones, tones, word2ph = g2p(text) - bert = get_bert_feature(text, word2ph) - - print(phones, tones, word2ph, bert.shape) - - -# # Example usage -# text = "这是一个示例文本:,你好!这是一个测试...." 
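The initial/final split in `_get_initials_finals` above comes straight from pypinyin; here is a minimal sketch (assuming pypinyin is installed) showing how the tone ends up as the trailing digit of the `FINALS_TONE3` form:

```python
from pypinyin import lazy_pinyin, Style

word = "你好"
initials = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.INITIALS)
finals = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
for c, v in zip(initials, finals):
    print(c, v[:-1], v[-1])  # initial, toneless final, tone digit
# n i 3
# h ao 3
```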
-# print(g2p_paddle(text)) # Output: 这是一个示例文本你好这是一个测试 diff --git a/spaces/YUANAI/DiffspeechResearch/tasks/tts/ps.py b/spaces/YUANAI/DiffspeechResearch/tasks/tts/ps.py deleted file mode 100644 index 995dec8c7f40c27310a6231b08330e807d02c405..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/tasks/tts/ps.py +++ /dev/null @@ -1,194 +0,0 @@ -import os -import torch -import torch.nn.functional as F -from torch import nn - -from modules.tts.portaspeech.portaspeech import PortaSpeech -from tasks.tts.fs import FastSpeechTask -from utils.audio.align import mel2token_to_dur -from utils.commons.hparams import hparams -from utils.metrics.diagonal_metrics import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate -from utils.nn.model_utils import num_params -import numpy as np - -from utils.plot.plot import spec_to_figure -from utils.text.text_encoder import build_token_encoder - - -class PortaSpeechTask(FastSpeechTask): - def __init__(self): - super().__init__() - data_dir = hparams['binary_data_dir'] - self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json') - - def build_tts_model(self): - ph_dict_size = len(self.token_encoder) - word_dict_size = len(self.word_encoder) - self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams) - - def on_train_start(self): - super().on_train_start() - for n, m in self.model.named_children(): - num_params(m, model_name=n) - if hasattr(self.model, 'fvae'): - for n, m in self.model.fvae.named_children(): - num_params(m, model_name=f'fvae.{n}') - - def run_model(self, sample, infer=False, *args, **kwargs): - txt_tokens = sample['txt_tokens'] - word_tokens = sample['word_tokens'] - spk_embed = sample.get('spk_embed') - spk_id = sample.get('spk_ids') - if not infer: - output = self.model(txt_tokens, word_tokens, - ph2word=sample['ph2word'], - mel2word=sample['mel2word'], - mel2ph=sample['mel2ph'], - word_len=sample['word_lengths'].max(), - tgt_mels=sample['mels'], - pitch=sample.get('pitch'), - spk_embed=spk_embed, - spk_id=spk_id, - infer=False, - global_step=self.global_step) - losses = {} - losses['kl_v'] = output['kl'].detach() - losses_kl = output['kl'] - losses_kl = torch.clamp(losses_kl, min=hparams['kl_min']) - losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl - losses_kl = losses_kl * hparams['lambda_kl'] - losses['kl'] = losses_kl - self.add_mel_loss(output['mel_out'], sample['mels'], losses) - if hparams['dur_level'] == 'word': - self.add_dur_loss( - output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses) - self.get_attn_stats(output['attn'], sample, losses) - else: - super(PortaSpeechTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses) - return losses, output - else: - use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) - output = self.model( - txt_tokens, word_tokens, - ph2word=sample['ph2word'], - word_len=sample['word_lengths'].max(), - pitch=sample.get('pitch'), - mel2ph=sample['mel2ph'] if use_gt_dur else None, - mel2word=sample['mel2word'] if use_gt_dur else None, - tgt_mels=sample['mels'], - infer=True, - spk_embed=spk_embed, - spk_id=spk_id, - ) - return output - - def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None): - T = word_len.max() - dur_gt = mel2token_to_dur(mel2token, T).float() - nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float() - dur_pred = dur_pred * nonpadding - dur_gt = dur_gt * nonpadding - wdur = 
F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none') - wdur = (wdur * nonpadding).sum() / nonpadding.sum() - if hparams['lambda_word_dur'] > 0: - losses['wdur'] = wdur * hparams['lambda_word_dur'] - if hparams['lambda_sent_dur'] > 0: - sent_dur_p = dur_pred.sum(-1) - sent_dur_g = dur_gt.sum(-1) - sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean') - losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] - - def validation_step(self, sample, batch_idx): - return super().validation_step(sample, batch_idx) - - def save_valid_result(self, sample, batch_idx, model_out): - super(PortaSpeechTask, self).save_valid_result(sample, batch_idx, model_out) - if self.global_step > 0 and hparams['dur_level'] == 'word': - self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step) - - def get_attn_stats(self, attn, sample, logging_outputs, prefix=''): - # diagonal_focus_rate - txt_lengths = sample['txt_lengths'].float() - mel_lengths = sample['mel_lengths'].float() - src_padding_mask = sample['txt_tokens'].eq(0) - target_padding_mask = sample['mels'].abs().sum(-1).eq(0) - src_seg_mask = sample['txt_tokens'].eq(self.seg_idx) - attn_ks = txt_lengths.float() / mel_lengths.float() - - focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data - phone_coverage_rate = get_phone_coverage_rate( - attn, src_padding_mask, src_seg_mask, target_padding_mask).mean() - diagonal_focus_rate, diag_mask = get_diagonal_focus_rate( - attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask) - logging_outputs[f'{prefix}fr'] = focus_rate.mean().data - logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data - logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data - - def get_plot_dur_info(self, sample, model_out): - if hparams['dur_level'] == 'word': - T_txt = sample['word_lengths'].max() - dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0] - dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt - txt = sample['ph_words'][0].split(" ") - else: - T_txt = sample['txt_tokens'].shape[1] - dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0] - dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt - txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) - txt = txt.split(" ") - return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt} - - def build_optimizer(self, model): - self.optimizer = torch.optim.AdamW( - self.model.parameters(), - lr=hparams['lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - weight_decay=hparams['weight_decay']) - return self.optimizer - - def build_scheduler(self, optimizer): - return FastSpeechTask.build_scheduler(self, optimizer) - - ############ - # infer - ############ - def test_start(self): - super().test_start() - if hparams.get('save_attn', False): - os.makedirs(f'{self.gen_dir}/attn', exist_ok=True) - self.model.store_inverse_all() - - def test_step(self, sample, batch_idx): - assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference' - outputs = self.run_model(sample, infer=True) - text = sample['text'][0] - item_name = sample['item_name'][0] - tokens = sample['txt_tokens'][0].cpu().numpy() - mel_gt = sample['mels'][0].cpu().numpy() - mel_pred = outputs['mel_out'][0].cpu().numpy() - mel2ph = sample['mel2ph'][0].cpu().numpy() - mel2ph_pred = None - str_phs = self.token_encoder.decode(tokens, strip_padding=True) - base_fn = f'[{batch_idx:06d}][{item_name.replace("%", 
"_")}][%s]' - if text is not None: - base_fn += text.replace(":", "$3A")[:80] - base_fn = base_fn.replace(' ', '_') - gen_dir = self.gen_dir - wav_pred = self.vocoder.spec2wav(mel_pred) - self.saving_result_pool.add_job(self.save_result, args=[ - wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred]) - if hparams['save_gt']: - wav_gt = self.vocoder.spec2wav(mel_gt) - self.saving_result_pool.add_job(self.save_result, args=[ - wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph]) - if hparams.get('save_attn', False): - attn = outputs['attn'][0].cpu().numpy() - np.save(f'{gen_dir}/attn/{item_name}.npy', attn) - print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") - return { - 'item_name': item_name, - 'text': text, - 'ph_tokens': self.token_encoder.decode(tokens.tolist()), - 'wav_fn_pred': base_fn % 'P', - 'wav_fn_gt': base_fn % 'G', - } diff --git a/spaces/Yiqin/ChatVID/model/fastchat/serve/gradio_web_server.py b/spaces/Yiqin/ChatVID/model/fastchat/serve/gradio_web_server.py deleted file mode 100644 index 347602b32e93baf375c7237d09f4072701fa9631..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/fastchat/serve/gradio_web_server.py +++ /dev/null @@ -1,509 +0,0 @@ -import argparse -from collections import defaultdict -import datetime -import json -import os -import time -import uuid - -import gradio as gr -import requests - -from fastchat.conversation import ( - get_default_conv_template, - compute_skip_echo_len, - SeparatorStyle, -) -from fastchat.constants import LOGDIR -from fastchat.utils import ( - build_logger, - server_error_msg, - violates_moderation, - moderation_msg, -) -from fastchat.serve.gradio_patch import Chatbot as grChatbot -from fastchat.serve.gradio_css import code_highlight_css - - -logger = build_logger("gradio_web_server", "gradio_web_server.log") - -headers = {"User-Agent": "fastchat Client"} - -no_change_btn = gr.Button.update() -enable_btn = gr.Button.update(interactive=True) -disable_btn = gr.Button.update(interactive=False) - -controller_url = None -enable_moderation = False -models = [] - -priority = { - "vicuna-13b": "aaa", - "koala-13b": "aab", - "oasst-pythia-12b": "aac", - "dolly-v2-12b": "aad", - "chatglm-6b": "aae", - "stablelm-tuned-alpha-7b": "aaf", -} - - -def set_global_vars(controller_url_, enable_moderation_, models_): - global controller_url, enable_moderation, models - controller_url = controller_url_ - enable_moderation = enable_moderation_ - models = models_ - - -def get_conv_log_filename(): - t = datetime.datetime.now() - name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json") - return name - - -def get_model_list(controller_url): - ret = requests.post(controller_url + "/refresh_all_workers") - assert ret.status_code == 200 - ret = requests.post(controller_url + "/list_models") - models = ret.json()["models"] - models.sort(key=lambda x: priority.get(x, x)) - logger.info(f"Models: {models}") - return models - - -get_window_url_params = """ -function() { - const params = new URLSearchParams(window.location.search); - url_params = Object.fromEntries(params); - console.log("url_params", url_params); - return url_params; - } -""" - - -def load_demo_single(url_params): - dropdown_update = gr.Dropdown.update(visible=True) - if "model" in url_params: - model = url_params["model"] - if model in models: - dropdown_update = gr.Dropdown.update(value=model, visible=True) - - state = None - return ( - state, - dropdown_update, - gr.Chatbot.update(visible=True), - 
gr.Textbox.update(visible=True), - gr.Button.update(visible=True), - gr.Row.update(visible=True), - gr.Accordion.update(visible=True), - ) - - -def load_demo(url_params, request: gr.Request): - logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}") - return load_demo_single(url_params) - - -def vote_last_response(state, vote_type, model_selector, request: gr.Request): - with open(get_conv_log_filename(), "a") as fout: - data = { - "tstamp": round(time.time(), 4), - "type": vote_type, - "model": model_selector, - "state": state.dict(), - "ip": request.client.host, - } - fout.write(json.dumps(data) + "\n") - - -def upvote_last_response(state, model_selector, request: gr.Request): - logger.info(f"upvote. ip: {request.client.host}") - vote_last_response(state, "upvote", model_selector, request) - return ("",) + (disable_btn,) * 3 - - -def downvote_last_response(state, model_selector, request: gr.Request): - logger.info(f"downvote. ip: {request.client.host}") - vote_last_response(state, "downvote", model_selector, request) - return ("",) + (disable_btn,) * 3 - - -def flag_last_response(state, model_selector, request: gr.Request): - logger.info(f"flag. ip: {request.client.host}") - vote_last_response(state, "flag", model_selector, request) - return ("",) + (disable_btn,) * 3 - - -def regenerate(state, request: gr.Request): - logger.info(f"regenerate. ip: {request.client.host}") - state.messages[-1][-1] = None - state.skip_next = False - return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5 - - -def clear_history(request: gr.Request): - logger.info(f"clear_history. ip: {request.client.host}") - state = None - return (state, [], "") + (disable_btn,) * 5 - - -def add_text(state, text, request: gr.Request): - logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}") - - if state is None: - state = get_default_conv_template("vicuna").copy() - - if len(text) <= 0: - state.skip_next = True - return (state, state.to_gradio_chatbot(), "") + (no_change_btn,) * 5 - if enable_moderation: - flagged = violates_moderation(text) - if flagged: - logger.info(f"violate moderation. ip: {request.client.host}. text: {text}") - state.skip_next = True - return (state, state.to_gradio_chatbot(), moderation_msg) + ( - no_change_btn, - ) * 5 - - text = text[:1536] # Hard cut-off - state.append_message(state.roles[0], text) - state.append_message(state.roles[1], None) - state.skip_next = False - return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5 - - -def post_process_code(code): - sep = "\n```" - if sep in code: - blocks = code.split(sep) - if len(blocks) % 2 == 1: - for i in range(1, len(blocks), 2): - blocks[i] = blocks[i].replace("\\_", "_") - code = sep.join(blocks) - return code - - -def http_bot(state, model_selector, temperature, max_new_tokens, request: gr.Request): - logger.info(f"http_bot. 
ip: {request.client.host}") - start_tstamp = time.time() - model_name = model_selector - temperature = float(temperature) - max_new_tokens = int(max_new_tokens) - - if state.skip_next: - # This generate call is skipped due to invalid inputs - yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5 - return - - if len(state.messages) == state.offset + 2: - # First round of conversation - new_state = get_default_conv_template(model_name).copy() - new_state.conv_id = uuid.uuid4().hex - new_state.append_message(new_state.roles[0], state.messages[-2][1]) - new_state.append_message(new_state.roles[1], None) - state = new_state - - # Query worker address - ret = requests.post( - controller_url + "/get_worker_address", json={"model": model_name} - ) - worker_addr = ret.json()["address"] - logger.info(f"model_name: {model_name}, worker_addr: {worker_addr}") - - # No available worker - if worker_addr == "": - state.messages[-1][-1] = server_error_msg - yield ( - state, - state.to_gradio_chatbot(), - disable_btn, - disable_btn, - disable_btn, - enable_btn, - enable_btn, - ) - return - - # Construct prompt - if "chatglm" in model_name: - prompt = state.messages[state.offset :] - else: - prompt = state.get_prompt() - skip_echo_len = compute_skip_echo_len(model_name, state, prompt) - - # Make requests - pload = { - "model": model_name, - "prompt": prompt, - "temperature": temperature, - "max_new_tokens": max_new_tokens, - "stop": state.sep if state.sep_style == SeparatorStyle.SINGLE else None, - } - logger.info(f"==== request ====\n{pload}") - - state.messages[-1][-1] = "▌" - yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5 - - try: - # Stream output - response = requests.post( - worker_addr + "/worker_generate_stream", - headers=headers, - json=pload, - stream=True, - timeout=20, - ) - for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"): - if chunk: - data = json.loads(chunk.decode()) - if data["error_code"] == 0: - output = data["text"][skip_echo_len:].strip() - output = post_process_code(output) - state.messages[-1][-1] = output + "▌" - yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5 - else: - output = data["text"] + f" (error_code: {data['error_code']})" - state.messages[-1][-1] = output - yield (state, state.to_gradio_chatbot()) + ( - disable_btn, - disable_btn, - disable_btn, - enable_btn, - enable_btn, - ) - return - time.sleep(0.02) - except requests.exceptions.RequestException as e: - state.messages[-1][-1] = server_error_msg + f" (error_code: 4)" - yield (state, state.to_gradio_chatbot()) + ( - disable_btn, - disable_btn, - disable_btn, - enable_btn, - enable_btn, - ) - return - - state.messages[-1][-1] = state.messages[-1][-1][:-1] - yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5 - - finish_tstamp = time.time() - logger.info(f"{output}") - - with open(get_conv_log_filename(), "a") as fout: - data = { - "tstamp": round(finish_tstamp, 4), - "type": "chat", - "model": model_name, - "gen_params": { - "temperature": temperature, - "max_new_tokens": max_new_tokens, - }, - "start": round(start_tstamp, 4), - "finish": round(start_tstamp, 4), - "state": state.dict(), - "ip": request.client.host, - } - fout.write(json.dumps(data) + "\n") - - -block_css = ( - code_highlight_css - + """ -pre { - white-space: pre-wrap; /* Since CSS 2.1 */ - white-space: -moz-pre-wrap; /* Mozilla, since 1999 */ - white-space: -pre-wrap; /* Opera 4-6 */ - white-space: -o-pre-wrap; /* Opera 7 */ - word-wrap: break-word; /* Internet Explorer 5.5+ */ -} 
-#notice_markdown th { - display: none; -} -""" -) - - -def build_single_model_ui(): - notice_markdown = """ -# 🏔️ Chat with Open Large Language Models -- Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90% ChatGPT Quality. [[Blog post]](https://vicuna.lmsys.org) [[Evaluation]](https://vicuna.lmsys.org/eval/) -- Koala: A Dialogue Model for Academic Research. [[Blog post]](https://bair.berkeley.edu/blog/2023/04/03/koala/) -- [[GitHub]](https://github.com/lm-sys/FastChat) [[Twitter]](https://twitter.com/lmsysorg) [[Discord]](https://discord.gg/h6kCZb72G7) - -### Terms of use -By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data for future research.** - -### Choose a model to chat with -| | | -| ---- | ---- | -| [Vicuna](https://vicuna.lmsys.org): a chat assistant fine-tuned from LLaMA on user-shared conversations by LMSYS. | [Koala](https://bair.berkeley.edu/blog/2023/04/03/koala/): a dialogue model for academic research by BAIR | -| [OpenAssistant (oasst)](https://open-assistant.io/): a chat-based assistant for everyone by LAION. | [Dolly](https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm): an instruction-tuned open large language model by Databricks. | -| [ChatGLM](https://chatglm.cn/blog): an open bilingual dialogue language model by Tsinghua University | [StableLM](https://github.com/stability-AI/stableLM/): Stability AI language models. | -| [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html): a model fine-tuned from LLaMA on instruction-following demonstrations by Stanford. | [LLaMA](https://arxiv.org/abs/2302.13971): open and efficient foundation language models by Meta. | -""" - - learn_more_markdown = """ -### License -The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation. 
-""" - - state = gr.State() - notice = gr.Markdown(notice_markdown, elem_id="notice_markdown") - - with gr.Row(elem_id="model_selector_row"): - model_selector = gr.Dropdown( - choices=models, - value=models[0] if len(models) > 0 else "", - interactive=True, - show_label=False, - ).style(container=False) - - chatbot = grChatbot(elem_id="chatbot", visible=False).style(height=550) - with gr.Row(): - with gr.Column(scale=20): - textbox = gr.Textbox( - show_label=False, - placeholder="Enter text and press ENTER", - visible=False, - ).style(container=False) - with gr.Column(scale=1, min_width=50): - send_btn = gr.Button(value="Send", visible=False) - - with gr.Row(visible=False) as button_row: - upvote_btn = gr.Button(value="👍 Upvote", interactive=False) - downvote_btn = gr.Button(value="👎 Downvote", interactive=False) - flag_btn = gr.Button(value="⚠️ Flag", interactive=False) - # stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False) - regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False) - clear_btn = gr.Button(value="🗑️ Clear history", interactive=False) - - with gr.Accordion("Parameters", open=False, visible=False) as parameter_row: - temperature = gr.Slider( - minimum=0.0, - maximum=1.0, - value=0.7, - step=0.1, - interactive=True, - label="Temperature", - ) - max_output_tokens = gr.Slider( - minimum=0, - maximum=1024, - value=512, - step=64, - interactive=True, - label="Max output tokens", - ) - - gr.Markdown(learn_more_markdown) - - # Register listeners - btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn] - upvote_btn.click( - upvote_last_response, - [state, model_selector], - [textbox, upvote_btn, downvote_btn, flag_btn], - ) - downvote_btn.click( - downvote_last_response, - [state, model_selector], - [textbox, upvote_btn, downvote_btn, flag_btn], - ) - flag_btn.click( - flag_last_response, - [state, model_selector], - [textbox, upvote_btn, downvote_btn, flag_btn], - ) - regenerate_btn.click(regenerate, state, [state, chatbot, textbox] + btn_list).then( - http_bot, - [state, model_selector, temperature, max_output_tokens], - [state, chatbot] + btn_list, - ) - clear_btn.click(clear_history, None, [state, chatbot, textbox] + btn_list) - - model_selector.change(clear_history, None, [state, chatbot, textbox] + btn_list) - - textbox.submit( - add_text, [state, textbox], [state, chatbot, textbox] + btn_list - ).then( - http_bot, - [state, model_selector, temperature, max_output_tokens], - [state, chatbot] + btn_list, - ) - send_btn.click( - add_text, [state, textbox], [state, chatbot, textbox] + btn_list - ).then( - http_bot, - [state, model_selector, temperature, max_output_tokens], - [state, chatbot] + btn_list, - ) - - return state, model_selector, chatbot, textbox, send_btn, button_row, parameter_row - - -def build_demo(): - with gr.Blocks( - title="Chat with Open Large Language Models", - theme=gr.themes.Base(), - css=block_css, - ) as demo: - url_params = gr.JSON(visible=False) - - ( - state, - model_selector, - chatbot, - textbox, - send_btn, - button_row, - parameter_row, - ) = build_single_model_ui() - - if args.model_list_mode == "once": - demo.load( - load_demo, - [url_params], - [ - state, - model_selector, - chatbot, - textbox, - send_btn, - button_row, - parameter_row, - ], - _js=get_window_url_params, - ) - else: - raise ValueError(f"Unknown model list mode: {args.model_list_mode}") - - return demo - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--host", type=str, default="0.0.0.0") - 
parser.add_argument("--port", type=int) - parser.add_argument("--controller-url", type=str, default="http://localhost:21001") - parser.add_argument("--concurrency-count", type=int, default=10) - parser.add_argument( - "--model-list-mode", type=str, default="once", choices=["once", "reload"] - ) - parser.add_argument("--share", action="store_true") - parser.add_argument( - "--moderate", action="store_true", help="Enable content moderation" - ) - args = parser.parse_args() - logger.info(f"args: {args}") - - models = get_model_list(args.controller_url) - set_global_vars(args.controller_url, args.moderate, models) - - logger.info(args) - demo = build_demo() - demo.queue( - concurrency_count=args.concurrency_count, status_update_rate=10, api_open=False - ).launch( - server_name=args.host, server_port=args.port, share=args.share, max_threads=200 - ) diff --git a/spaces/Yudha515/Rvc-Models/audiocraft/modules/transformer.py b/spaces/Yudha515/Rvc-Models/audiocraft/modules/transformer.py deleted file mode 100644 index e69cca829d774d0b8b36c0de9b7924373da81b43..0000000000000000000000000000000000000000 --- a/spaces/Yudha515/Rvc-Models/audiocraft/modules/transformer.py +++ /dev/null @@ -1,747 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Transformer model, with streaming support, xformer attention support -and easy causal attention with a potentially finite receptive field. - -See `StreamingTransformer` for more information. - -Unlike regular PyTorch Transformer, we make the hard choice that batches are first. -""" - -import typing as tp - -from einops import rearrange -import torch -import torch.nn as nn -from torch.nn import functional as F -from torch.utils.checkpoint import checkpoint as torch_checkpoint -from xformers import ops - -from .rope import RotaryEmbedding -from .streaming import StreamingModule - -_efficient_attention_backend: str = 'torch' - - -def set_efficient_attention_backend(backend: str = 'torch'): - # Using torch by default, it seems a bit faster on older P100 GPUs (~20% faster). - global _efficient_attention_backend - assert _efficient_attention_backend in ['xformers', 'torch'] - _efficient_attention_backend = backend - - -def _get_attention_time_dimension() -> int: - if _efficient_attention_backend == 'torch': - return 2 - else: - return 1 - - -def _is_profiled() -> bool: - # Return true if we are currently running with a xformers profiler activated. - try: - from xformers.profiler import profiler - except ImportError: - return False - return profiler._Profiler._CURRENT_PROFILER is not None - - -def create_norm_fn(norm_type: str, dim: int, **kwargs) -> nn.Module: - """Create normalization module for transformer encoder layer. - - Args: - norm_type (str): Normalization method. - dim (int): Dimension of the normalized layer. - **kwargs (dict): Additional parameters for normalization layer. - Returns: - nn.Module: Normalization module. - """ - if norm_type == 'layer_norm': - return nn.LayerNorm(dim, eps=1e-5, **kwargs) - else: - raise ValueError(f"Unknown norm type: {norm_type}") - - -def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000, - dtype: torch.dtype = torch.float32) -> torch.Tensor: - """Create sinusoidal positional embedding, with shape `[B, T, C]`. - - Args: - positions (torch.Tensor): LongTensor of positions. - dim (int): Dimension of the embedding. 
- max_period (float): Maximum period of the cosine/sine functions. - dtype (torch.dtype or str): dtype to use to generate the embedding. - Returns: - torch.Tensor: Sinusoidal positional embedding. - """ - # We aim for BTC format - assert dim % 2 == 0 - half_dim = dim // 2 - positions = positions.to(dtype) - adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1) - max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point - phase = positions / (max_period_tensor ** (adim / (half_dim - 1))) - return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1) - - -def expand_repeated_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor: - """torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers""" - if n_rep == 1: - return x - if _efficient_attention_backend == 'torch': - bs, n_kv_heads, slen, head_dim = x.shape - return ( - x[:, :, None, :, :] - .expand(bs, n_kv_heads, n_rep, slen, head_dim) - .reshape(bs, n_kv_heads * n_rep, slen, head_dim) - ) - else: - bs, slen, n_kv_heads, head_dim = x.shape - return ( - x[:, :, :, None, :] - .expand(bs, slen, n_kv_heads, n_rep, head_dim) - .reshape(bs, slen, n_kv_heads * n_rep, head_dim) - ) - - -class LayerScale(nn.Module): - """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf). - This rescales diagonally the residual outputs close to 0, with a learnt scale. - - Args: - channels (int): Number of channels. - init (float): Initial scale. - channel_last (bool): If True, expect `[*, C]` shaped tensors, otherwise, `[*, C, T]`. - device (torch.device or None): Device on which to initialize the module. - dtype (torch.dtype or None): dtype to use to initialize the module. - """ - def __init__(self, channels: int, init: float = 1e-4, channel_last: bool = True, - device=None, dtype=None): - super().__init__() - self.channel_last = channel_last - self.scale = nn.Parameter( - torch.full((channels,), init, - requires_grad=True, device=device, dtype=dtype)) - - def forward(self, x: torch.Tensor): - if self.channel_last: - return self.scale * x - else: - return self.scale[:, None] * x - - -class StreamingMultiheadAttention(StreamingModule): - """Similar to `nn.MultiheadAttention` but with support for streaming, causal evaluation. - - Args: - embed_dim (int): Dimension to project to. - num_heads (int): Number of heads. - dropout (float): Dropout level. - bias (bool): Use bias in projections. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - rope (`RotaryEmbedding` or None): Rope embedding to use. - cross_attention: Should be true when used as a cross attention. - All keys and values must be available at once, streaming is only for the queries. - Cannot be used with `causal` or `rope` (as it wouldn't make sense to - interpret the time steps in the keys relative to those in the queries). - safe_streaming (bool): Bug fix, will go away with xformers update. - qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product. - kv_repeat (int): If > 1, will repeat keys and values multiple times (need to divide num_heads). 
- This will lead to faster decoding time on A100 or other GPUs with tensorcore. - device (torch.device or None): Device on which to initialize. - dtype (torch.dtype or None): dtype to use. - """ - def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, - causal: bool = False, past_context: tp.Optional[int] = None, custom: bool = False, - memory_efficient: bool = False, attention_as_float32: bool = False, - rope: tp.Optional[RotaryEmbedding] = None, cross_attention: bool = False, - safe_streaming: bool = True, qk_layer_norm: bool = False, kv_repeat: int = 1, - device=None, dtype=None): - super().__init__() - factory_kwargs = {'device': device, 'dtype': dtype} - if past_context is not None: - assert causal - - self.embed_dim = embed_dim - self.causal = causal - self.past_context = past_context - self.memory_efficient = memory_efficient - self.attention_as_float32 = attention_as_float32 - self.rope = rope - self.cross_attention = cross_attention - self.safe_streaming = safe_streaming - self.num_heads = num_heads - self.dropout = dropout - self.kv_repeat = kv_repeat - if cross_attention: - assert not causal, "Causal cannot work with cross attention." - assert rope is None, "Rope cannot work with cross attention." - - if memory_efficient: - _verify_xformers_memory_efficient_compat() - - self.custom = _is_custom(custom, memory_efficient) - if self.custom: - out_dim = embed_dim - assert num_heads % kv_repeat == 0 - assert not cross_attention or kv_repeat == 1 - num_kv = num_heads // kv_repeat - kv_dim = (embed_dim // num_heads) * num_kv - out_dim += 2 * kv_dim - in_proj = nn.Linear(embed_dim, out_dim, bias=bias, **factory_kwargs) - # We try to follow the default PyTorch MHA convention, to easily compare results. - self.in_proj_weight = in_proj.weight - self.in_proj_bias = in_proj.bias - if bias: - self.in_proj_bias.data.zero_() # Following Pytorch convention - self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs) - if bias: - self.out_proj.bias.data.zero_() - else: - assert not qk_layer_norm - assert kv_repeat == 1 - self.mha = nn.MultiheadAttention( - embed_dim, num_heads, dropout=dropout, bias=bias, batch_first=True, - **factory_kwargs) - self.qk_layer_norm = qk_layer_norm - if qk_layer_norm: - assert self.custom - assert kv_repeat == 1 - ln_dim = embed_dim - self.q_layer_norm = nn.LayerNorm(ln_dim) - self.k_layer_norm = nn.LayerNorm(ln_dim) - - def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs): - if not self.custom: - # Support compat with regular MHA - keys = [n for n, _ in self.mha.named_parameters()] - for key in keys: - if prefix + key in state_dict: - state_dict[prefix + "mha." + key] = state_dict.pop(prefix + key) - super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) - - def _get_mask(self, current_steps: int, device: torch.device, dtype: torch.dtype): - # Return a causal mask, accounting for potentially stored past keys/values - # We actually return a bias for the attention score, as this has the same - # convention both in the builtin MHA in Pytorch, and Xformers functions. - time_dim = _get_attention_time_dimension() - if self.memory_efficient: - from xformers.ops import LowerTriangularMask - if current_steps == 1: - # If we only have one step, then we do not need a mask. 
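The packed in-projection set up above emits queries for all heads but only `num_heads // kv_repeat` key/value heads (grouped-query style). A standalone sketch of the slicing arithmetic, independent of the class (all names here are illustrative):

```python
import torch
import torch.nn as nn

embed_dim, num_heads, kv_repeat = 64, 8, 2
per_head = embed_dim // num_heads
kv_heads = num_heads // kv_repeat
in_proj = nn.Linear(embed_dim, embed_dim + 2 * per_head * kv_heads)

x = torch.randn(2, 5, embed_dim)
p = in_proj(x)
q = p[..., :embed_dim]                                  # all 8 query heads
k = p[..., embed_dim:embed_dim + per_head * kv_heads]   # 4 key heads
v = p[..., embed_dim + per_head * kv_heads:]            # 4 value heads
print(q.shape, k.shape, v.shape)  # (2, 5, 64) (2, 5, 32) (2, 5, 32)
```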
- return None - elif 'past_keys' in self._streaming_state: - raise RuntimeError('Not supported at the moment') - else: - # Then we can safely use a lower triangular mask - return LowerTriangularMask() - if self._streaming_state: - past_keys = self._streaming_state['past_keys'] - past_steps = past_keys.shape[time_dim] - else: - past_steps = 0 - - queries_pos = torch.arange( - past_steps, current_steps + past_steps, device=device).view(-1, 1) - keys_pos = torch.arange(past_steps + current_steps, device=device).view(1, -1) - delta = queries_pos - keys_pos - valid = delta >= 0 - if self.past_context is not None: - valid &= (delta <= self.past_context) - return torch.where( - valid, - torch.zeros([], device=device, dtype=dtype), - torch.full([], float('-inf'), device=device, dtype=dtype)) - - def _complete_kv(self, k, v): - time_dim = _get_attention_time_dimension() - if self.cross_attention: - # With cross attention we assume all keys and values - # are already available, and streaming is with respect - # to the queries only. - return k, v - # Complete the key/value pair using the streaming state. - if self._streaming_state: - pk = self._streaming_state['past_keys'] - nk = torch.cat([pk, k], dim=time_dim) - if v is k: - nv = nk - else: - pv = self._streaming_state['past_values'] - nv = torch.cat([pv, v], dim=time_dim) - else: - nk = k - nv = v - - assert nk.shape[time_dim] == nv.shape[time_dim] - offset = 0 - if self.past_context is not None: - offset = max(0, nk.shape[time_dim] - self.past_context) - if self._is_streaming: - self._streaming_state['past_keys'] = nk[:, offset:] - if v is not k: - self._streaming_state['past_values'] = nv[:, offset:] - if 'offset' in self._streaming_state: - self._streaming_state['offset'] += offset - else: - self._streaming_state['offset'] = torch.tensor(0) - return nk, nv - - def _apply_rope(self, query: torch.Tensor, key: torch.Tensor): - # TODO: fix and verify layout. - assert _efficient_attention_backend == 'xformers', 'Rope not supported with torch attn.' - # Apply rope embeddings to query and key tensors. - assert self.rope is not None - if 'past_keys' in self._streaming_state: - past_keys_offset = self._streaming_state['past_keys'].shape[1] - else: - past_keys_offset = 0 - if 'offset' in self._streaming_state: - past_context_offset = int(self._streaming_state['offset'].item()) - else: - past_context_offset = 0 - streaming_offset = past_context_offset + past_keys_offset - return self.rope.rotate_qk(query, key, start=streaming_offset) - - def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, - key_padding_mask=None, need_weights=False, attn_mask=None, - average_attn_weights=True, is_causal=False): - assert attn_mask is None - assert not is_causal, ("new param added in torch 2.0.1 not supported, " - "use the causal args in the constructor.") - - time_dim = _get_attention_time_dimension() - if time_dim == 2: - layout = "b h t d" - else: - layout = "b t h d" - dtype = query.dtype - if self._is_streaming: - assert self.causal or self.cross_attention, \ - "Streaming only available for causal or cross attention" - - if self.causal: - # At the moment we specialize only for the self-attention case. 
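`_get_mask` above builds an additive bias rather than a boolean mask: 0 where attention is allowed, `-inf` elsewhere, with `past_context` bounding how far back a query may look. A standalone sketch of the same construction (`causal_bias` is a hypothetical helper):

```python
import torch

def causal_bias(current_steps, past_steps, past_context=None):
    q = torch.arange(past_steps, past_steps + current_steps).view(-1, 1)
    k = torch.arange(past_steps + current_steps).view(1, -1)
    delta = q - k
    valid = delta >= 0                  # no attending to the future
    if past_context is not None:
        valid &= delta <= past_context  # finite receptive field
    return torch.where(valid, torch.zeros(()), torch.full((), float("-inf")))

print(causal_bias(3, 1, past_context=1))  # 3 queries over 4 keys (1 cached step)
```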
- assert query.shape[1] == key.shape[1], "Causal only for same length query / key / value" - assert value.shape[1] == key.shape[1], "Causal only for same length query / key / value" - attn_mask = self._get_mask(query.shape[1], query.device, query.dtype) - - if self.custom: - # custom implementation - assert need_weights is False - assert key_padding_mask is None - if self.cross_attention: - # Different queries, keys, values, we have to split the weights manually - # before applying the linear. - dim = self.in_proj_weight.shape[0] // 3 - if self.in_proj_bias is None: - bias_q, bias_k, bias_v = None, None, None - else: - bias_q = self.in_proj_bias[:dim] - bias_k = self.in_proj_bias[dim: 2 * dim] - bias_v = self.in_proj_bias[2 * dim:] - q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q) - # todo: when streaming, we could actually save k, v and check that the shapes actually match. - k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k) - v = nn.functional.linear(value, self.in_proj_weight[2 * dim:], bias_v) - if self.qk_layer_norm is True: - q = self.q_layer_norm(q) - k = self.k_layer_norm(k) - q, k, v = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k, v]] - else: - if not _is_profiled(): - # Profiling breaks that property somehow. - assert query is key, "specialized implementation" - assert value is key, "specialized implementation" - projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias) - if self.kv_repeat == 1: - if time_dim == 2: - bound_layout = "b h p t d" - else: - bound_layout = "b t p h d" - packed = rearrange(projected, f"b t (p h d) -> {bound_layout}", p=3, h=self.num_heads) - q, k, v = ops.unbind(packed, dim=2) - else: - embed_dim = self.embed_dim - per_head_dim = (embed_dim // self.num_heads) - kv_heads = self.num_heads // self.kv_repeat - q = projected[:, :, :embed_dim] - start = embed_dim - end = start + per_head_dim * kv_heads - k = projected[:, :, start: end] - v = projected[:, :, end:] - q = rearrange(q, f"b t (h d) -> {layout}", h=self.num_heads) - k = rearrange(k, f"b t (h d) -> {layout}", h=kv_heads) - v = rearrange(v, f"b t (h d) -> {layout}", h=kv_heads) - - if self.qk_layer_norm is True: - assert self.kv_repeat == 1 - q, k = [rearrange(x, f"{layout} -> b t (h d)") for x in [q, k]] - q = self.q_layer_norm(q) - k = self.k_layer_norm(k) - q, k = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k]] - if self.rope: - q, k = self._apply_rope(q, k) - k, v = self._complete_kv(k, v) - if self.kv_repeat > 1: - k = expand_repeated_kv(k, self.kv_repeat) - v = expand_repeated_kv(v, self.kv_repeat) - if self.attention_as_float32: - q, k, v = [x.float() for x in [q, k, v]] - if self.memory_efficient: - p = self.dropout if self.training else 0 - if _efficient_attention_backend == 'torch': - x = torch.nn.functional.scaled_dot_product_attention( - q, k, v, is_causal=attn_mask is not None, dropout_p=p) - else: - x = ops.memory_efficient_attention(q, k, v, attn_mask, p=p) - else: - # We include the dot product as float32, for consistency - # with the other implementations that include that step - # as part of the attention. Note that when using `autocast`, - # the einsums would be done as bfloat16, but the softmax - # would be done as float32, so `attention_as_float32` will - # extend a bit the range of operations done in float32, - # although this should make no difference. 
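The manual einsum path below computes the same thing as the fused kernel, modulo numerics. A quick sanity-check sketch comparing the two on the `b h t d` layout (assumes PyTorch >= 2.0 for `scaled_dot_product_attention`):

```python
import torch
import torch.nn.functional as F

q, k, v = (torch.randn(2, 4, 6, 8) for _ in range(3))  # b h t d
ref = F.scaled_dot_product_attention(q, k, v)

# Manual path: scale the queries, dot with keys, softmax, weight the values.
w = torch.softmax(torch.einsum("bhtd,bhkd->bhtk", q / 8 ** 0.5, k), dim=-1)
out = torch.einsum("bhtk,bhkd->bhtd", w, v)
assert torch.allclose(ref, out, atol=1e-5)
```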
- q = q / q.shape[-1] ** 0.5 - key_layout = layout.replace('t', 'k') - query_layout = layout - if self._is_streaming and self.safe_streaming and q.device.type == 'cuda': - with torch.autocast(device_type=q.device.type, dtype=torch.float32): - pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k) - else: - pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k) - if attn_mask is not None: - pre_w = pre_w + attn_mask - w = torch.softmax(pre_w, dim=-1) - w = F.dropout(w, self.dropout, training=self.training).to(v) - # Key and value have the same format. - x = torch.einsum(f"b h t k, {key_layout} -> {layout}", w, v) - x = x.to(dtype) - x = rearrange(x, f"{layout} -> b t (h d)", h=self.num_heads) - x = self.out_proj(x) - else: - key, value = self._complete_kv(key, value) - if self.attention_as_float32: - query, key, value = [x.float() for x in [query, key, value]] - x, _ = self.mha( - query, key, value, key_padding_mask, - need_weights, attn_mask, average_attn_weights) - x = x.to(dtype) - - return x, None - - -class StreamingTransformerLayer(nn.TransformerEncoderLayer): - """TransformerLayer with Streaming / Causal support. - This also integrates cross_attention, when passing `cross_attention=True`, - rather than having two separate classes like in PyTorch. - - Args: - d_model (int): Dimension of the data. - num_heads (int): Number of heads. - dim_feedforward (int): Intermediate dimension of FF module. - dropout (float): Dropout both for MHA and FF. - bias_ff (bool): Use bias for FF. - bias_attn (bool): Use bias for MHA. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product in attention. - qk_layer_norm_cross (bool): Same for the cross attention. - cross_attention (bool): If True, expect to get secondary input for cross-attention. - Cross attention will use the default MHA, as it typically won't require - special treatment. - layer_scale (float or None): If not None, LayerScale will be used with - the given value as initial scale. - rope (`RotaryEmbedding` or None): Rope embedding to use. - attention_dropout (float or None): If not None, use this dropout value for the attention, - decoupled from the FFN `dropout`. - kv_repeat (int): If > 1, will repeat keys and values multiple times (need to divide num_heads). - This will lead to faster decoding time on A100 or other GPUs with tensorcore. - device (torch.device or None): Device on which to initialize. - dtype (torch.dtype or None): dtype to use. - **kwargs: See `nn.TransformerEncoderLayer`. 
- """ - def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1, - bias_ff: bool = True, bias_attn: bool = True, causal: bool = False, - past_context: tp.Optional[int] = None, custom: bool = False, - memory_efficient: bool = False, attention_as_float32: bool = False, - qk_layer_norm: bool = False, qk_layer_norm_cross: bool = False, - cross_attention: bool = False, layer_scale: tp.Optional[float] = None, - rope: tp.Optional[RotaryEmbedding] = None, attention_dropout: tp.Optional[float] = None, - kv_repeat: int = 1, norm: str = 'layer_norm', device=None, dtype=None, **kwargs): - super().__init__(d_model, num_heads, dim_feedforward, dropout, - device=device, dtype=dtype, batch_first=True, **kwargs) - factory_kwargs = {'device': device, 'dtype': dtype} - # Redefine self_attn to our streaming multi-head attention - attn_kwargs: tp.Dict[str, tp.Any] = { - 'embed_dim': d_model, - 'num_heads': num_heads, - 'dropout': dropout if attention_dropout is None else attention_dropout, - 'bias': bias_attn, - 'custom': custom, - 'memory_efficient': memory_efficient, - 'attention_as_float32': attention_as_float32, - } - self.self_attn: StreamingMultiheadAttention = StreamingMultiheadAttention( - causal=causal, past_context=past_context, rope=rope, qk_layer_norm=qk_layer_norm, - kv_repeat=kv_repeat, **attn_kwargs, **factory_kwargs) # type: ignore - # Redefine feedforward layers to expose bias parameter - self.linear1 = nn.Linear(d_model, dim_feedforward, bias=bias_ff, **factory_kwargs) - self.linear2 = nn.Linear(dim_feedforward, d_model, bias=bias_ff, **factory_kwargs) - - self.layer_scale_1: nn.Module - self.layer_scale_2: nn.Module - if layer_scale is None: - self.layer_scale_1 = nn.Identity() - self.layer_scale_2 = nn.Identity() - else: - self.layer_scale_1 = LayerScale(d_model, layer_scale, **factory_kwargs) - self.layer_scale_2 = LayerScale(d_model, layer_scale, **factory_kwargs) - - self.cross_attention: tp.Optional[nn.Module] = None - if cross_attention: - self.cross_attention = StreamingMultiheadAttention( - cross_attention=True, qk_layer_norm=qk_layer_norm_cross, - **attn_kwargs, **factory_kwargs) - # Norm and dropout - self.dropout_cross = nn.Dropout(dropout) - # eps value matching that used in PyTorch reference implementation. - self.norm_cross = nn.LayerNorm(d_model, eps=1e-5, **factory_kwargs) - self.layer_scale_cross: nn.Module - if layer_scale is None: - self.layer_scale_cross = nn.Identity() - else: - self.layer_scale_cross = LayerScale(d_model, layer_scale, **factory_kwargs) - self.norm1 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore - self.norm2 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore - - def _cross_attention_block(self, src: torch.Tensor, - cross_attention_src: torch.Tensor) -> torch.Tensor: - assert self.cross_attention is not None - # queries are from src, keys and values from cross_attention_src. 
- x = self.cross_attention( - src, cross_attention_src, cross_attention_src, need_weights=False)[0] - return self.dropout_cross(x) # type: ignore - - def forward(self, src: torch.Tensor, src_mask: tp.Optional[torch.Tensor] = None, # type: ignore - src_key_padding_mask: tp.Optional[torch.Tensor] = None, - cross_attention_src: tp.Optional[torch.Tensor] = None): - if self.cross_attention is None: - assert cross_attention_src is None - else: - assert cross_attention_src is not None - x = src - if self.norm_first: - x = x + self.layer_scale_1( - self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)) - if cross_attention_src is not None: - x = x + self.layer_scale_cross( - self._cross_attention_block( - self.norm_cross(x), cross_attention_src)) - x = x + self.layer_scale_2(self._ff_block(self.norm2(x))) - else: - x = self.norm1(x + self.layer_scale_1( - self._sa_block(x, src_mask, src_key_padding_mask))) - if cross_attention_src is not None: - x = self.norm_cross( - x + self.layer_scale_cross( - self._cross_attention_block(src, cross_attention_src))) - x = self.norm2(x + self.layer_scale_2(self._ff_block(x))) - return x - - -class StreamingTransformer(StreamingModule): - """Transformer with Streaming / Causal support. - - Args: - d_model (int): Dimension of the data. - num_heads (int): Number of heads. - dim_feedforward (int): Intermediate dimension of FF module. - dropout (float): Dropout both for MHA and FF. - bias_ff (bool): Use bias for FF. - bias_attn (bool): Use bias for MHA. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - cross_attention (bool): If True, expect to get secondary input for cross-attention. - layer_scale (float or None): If not None, LayerScale will be used - with the given value as initial scale. - positional_embedding (str): Positional embedding strategy (sin, rope, or sin_rope). - max_period (float): Maximum period of the time embedding. - positional_scale (float): Scale of positional embedding, set to 0 to deactivate. - xpos (bool): Apply xpos exponential decay to positional embedding (rope only). - lr (float or None): learning rate override through the `make_optim_group` API. - weight_decay (float or None): Weight_decay override through the `make_optim_group` API. - layer_class: (subclass of `StreamingTransformerLayer): class to use - to initialize the layers, allowing further customization outside of Audiocraft. - checkpointing (str): Checkpointing strategy to reduce memory usage. - No checkpointing if set to 'none'. Per layer checkpointing using PyTorch - if set to 'torch' (entire layer checkpointed, i.e. linears are evaluated twice, - minimal memory usage, but maximal runtime). Finally, `xformers_default` provide - a policy for opting-out some operations of the checkpointing like - linear layers and attention, providing a middle ground between speed and memory. - device (torch.device or None): Device on which to initialize. - dtype (torch.dtype or None): dtype to use. - **kwargs: See `nn.TransformerEncoderLayer`. 
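The layer forward above wires the sub-blocks in either pre-norm (`norm_first=True`) or post-norm order. Stripped of cross-attention and LayerScale, the two residual patterns reduce to the following sketch (the linear layers are stand-ins for the real attention and feedforward blocks):

```python
import torch
from torch import nn

norm1, norm2 = nn.LayerNorm(8), nn.LayerNorm(8)
attn_block = nn.Linear(8, 8)   # stand-in for self-attention
ff_block = nn.Linear(8, 8)     # stand-in for the feedforward

def forward(x: torch.Tensor, norm_first: bool) -> torch.Tensor:
    if norm_first:  # pre-norm: normalize the input of each sub-block
        x = x + attn_block(norm1(x))
        x = x + ff_block(norm2(x))
    else:           # post-norm: normalize after each residual add
        x = norm1(x + attn_block(x))
        x = norm2(x + ff_block(x))
    return x

print(forward(torch.randn(2, 4, 8), norm_first=True).shape)
```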
- """ - def __init__(self, d_model: int, num_heads: int, num_layers: int, dim_feedforward: int = 2048, - dropout: float = 0.1, bias_ff: bool = True, bias_attn: bool = True, - causal: bool = False, past_context: tp.Optional[int] = None, - custom: bool = False, memory_efficient: bool = False, attention_as_float32: bool = False, - cross_attention: bool = False, layer_scale: tp.Optional[float] = None, - positional_embedding: str = 'sin', max_period: float = 10_000, positional_scale: float = 1., - xpos: bool = False, lr: tp.Optional[float] = None, weight_decay: tp.Optional[float] = None, - layer_class: tp.Type[StreamingTransformerLayer] = StreamingTransformerLayer, - checkpointing: str = 'none', device=None, dtype=None, **kwargs): - super().__init__() - assert d_model % num_heads == 0 - - self.positional_embedding = positional_embedding - self.max_period = max_period - self.positional_scale = positional_scale - self.weight_decay = weight_decay - self.lr = lr - - assert positional_embedding in ['sin', 'rope', 'sin_rope'] - self.rope: tp.Optional[RotaryEmbedding] = None - if self.positional_embedding in ['rope', 'sin_rope']: - assert _is_custom(custom, memory_efficient) - self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period, - xpos=xpos, scale=positional_scale, device=device) - - self.checkpointing = checkpointing - - assert checkpointing in ['none', 'torch', 'xformers_default', 'xformers_mm'] - if self.checkpointing.startswith('xformers'): - _verify_xformers_internal_compat() - - self.layers = nn.ModuleList() - for idx in range(num_layers): - self.layers.append( - layer_class( - d_model=d_model, num_heads=num_heads, dim_feedforward=dim_feedforward, - dropout=dropout, bias_ff=bias_ff, bias_attn=bias_attn, - causal=causal, past_context=past_context, custom=custom, - memory_efficient=memory_efficient, attention_as_float32=attention_as_float32, - cross_attention=cross_attention, layer_scale=layer_scale, rope=self.rope, - device=device, dtype=dtype, **kwargs)) - - if self.checkpointing != 'none': - for layer in self.layers: - # see audiocraft/optim/fsdp.py, magic signal to indicate this requires fixing the - # backward hook inside of FSDP... - layer._magma_checkpointed = True # type: ignore - assert layer.layer_drop == 0., "Need further checking" # type: ignore - - def _apply_layer(self, layer, *args, **kwargs): - method = self.checkpointing - if method == 'none': - return layer(*args, **kwargs) - elif method == 'torch': - return torch_checkpoint(layer, *args, use_reentrant=False, **kwargs) - elif method.startswith('xformers'): - from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy - if method == 'xformers_default': - # those operations will be saved, and not recomputed. - # According to Francisco we can get smarter policies but this is a good start. - allow_list = [ - "xformers.efficient_attention_forward_cutlass.default", - "xformers_flash.flash_fwd.default", - "aten.addmm.default", - "aten.mm.default", - ] - elif method == 'xformers_mm': - # those operations will be saved, and not recomputed. - # According to Francisco we can get smarter policies but this is a good start. 
- allow_list = [ - "aten.addmm.default", - "aten.mm.default", - ] - else: - raise ValueError(f"xformers checkpointing policy {method} is not known.") - policy_fn = _get_default_policy(allow_list) - return checkpoint(layer, *args, policy_fn=policy_fn, **kwargs) - else: - raise ValueError(f"Checkpointing method {method} is unknown.") - - def forward(self, x: torch.Tensor, *args, **kwargs): - B, T, C = x.shape - - if 'offsets' in self._streaming_state: - offsets = self._streaming_state['offsets'] - else: - offsets = torch.zeros(B, dtype=torch.long, device=x.device) - - if self.positional_embedding in ['sin', 'sin_rope']: - positions = torch.arange(T, device=x.device).view(1, -1, 1) - positions = positions + offsets.view(-1, 1, 1) - pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype) - x = x + self.positional_scale * pos_emb - - for layer in self.layers: - x = self._apply_layer(layer, x, *args, **kwargs) - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return x - - def make_optim_group(self): - group = {"params": list(self.parameters())} - if self.lr is not None: - group["lr"] = self.lr - if self.weight_decay is not None: - group["weight_decay"] = self.weight_decay - return group - - -# special attention related functions - -def _verify_xformers_memory_efficient_compat(): - try: - from xformers.ops import memory_efficient_attention, LowerTriangularMask # noqa - except ImportError: - raise ImportError( - "xformers is not installed. Please install it and try again.\n" - "To install on AWS and Azure, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n" - "To install on FAIR Cluster, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n") - -
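`make_optim_group` above returns a standard PyTorch parameter-group dict, so per-module `lr` and `weight_decay` overrides take precedence over the optimizer defaults. A small sketch of how such a group is consumed (the `Tiny` module is hypothetical):

```python
import torch
from torch import nn

class Tiny(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)
        self.lr = 1e-4  # per-module override, mirroring the API above

    def make_optim_group(self):
        group = {"params": list(self.parameters())}
        if self.lr is not None:
            group["lr"] = self.lr
        return group

model = Tiny()
# Per-group settings beat the optimizer default lr of 1e-3.
opt = torch.optim.AdamW([model.make_optim_group()], lr=1e-3)
print(opt.param_groups[0]["lr"])  # 0.0001
```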
-def _verify_xformers_internal_compat(): - try: - from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy # noqa - except ImportError: - raise ImportError( - "Francisco's fairinternal xformers is not installed. Please install it and try again.\n" - "To install on AWS and Azure, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n" - "To install on FAIR Cluster, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n") - - -def _is_custom(custom: bool, memory_efficient: bool): - return custom or memory_efficient diff --git a/spaces/Yunshansongbai/SVC-Nahida/.ipynb_checkpoints/build.gradio-checkpoint.py b/spaces/Yunshansongbai/SVC-Nahida/.ipynb_checkpoints/build.gradio-checkpoint.py deleted file mode 100644 index 655744d20675e2ce6bf7c96fa99af8714b8a14ec..0000000000000000000000000000000000000000 --- a/spaces/Yunshansongbai/SVC-Nahida/.ipynb_checkpoints/build.gradio-checkpoint.py +++ /dev/null @@ -1,176 +0,0 @@ -import io -import os -import sys - -import gradio as gr -import librosa -import numpy as np -import soundfile -from inference.infer_tool import Svc -import logging -import paddle -import requests -import utils -from spleeter import Separator - -build_dir=os.getcwd() -if build_dir == "/home/aistudio": - build_dir += "/build" - -model_dir=build_dir+'/trained_models' - -model_list_path = model_dir + "/model_list.txt" - -# Collect the available model names from the model directory - models = [] -for filename in os.listdir(model_dir): - # Keep files ending in '.pdparams' whose stem is purely alphabetic - if filename.endswith('.pdparams') and os.path.splitext(filename)[0].isalpha(): - models.append(os.path.splitext(filename)[0]) -cache_model = {} - -def reboot(): - os.execv(sys.executable, ['python'] + sys.argv) - -def separate_fn(song_input): - try: - if song_input is None: - return "请上传歌曲",None,None,None,None - params_2stems = { - 'sample_rate': 44100, - 'frame_length': 4096, - 'frame_step': 1024, - 'T': 512, - 'F': 1024, - 'num_instruments': ['vocals', 'instrumental'], - 'output_dir': build_dir+'/output_2stems', - 'checkpoint_path': build_dir+'/spleeter', - 'use_elu': False} - sampling_rate, song = song_input - soundfile.write("temp.wav", song, sampling_rate, format="wav") - # Initialize the separator - sep = Separator(params_2stems) - sep.separate('temp.wav') - vocal_path = params_2stems["output_dir"]+"/temp-vocals.wav" - instrumental_path = params_2stems["output_dir"]+"/temp-instrumental.wav" - return "分离成功,请继续前往体验【转换】和【混音】",vocal_path,instrumental_path,vocal_path,instrumental_path - except Exception as e: - import traceback - return traceback.format_exc() , None,None,None,None - - -def convert_fn(model_name, input_audio,input_audio_micro, vc_transform, auto_f0,cluster_ratio, slice_db, noise_scale): - try: - if model_name in cache_model: - model = cache_model[model_name] - else: - if paddle.device.is_compiled_with_cuda()==False and len(cache_model)!=0: - return f"目前运行环境为CPU,受制于平台算力,每次启动本项目只允许加载1个模型,当前已加载{next(iter(cache_model))}",None,None - config_path = f"{build_dir}/trained_models/config.json" - model = Svc(f"{build_dir}/trained_models/{model_name}.pdparams", config_path,mode="test") - cache_model[model_name] = model - if input_audio is None and input_audio_micro is None: - return "请上传音频", None,None - if input_audio_micro is not None: - input_audio = input_audio_micro - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - print(audio.shape) -
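The preprocessing just above (int PCM scaled to float32, stereo collapsed to mono, resampling to the model's 16 kHz rate) can be exercised standalone; a sketch with synthetic audio in place of an upload (array sizes are made up):

```python
import numpy as np
import librosa

sr = 44100
pcm = (np.random.randn(sr, 2) * 2000).astype(np.int16)  # fake stereo int16 clip

audio = (pcm / np.iinfo(pcm.dtype).max).astype(np.float32)  # scale to [-1, 1]
if audio.ndim > 1:
    # librosa.to_mono expects (channels, samples)
    audio = librosa.to_mono(audio.transpose(1, 0))
if sr != 16000:
    audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
print(audio.shape)  # roughly (16000,)
```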
out_wav_path = "temp.wav" - soundfile.write(out_wav_path, audio, 16000, format="wav") - print(cluster_ratio, auto_f0, noise_scale) - _audio = model.slice_inference(out_wav_path, model_name, vc_transform, slice_db, cluster_ratio, auto_f0, noise_scale) - del model - return "转换成功,请继续前往体验【混音】", (44100, _audio),(44100, _audio) - except Exception as e: - import traceback - return traceback.format_exc() , None,None - -def compose_fn(input_vocal,input_instrumental,mixing_ratio=0.5): - try: - outlog = "混音成功" - if input_vocal is None: - return "请上传人声", None - if input_instrumental is None: - return "请上传伴奏", None - vocal_sampling_rate, vocal = input_vocal - vocal_duration = vocal.shape[0] / vocal_sampling_rate - vocal = (vocal / np.iinfo(vocal.dtype).max).astype(np.float32) - if len(vocal.shape) > 1: - vocal = librosa.to_mono(vocal.transpose(1, 0)) - if vocal_sampling_rate != 44100: - vocal = librosa.resample(vocal, orig_sr=vocal_sampling_rate, target_sr=44100) - - instrumental_sampling_rate, instrumental = input_instrumental - instrumental_duration = instrumental.shape[0] / instrumental_sampling_rate - instrumental = (instrumental / np.iinfo(instrumental.dtype).max).astype(np.float32) - if len(instrumental.shape) > 1: - instrumental = librosa.to_mono(instrumental.transpose(1, 0)) - if instrumental_sampling_rate != 44100: - instrumental = librosa.resample(instrumental, orig_sr=instrumental_sampling_rate, target_sr=44100) - if len(vocal)!=len(instrumental): - min_length = min(len(vocal),len(instrumental)) - instrumental = instrumental[:min_length] - vocal = vocal[:min_length] - outlog = "人声伴奏长度不一致,已自动截断较长的音频" - - mixed_audio = (1 - mixing_ratio) * vocal + mixing_ratio * instrumental - mixed_audio_data = mixed_audio.astype(np.float32) - return outlog,(44100,mixed_audio_data) - except Exception as e: - import traceback - return traceback.format_exc() , None - - -app = gr.Blocks() -with app: - gr.Markdown('

      SVC歌声转换全流程体验(伴奏分离,转换,混音)

      ') - btn_reboot = gr.Button("重启程序", variant="primary") - with gr.Tabs() as tabs: - with gr.TabItem("人声伴奏分离"): - gr.Markdown('

      该项目人声分离的效果弱于UVR5,如自备分离好的伴奏和人声可跳过该步骤

      ') - song_input = gr.Audio(label="上传歌曲(tips:上传后点击右上角✏可以进行歌曲剪辑)",interactive=True) - gr.Examples(examples=[build_dir+"/examples/song/blue.wav",build_dir+"/examples/song/Counter_clockwise_Clock.wav",build_dir+"/examples/song/one_last_kiss.wav"],inputs=song_input,label="歌曲样例") - - btn_separate = gr.Button("人声伴奏分离", variant="primary") - text_output1 = gr.Textbox(label="输出信息") - vocal_output1 = gr.Audio(label="输出人声",interactive=False) - instrumental_output1 = gr.Audio(label="输出伴奏",interactive=False) - with gr.TabItem("转换"): - model_name = gr.Dropdown(label="模型", choices=models, value="纳西妲") - vocal_input1 = gr.Audio(label="上传人声",interactive=True) - gr.Examples(examples=[build_dir+"/examples/vocals/blue_vocal.wav",build_dir+"/examples/vocals/Counter_clockwise_Clock_vocal.wav",build_dir+"/examples/vocals/one_last_kiss_vocal.wav"],inputs=vocal_input1,label="人声样例") - btn_use_separate = gr.Button("使用【人声伴奏分离】分离的人声") - micro_input = gr.Audio(label="麦克风输入(优先于上传的人声)",source="microphone",interactive=True) - vc_transform = gr.Number(label="变调(半音数量,升八度12降八度-12)", value=0) - cluster_ratio = gr.Number(label="聚类模型混合比例", value=0,visible=False) - auto_f0 = gr.Checkbox(label="自动预测音高(转换歌声时不要打开,会严重跑调)", value=False) - slice_db = gr.Number(label="静音分贝阈值(嘈杂的音频可以-30,干声保留呼吸可以-50)", value=-50) - noise_scale = gr.Number(label="noise_scale", value=0.2) - btn_convert = gr.Button("转换", variant="primary") - text_output2 = gr.Textbox(label="输出信息") - vc_output2 = gr.Audio(label="输出音频",interactive=False) - - with gr.TabItem("混音"): - vocal_input2 = gr.Audio(label="上传人声",interactive=True) - btn_use_convert = gr.Button("使用【转换】输出的人声") - instrumental_input1 = gr.Audio(label="上传伴奏") - gr.Examples(examples=[build_dir+"/examples/instrumental/blue_instrumental.wav",build_dir+"/examples/instrumental/Counter_clockwise_Clock_instrumental.wav",build_dir+"/examples/instrumental/one_last_kiss_instrumental.wav"],inputs=instrumental_input1,label="伴奏样例") - btn_use_separate2 = gr.Button("使用【人声伴奏分离】分离的伴奏") - mixing_ratio = gr.Slider(0, 1, value=0.75,step=0.01,label="混音比例(人声:伴奏)", info="人声:伴奏") - btn_compose = gr.Button("混音", variant="primary") - text_output3 = gr.Textbox(label="输出信息") - song_output = gr.Audio(label="输出歌曲",interactive=False) - btn_separate.click(separate_fn, song_input, [text_output1, vocal_output1,instrumental_output1,vocal_input1,instrumental_input1]) - btn_convert.click(convert_fn, [model_name, vocal_input1,micro_input,vc_transform,auto_f0,cluster_ratio, slice_db, noise_scale], [text_output2, vc_output2,vocal_input2]) - btn_reboot.click(reboot) - btn_use_convert.click(lambda x:x,vc_output2,vocal_input2) - btn_use_separate.click(lambda x:x,vocal_output1,vocal_input1) - btn_use_separate2.click(lambda x:x,instrumental_output1,instrumental_input1) - -app.launch() diff --git a/spaces/ZJunTvT/ZJunChat/modules/webui_locale.py b/spaces/ZJunTvT/ZJunChat/modules/webui_locale.py deleted file mode 100644 index 1ce4d97b9b41cbb2d9be3fdadc4c85f6ef897604..0000000000000000000000000000000000000000 --- a/spaces/ZJunTvT/ZJunChat/modules/webui_locale.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import locale -import commentjson as json - -class I18nAuto: - def __init__(self): - if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) - else: - config = {} - lang_config = config.get("language", "auto") - language = os.environ.get("LANGUAGE", lang_config) - if language == "auto": - language = locale.getdefaultlocale()[0] # get the language code of the system (ex. 
zh_CN) - self.language_map = {} - self.file_is_exists = os.path.isfile(f"./locale/{language}.json") - if self.file_is_exists: - with open(f"./locale/{language}.json", "r", encoding="utf-8") as f: - self.language_map.update(json.load(f)) - - def __call__(self, key): - if self.file_is_exists and key in self.language_map: - return self.language_map[key] - else: - return key diff --git a/spaces/a5656789/ganqx/app.py b/spaces/a5656789/ganqx/app.py deleted file mode 100644 index 2439c5cec6b61e8a517f957daf710cbb6b5c3cf6..0000000000000000000000000000000000000000 --- a/spaces/a5656789/ganqx/app.py +++ /dev/null @@ -1,62 +0,0 @@ -from upcunet_v3 import RealWaifuUpScaler -import gradio as gr -import time -import logging -import os -from PIL import ImageOps -import numpy as np -import math - - -def greet(input_img, input_model_name, input_tile_mode): - # if input_img.size[0] * input_img.size[1] > 256 * 256: - # y = int(math.sqrt(256*256/input_img.size[0]*input_img.size[1])) - # x = int(input_img.size[0]/input_img.size[1]*y) - # input_img = ImageOps.fit(input_img, (x, y)) - input_img = np.array(input_img) - if input_model_name not in model_cache: - t1 = time.time() - upscaler = RealWaifuUpScaler(input_model_name[2], ModelPath + input_model_name, half=False, device="cpu") - t2 = time.time() - logger.info(f'load model time, {t2 - t1}') - model_cache[input_model_name] = upscaler - else: - upscaler = model_cache[input_model_name] - logger.info(f'load model from cache') - - start = time.time() - result = upscaler(input_img, tile_mode=input_tile_mode) - end = time.time() - logger.info(f'input_model_name, {input_model_name}') - logger.info(f'input_tile_mode, {input_tile_mode}') - logger.info(f'input shape, {input_img.shape}') - logger.info(f'output shape, {result.shape}') - logger.info(f'speed time, {end - start}') - return result - - -if __name__ == '__main__': - logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s") - logger = logging.getLogger() - - ModelPath = "weights_v3/" - model_cache = {} - - input_model_name = gr.inputs.Dropdown(os.listdir(ModelPath), default="up2x-latest-denoise2x.pth", label='选择model') - input_tile_mode = gr.inputs.Dropdown([0, 1, 2, 3, 4], default=2, label='选择tile_mode') - input_img = gr.inputs.Image(label='image', type='pil') - - inputs = [input_img, input_model_name, input_tile_mode] - outputs = "image" - iface = gr.Interface(fn=greet, - inputs=inputs, - outputs=outputs, - allow_screenshot=False, - allow_flagging='never', - examples=[['test-img.jpg', "up2x-latest-denoise2x.pth", 2]], - article='[https://github.com/bilibili/ailab/tree/main/Real-CUGAN](https://github.com/bilibili/ailab/tree/main/Real-CUGAN)
      ' - '感谢b站开源的项目,图片过大会导致内存不足,所有我将图片裁剪小,想体验大图片的效果请自行前往上面的链接。
      ' - '修改bbb' - 'The large image will lead to memory limit exceeded. So I crop and resize image. ' - 'If you want to experience the large image, please go to the link above.') - iface.launch() diff --git a/spaces/aaronayitey/Covid_19-Vaccine-Sentiment_Analysis/app.py b/spaces/aaronayitey/Covid_19-Vaccine-Sentiment_Analysis/app.py deleted file mode 100644 index 8560466bfa37571033ef7d2922b63f88f64cbf23..0000000000000000000000000000000000000000 --- a/spaces/aaronayitey/Covid_19-Vaccine-Sentiment_Analysis/app.py +++ /dev/null @@ -1,80 +0,0 @@ -# Import libraries -import os -import uuid -import pandas as pd -import numpy as np -from scipy.special import softmax -import gradio as gr - -import torch -from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification, TFAutoModelForSequenceClassification, IntervalStrategy, TrainingArguments, EarlyStoppingCallback, pipeline, Trainer -from torch import nn - - - - -# Define the model path where the pre-trained model is saved on the Hugging Face model hub -model_path = "aaronayitey/Sentiment-classfication-ROBERTA-model" - -# Initialize the tokenizer for the pre-trained model -tokenizer = AutoTokenizer.from_pretrained('roberta-base') - -# Load the configuration for the pre-trained model -config = AutoConfig.from_pretrained(model_path) - -# Load the pre-trained model -model = AutoModelForSequenceClassification.from_pretrained(model_path) - -# Define a function to preprocess the text data -def preprocess(text): - new_text = [] - # Replace user mentions with '@user' - for t in text.split(" "): - t = '@user' if t.startswith('@') and len(t) > 1 else t - # Replace links with 'http' - t = 'http' if t.startswith('http') else t - new_text.append(t) - # Join the preprocessed text - return " ".join(new_text) - -# Define a function to perform sentiment analysis on the input text -def sentiment_analysis(text): - # Preprocess the input text - text = preprocess(text) - - # Tokenize the input text using the pre-trained tokenizer - encoded_input = tokenizer(text, return_tensors='pt') - - # Feed the tokenized input to the pre-trained model and obtain output - output = model(**encoded_input) - - scores_ = softmax(output.logits[0].detach().numpy()) - - # Format the output dictionary with the predicted scores - labels = ['Negative', 'Neutral', 'Positive'] - scores = {l: float(s) for (l, s) in zip(labels, scores_)} - - # Get the label with the highest score - max_score_label = max(scores, key=scores.get) - - # Return the label with the highest score and the probability scores as text - probabilities_text = "\n".join([f"{label}: {score:.2%}" for label, score in scores.items()]) - return f"Label: {max_score_label}\n\nProbabilities:\n{probabilities_text}" - -# Define a Gradio interface to interact with the model -demo = gr.Interface( - fn=sentiment_analysis, # Function to perform sentiment analysis - inputs=gr.Textbox(placeholder="Write your tweet here..."), # Text input field - outputs="text", # Output type for text display - examples=[ - ["This vaccine is wonderful!", "You better don't take that vaccine in!", "Don't have a word about the vaccine."], - ["I love this product!", "Not sure if I like it.", "This is terrible."], - ["Great weather today!", "I can't stand the rain.", "It's so-so."], - # Add more examples as needed - ] -) - -# Launch the Gradio interface -demo.launch(debug=True) - - diff --git a/spaces/abdvl/datahub_qa_bot/train.py b/spaces/abdvl/datahub_qa_bot/train.py deleted file mode 100644 index 
cb0f374d06f1549b03a4633ed23b57981136fe33..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/train.py +++ /dev/null @@ -1,53 +0,0 @@ -import os -import streamlit as st -from langchain.chains import RetrievalQA -from langchain.llms import OpenAI -from langchain.document_loaders import DirectoryLoader -from langchain.text_splitter import CharacterTextSplitter -from langchain.indexes import VectorstoreIndexCreator -from langchain.embeddings import OpenAIEmbeddings -from langchain.vectorstores import Chroma -from langchain.callbacks import get_openai_callback - - -# variables -db_folder = "db" - -# set your OpenAI API key -os.environ["OPENAI_API_KEY"] = "..." - - -# initialize the language model -llm = OpenAI(model_name="text-ada-001", n=2, best_of=2) -with get_openai_callback() as cb: - - # load the documents - loader = DirectoryLoader('./docs', glob="**/*.md") - documents = loader.load() - # print(documents[0]) - - # split the documents into chunks - text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) - texts = text_splitter.split_documents(documents) - - # create the embeddings and index - embeddings = OpenAIEmbeddings() - - # create the vectorstore and retriever - db = Chroma.from_documents( - texts, embeddings, persist_directory=db_folder) - - retriever = db.as_retriever(search_type="mmr") - - # initialize the chain - qa = RetrievalQA.from_chain_type( - llm=llm, chain_type="stuff", retriever=retriever) - - # store the embedings and index - db.persist() - - print(f"Total Tokens: {cb.total_tokens}") - print(f"Prompt Tokens: {cb.prompt_tokens}") - print(f"Completion Tokens: {cb.completion_tokens}") - print(f"Successful Requests: {cb.successful_requests}") - print(f"Total Cost (USD): ${cb.total_cost}") diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/hooks/logger/base.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/hooks/logger/base.py deleted file mode 100644 index f845256729458ced821762a1b8ef881e17ff9955..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/hooks/logger/base.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numbers -from abc import ABCMeta, abstractmethod - -import numpy as np -import torch - -from ..hook import Hook - - -class LoggerHook(Hook): - """Base class for logger hooks. - - Args: - interval (int): Logging interval (every k iterations). - ignore_last (bool): Ignore the log of last iterations in each epoch - if less than `interval`. - reset_flag (bool): Whether to clear the output buffer after logging. - by_epoch (bool): Whether EpochBasedRunner is used. - """ - - __metaclass__ = ABCMeta - - def __init__(self, - interval=10, - ignore_last=True, - reset_flag=False, - by_epoch=True): - self.interval = interval - self.ignore_last = ignore_last - self.reset_flag = reset_flag - self.by_epoch = by_epoch - - @abstractmethod - def log(self, runner): - pass - - @staticmethod - def is_scalar(val, include_np=True, include_torch=True): - """Tell the input variable is a scalar or not. - - Args: - val: Input variable. - include_np (bool): Whether include 0-d np.ndarray as a scalar. - include_torch (bool): Whether include 0-d torch.Tensor as a scalar. - - Returns: - bool: True or False. 
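The indexing script above builds and persists a Chroma store; as a usage note, a later process can reload that store and query it without re-embedding the corpus. A sketch against the same legacy LangChain API as the script, assuming `OPENAI_API_KEY` is set and using a made-up question:

```python
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

# Reload the persisted index instead of rebuilding it from ./docs.
db = Chroma(persist_directory="db", embedding_function=OpenAIEmbeddings())
qa = RetrievalQA.from_chain_type(
    llm=OpenAI(model_name="text-ada-001"),
    chain_type="stuff",
    retriever=db.as_retriever(search_type="mmr"),
)
print(qa.run("What is DataHub?"))  # hypothetical question
```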
- """ - if isinstance(val, numbers.Number): - return True - elif include_np and isinstance(val, np.ndarray) and val.ndim == 0: - return True - elif include_torch and isinstance(val, torch.Tensor) and len(val) == 1: - return True - else: - return False - - def get_mode(self, runner): - if runner.mode == 'train': - if 'time' in runner.log_buffer.output: - mode = 'train' - else: - mode = 'val' - elif runner.mode == 'val': - mode = 'val' - else: - raise ValueError(f"runner mode should be 'train' or 'val', " - f'but got {runner.mode}') - return mode - - def get_epoch(self, runner): - if runner.mode == 'train': - epoch = runner.epoch + 1 - elif runner.mode == 'val': - # normal val mode - # runner.epoch += 1 has been done before val workflow - epoch = runner.epoch - else: - raise ValueError(f"runner mode should be 'train' or 'val', " - f'but got {runner.mode}') - return epoch - - def get_iter(self, runner, inner_iter=False): - """Get the current training iteration step.""" - if self.by_epoch and inner_iter: - current_iter = runner.inner_iter + 1 - else: - current_iter = runner.iter + 1 - return current_iter - - def get_lr_tags(self, runner): - tags = {} - lrs = runner.current_lr() - if isinstance(lrs, dict): - for name, value in lrs.items(): - tags[f'learning_rate/{name}'] = value[0] - else: - tags['learning_rate'] = lrs[0] - return tags - - def get_momentum_tags(self, runner): - tags = {} - momentums = runner.current_momentum() - if isinstance(momentums, dict): - for name, value in momentums.items(): - tags[f'momentum/{name}'] = value[0] - else: - tags['momentum'] = momentums[0] - return tags - - def get_loggable_tags(self, - runner, - allow_scalar=True, - allow_text=False, - add_mode=True, - tags_to_skip=('time', 'data_time')): - tags = {} - for var, val in runner.log_buffer.output.items(): - if var in tags_to_skip: - continue - if self.is_scalar(val) and not allow_scalar: - continue - if isinstance(val, str) and not allow_text: - continue - if add_mode: - var = f'{self.get_mode(runner)}/{var}' - tags[var] = val - tags.update(self.get_lr_tags(runner)) - tags.update(self.get_momentum_tags(runner)) - return tags - - def before_run(self, runner): - for hook in runner.hooks[::-1]: - if isinstance(hook, LoggerHook): - hook.reset_flag = True - break - - def before_epoch(self, runner): - runner.log_buffer.clear() # clear logs of last epoch - - def after_train_iter(self, runner): - if self.by_epoch and self.every_n_inner_iters(runner, self.interval): - runner.log_buffer.average(self.interval) - elif not self.by_epoch and self.every_n_iters(runner, self.interval): - runner.log_buffer.average(self.interval) - elif self.end_of_epoch(runner) and not self.ignore_last: - # not precise but more stable - runner.log_buffer.average(self.interval) - - if runner.log_buffer.ready: - self.log(runner) - if self.reset_flag: - runner.log_buffer.clear_output() - - def after_train_epoch(self, runner): - if runner.log_buffer.ready: - self.log(runner) - if self.reset_flag: - runner.log_buffer.clear_output() - - def after_val_epoch(self, runner): - runner.log_buffer.average() - self.log(runner) - if self.reset_flag: - runner.log_buffer.clear_output() diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/rpn_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/rpn_head.py deleted file mode 100644 index a888cb8c188ca6fe63045b6230266553fbe8c996..0000000000000000000000000000000000000000 --- 
a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/rpn_head.py +++ /dev/null @@ -1,236 +0,0 @@ -import copy -import warnings - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv import ConfigDict -from mmcv.cnn import normal_init -from mmcv.ops import batched_nms - -from ..builder import HEADS -from .anchor_head import AnchorHead -from .rpn_test_mixin import RPNTestMixin - - -@HEADS.register_module() -class RPNHead(RPNTestMixin, AnchorHead): - """RPN head. - - Args: - in_channels (int): Number of channels in the input feature map. - """ # noqa: W605 - - def __init__(self, in_channels, **kwargs): - super(RPNHead, self).__init__(1, in_channels, **kwargs) - - def _init_layers(self): - """Initialize layers of the head.""" - self.rpn_conv = nn.Conv2d( - self.in_channels, self.feat_channels, 3, padding=1) - self.rpn_cls = nn.Conv2d(self.feat_channels, - self.num_anchors * self.cls_out_channels, 1) - self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) - - def init_weights(self): - """Initialize weights of the head.""" - normal_init(self.rpn_conv, std=0.01) - normal_init(self.rpn_cls, std=0.01) - normal_init(self.rpn_reg, std=0.01) - - def forward_single(self, x): - """Forward feature map of a single scale level.""" - x = self.rpn_conv(x) - x = F.relu(x, inplace=True) - rpn_cls_score = self.rpn_cls(x) - rpn_bbox_pred = self.rpn_reg(x) - return rpn_cls_score, rpn_bbox_pred - - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - losses = super(RPNHead, self).loss( - cls_scores, - bbox_preds, - gt_bboxes, - None, - img_metas, - gt_bboxes_ignore=gt_bboxes_ignore) - return dict( - loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox']) - - def _get_bboxes(self, - cls_scores, - bbox_preds, - mlvl_anchors, - img_shapes, - scale_factors, - cfg, - rescale=False): - """Transform outputs for a single batch item into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W). - mlvl_anchors (list[Tensor]): Box reference for each scale level - with shape (num_total_anchors, 4). - img_shapes (list[tuple[int]]): Shape of the input image, - (height, width, 3). - scale_factors (list[ndarray]): Scale factor of the image arange as - (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. 
- The first item is an (n, 5) tensor, where the first 4 columns - are bounding box positions (tl_x, tl_y, br_x, br_y) and the - 5-th column is a score between 0 and 1. The second item is a - (n,) tensor where each item is the predicted class labelof the - corresponding box. - """ - cfg = self.test_cfg if cfg is None else cfg - cfg = copy.deepcopy(cfg) - # bboxes from different level should be independent during NMS, - # level_ids are used as labels for batched NMS to separate them - level_ids = [] - mlvl_scores = [] - mlvl_bbox_preds = [] - mlvl_valid_anchors = [] - batch_size = cls_scores[0].shape[0] - nms_pre_tensor = torch.tensor( - cfg.nms_pre, device=cls_scores[0].device, dtype=torch.long) - for idx in range(len(cls_scores)): - rpn_cls_score = cls_scores[idx] - rpn_bbox_pred = bbox_preds[idx] - assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] - rpn_cls_score = rpn_cls_score.permute(0, 2, 3, 1) - if self.use_sigmoid_cls: - rpn_cls_score = rpn_cls_score.reshape(batch_size, -1) - scores = rpn_cls_score.sigmoid() - else: - rpn_cls_score = rpn_cls_score.reshape(batch_size, -1, 2) - # We set FG labels to [0, num_class-1] and BG label to - # num_class in RPN head since mmdet v2.5, which is unified to - # be consistent with other head since mmdet v2.0. In mmdet v2.0 - # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. - scores = rpn_cls_score.softmax(-1)[..., 0] - rpn_bbox_pred = rpn_bbox_pred.permute(0, 2, 3, 1).reshape( - batch_size, -1, 4) - anchors = mlvl_anchors[idx] - anchors = anchors.expand_as(rpn_bbox_pred) - if nms_pre_tensor > 0: - # sort is faster than topk - # _, topk_inds = scores.topk(cfg.nms_pre) - # keep topk op for dynamic k in onnx model - if torch.onnx.is_in_onnx_export(): - # sort op will be converted to TopK in onnx - # and k<=3480 in TensorRT - scores_shape = torch._shape_as_tensor(scores) - nms_pre = torch.where(scores_shape[1] < nms_pre_tensor, - scores_shape[1], nms_pre_tensor) - _, topk_inds = scores.topk(nms_pre) - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds) - scores = scores[batch_inds, topk_inds] - rpn_bbox_pred = rpn_bbox_pred[batch_inds, topk_inds, :] - anchors = anchors[batch_inds, topk_inds, :] - - elif scores.shape[-1] > cfg.nms_pre: - ranked_scores, rank_inds = scores.sort(descending=True) - topk_inds = rank_inds[:, :cfg.nms_pre] - scores = ranked_scores[:, :cfg.nms_pre] - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds) - rpn_bbox_pred = rpn_bbox_pred[batch_inds, topk_inds, :] - anchors = anchors[batch_inds, topk_inds, :] - - mlvl_scores.append(scores) - mlvl_bbox_preds.append(rpn_bbox_pred) - mlvl_valid_anchors.append(anchors) - level_ids.append( - scores.new_full(( - batch_size, - scores.size(1), - ), - idx, - dtype=torch.long)) - - batch_mlvl_scores = torch.cat(mlvl_scores, dim=1) - batch_mlvl_anchors = torch.cat(mlvl_valid_anchors, dim=1) - batch_mlvl_rpn_bbox_pred = torch.cat(mlvl_bbox_preds, dim=1) - batch_mlvl_proposals = self.bbox_coder.decode( - batch_mlvl_anchors, batch_mlvl_rpn_bbox_pred, max_shape=img_shapes) - batch_mlvl_ids = torch.cat(level_ids, dim=1) - - # deprecate arguments warning - if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: - warnings.warn( - 'In rpn_proposal or test_cfg, ' - 'nms_thr has been moved to a dict named nms as ' - 'iou_threshold, max_num has been renamed as max_per_img, ' - 'name of original arguments and the way to specify ' - 'iou_threshold of NMS will be deprecated.') - if 'nms' not in cfg: - cfg.nms = 
ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) - if 'max_num' in cfg: - if 'max_per_img' in cfg: - assert cfg.max_num == cfg.max_per_img, f'You ' \ - f'set max_num and ' \ - f'max_per_img at the same time, but get {cfg.max_num} ' \ - f'and {cfg.max_per_img} respectively' \ - 'Please delete max_num which will be deprecated.' - else: - cfg.max_per_img = cfg.max_num - if 'nms_thr' in cfg: - assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \ - f' iou_threshold in nms and ' \ - f'nms_thr at the same time, but get' \ - f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \ - f' respectively. Please delete the nms_thr ' \ - f'which will be deprecated.' - - result_list = [] - for (mlvl_proposals, mlvl_scores, - mlvl_ids) in zip(batch_mlvl_proposals, batch_mlvl_scores, - batch_mlvl_ids): - # Skip nonzero op while exporting to ONNX - if cfg.min_bbox_size > 0 and (not torch.onnx.is_in_onnx_export()): - w = mlvl_proposals[:, 2] - mlvl_proposals[:, 0] - h = mlvl_proposals[:, 3] - mlvl_proposals[:, 1] - valid_ind = torch.nonzero( - (w >= cfg.min_bbox_size) - & (h >= cfg.min_bbox_size), - as_tuple=False).squeeze() - if valid_ind.sum().item() != len(mlvl_proposals): - mlvl_proposals = mlvl_proposals[valid_ind, :] - mlvl_scores = mlvl_scores[valid_ind] - mlvl_ids = mlvl_ids[valid_ind] - - dets, keep = batched_nms(mlvl_proposals, mlvl_scores, mlvl_ids, - cfg.nms) - result_list.append(dets[:cfg.max_per_img]) - return result_list diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/psp_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/psp_head.py deleted file mode 100644 index b54481f8d0f8b290f3b4c2d5444bdfbd70cd7fa5..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/psp_head.py +++ /dev/null @@ -1,113 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv - * Copyright (c) OpenMMLab. All rights reserved. -''' - -import torch -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -class PPM(nn.ModuleList): - """Pooling Pyramid Module used in PSPNet. - - Args: - pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module. - in_channels (int): Input channels. - channels (int): Channels after modules, before conv_seg. - conv_cfg (dict|None): Config of conv layers. - norm_cfg (dict|None): Config of norm layers. - act_cfg (dict): Config of activation layers. - align_corners (bool): align_corners argument of F.interpolate. 
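Each branch of the PPM below is: pool to a fixed grid, project channels with a 1x1 conv, then bilinearly upsample back to the input size. A stripped-down sketch of one branch with plain torch modules in place of mmcv's ConvModule (sizes are illustrative):

```python
import torch
from torch import nn
import torch.nn.functional as F

x = torch.randn(2, 64, 32, 32)   # (B, C, H, W) feature map
pool_scale = 2                   # one entry of pool_scales, e.g. (1, 2, 3, 6)

branch = nn.Sequential(
    nn.AdaptiveAvgPool2d(pool_scale),   # -> (2, 64, 2, 2)
    nn.Conv2d(64, 16, kernel_size=1),   # channel reduction
)
out = F.interpolate(branch(x), size=x.shape[2:],
                    mode="bilinear", align_corners=False)
print(out.shape)  # torch.Size([2, 16, 32, 32])
```

Running one branch per pooling scale and concatenating the results with the input recovers the pyramid of global-to-local context that PSPNet's head feeds into its bottleneck conv.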
- """ - - def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg, - act_cfg, align_corners): - super(PPM, self).__init__() - self.pool_scales = pool_scales - self.align_corners = align_corners - self.in_channels = in_channels - self.channels = channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - for pool_scale in pool_scales: - self.append( - nn.Sequential( - nn.AdaptiveAvgPool2d(pool_scale), - ConvModule( - self.in_channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg))) - - def forward(self, x): - """Forward function.""" - ppm_outs = [] - for ppm in self: - ppm_out = ppm(x) - upsampled_ppm_out = resize( - ppm_out, - size=x.size()[2:], - mode='bilinear', - align_corners=self.align_corners) - ppm_outs.append(upsampled_ppm_out) - return ppm_outs - - -@HEADS.register_module() -class PSPHead(BaseDecodeHead): - """Pyramid Scene Parsing Network. - - This head is the implementation of - `PSPNet `_. - - Args: - pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module. Default: (1, 2, 3, 6). - """ - - def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): - super(PSPHead, self).__init__(**kwargs) - assert isinstance(pool_scales, (list, tuple)) - self.pool_scales = pool_scales - self.psp_modules = PPM( - self.pool_scales, - self.in_channels, - self.channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - align_corners=self.align_corners) - self.bottleneck = ConvModule( - self.in_channels + len(pool_scales) * self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - psp_outs = [x] - psp_outs.extend(self.psp_modules(x)) - psp_outs = torch.cat(psp_outs, dim=1) - output = self.bottleneck(psp_outs) - output = self.cls_seg(output) - return output diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/text/__init__.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/text/__init__.py deleted file mode 100644 index 0772500426f9397c5f171a0d326fa2f3a8aa5f94..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/text/__init__.py +++ /dev/null @@ -1,523 +0,0 @@ -"""Text formatting, layout and display. - -This module provides classes for loading styled documents from text files, -HTML files and a pyglet-specific markup format. Documents can be styled with -multiple fonts, colours, styles, text sizes, margins, paragraph alignments, -and so on. - -Using the layout classes, documents can be laid out on a single line or -word-wrapped to fit a rectangle. A layout can then be efficiently drawn in -a window or updated incrementally (for example, to support interactive text -editing). - -The label classes provide a simple interface for the common case where an -application simply needs to display some text in a window. 
- -A plain text label can be created with:: - - label = pyglet.text.Label('Hello, world', - font_name='Times New Roman', - font_size=36, - x=10, y=10) - -Alternatively, a styled text label using HTML can be created with:: - - label = pyglet.text.HTMLLabel('Hello, world', - x=10, y=10) - -Either label can then be drawn at any time with:: - - label.draw() - -For details on the subset of HTML supported, see `pyglet.text.formats.html`. - -Refer to the Programming Guide for advanced usage of the document and layout -classes, including interactive editing, embedding objects within documents and -creating scrollable layouts. - -.. versionadded:: 1.1 -""" - -from os.path import dirname as _dirname -from os.path import splitext as _splitext - -import pyglet - -from pyglet.text import layout, document, caret - - -class DocumentDecodeException(Exception): - """An error occurred decoding document text.""" - pass - - -class DocumentDecoder: - """Abstract document decoder. - """ - - def decode(self, text, location=None): - """Decode document text. - - :Parameters: - `text` : str - Text to decode - `location` : `Location` - Location to use as base path for additional resources - referenced within the document (for example, HTML images). - - :rtype: `AbstractDocument` - """ - raise NotImplementedError('abstract') - - -def get_decoder(filename, mimetype=None): - """Get a document decoder for the given filename and MIME type. - - If `mimetype` is omitted it is guessed from the filename extension. - - The following MIME types are supported: - - ``text/plain`` - Plain text - ``text/html`` - HTML 4 Transitional - ``text/vnd.pyglet-attributed`` - Attributed text; see `pyglet.text.formats.attributed` - - `DocumentDecodeException` is raised if another MIME type is given. - - :Parameters: - `filename` : str - Filename to guess the MIME type from. If a MIME type is given, - the filename is ignored. - `mimetype` : str - MIME type to lookup, or ``None`` to guess the type from the - filename. - - :rtype: `DocumentDecoder` - """ - if mimetype is None: - _, ext = _splitext(filename) - if ext.lower() in ('.htm', '.html', '.xhtml'): - mimetype = 'text/html' - else: - mimetype = 'text/plain' - - if mimetype == 'text/plain': - from pyglet.text.formats import plaintext - return plaintext.PlainTextDecoder() - elif mimetype == 'text/html': - from pyglet.text.formats import html - return html.HTMLDecoder() - elif mimetype == 'text/vnd.pyglet-attributed': - from pyglet.text.formats import attributed - return attributed.AttributedTextDecoder() - else: - raise DocumentDecodeException(f'Unknown format "{mimetype}"') - - -def load(filename, file=None, mimetype=None): - """Load a document from a file. - - :Parameters: - `filename` : str - Filename of document to load. - `file` : file-like object - File object containing encoded data. If omitted, `filename` is - loaded from disk. - `mimetype` : str - MIME type of the document. If omitted, the filename extension is - used to guess a MIME type. See `get_decoder` for a list of - supported MIME types. 
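As a usage note for the decoder lookup above, a short sketch (assuming pyglet is installed; the filenames and markup are made up):

```python
import pyglet

# MIME type is guessed from the extension: .html selects the HTML decoder.
decoder = pyglet.text.get_decoder("notes.html")
document = decoder.decode("<b>Hello</b>, world", location=None)

# Plain text goes through the plain-text decoder instead.
plain = pyglet.text.get_decoder("notes.txt").decode("Hello, world")
```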
- - :rtype: `AbstractDocument` - """ - decoder = get_decoder(filename, mimetype) - if not file: - with open(filename) as f: - file_contents = f.read() - else: - file_contents = file.read() - file.close() - - if hasattr(file_contents, "decode"): - file_contents = file_contents.decode() - - location = pyglet.resource.FileLocation(_dirname(filename)) - return decoder.decode(file_contents, location) - - -def decode_html(text, location=None): - """Create a document directly from some HTML formatted text. - - :Parameters: - `text` : str - HTML data to decode. - `location` : str - Location giving the base path for additional resources - referenced from the document (e.g., images). - - :rtype: `FormattedDocument` - """ - decoder = get_decoder(None, 'text/html') - return decoder.decode(text, location) - - -def decode_attributed(text): - """Create a document directly from some attributed text. - - See `pyglet.text.formats.attributed` for a description of attributed text. - - :Parameters: - `text` : str - Attributed text to decode. - - :rtype: `FormattedDocument` - """ - decoder = get_decoder(None, 'text/vnd.pyglet-attributed') - return decoder.decode(text) - - -def decode_text(text): - """Create a document directly from some plain text. - - :Parameters: - `text` : str - Plain text to initialise the document with. - - :rtype: `UnformattedDocument` - """ - decoder = get_decoder(None, 'text/plain') - return decoder.decode(text) - - -class DocumentLabel(layout.TextLayout): - """Base label class. - - A label is a layout that exposes convenience methods for manipulating the - associated document. - """ - - def __init__(self, document=None, - x=0, y=0, z=0, width=None, height=None, - anchor_x='left', anchor_y='baseline', - multiline=False, dpi=None, batch=None, group=None, rotation=0): - """Create a label for a given document. - - :Parameters: - `document` : `AbstractDocument` - Document to attach to the layout. - `x` : int - X coordinate of the label. - `y` : int - Y coordinate of the label. - `z` : int - Z coordinate of the label. - `width` : int - Width of the label in pixels, or None - `height` : int - Height of the label in pixels, or None - `anchor_x` : str - Anchor point of the X coordinate: one of ``"left"``, - ``"center"`` or ``"right"``. - `anchor_y` : str - Anchor point of the Y coordinate: one of ``"bottom"``, - ``"baseline"``, ``"center"`` or ``"top"``. - `multiline` : bool - If True, the label will be word-wrapped and accept newline - characters. You must also set the width of the label. - `dpi` : float - Resolution of the fonts in this layout. Defaults to 96. - `batch` : `~pyglet.graphics.Batch` - Optional graphics batch to add the label to. - `group` : `~pyglet.graphics.Group` - Optional graphics group to use. - `rotation`: float - The amount to rotate the label in degrees. A positive amount - will be a clockwise rotation, negative values will result in - counter-clockwise rotation. - - """ - super().__init__(document, width, height, multiline, dpi, batch, group) - self._x = x - self._y = y - self._z = z - self._rotation = rotation - self._anchor_x = anchor_x - self._anchor_y = anchor_y - self._update() - - @property - def text(self): - """The text of the label. - - :type: str - """ - return self.document.text - - @text.setter - def text(self, text): - self.document.text = text - - @property - def color(self): - """Text color. - - Color is a 4-tuple of RGBA components, each in range [0, 255]. 
- - :type: (int, int, int, int) - """ - return self.document.get_style('color') - - @color.setter - def color(self, color): - self.document.set_style(0, len(self.document.text), {'color': color}) - - @property - def opacity(self): - """Blend opacity. - - This property sets the alpha component of the colour of the label's - vertices. With the default blend mode, this allows the layout to be - drawn with fractional opacity, blending with the background. - - An opacity of 255 (the default) has no effect. An opacity of 128 will - make the label appear semi-translucent. - - :type: int - """ - return self.color[3] - - @opacity.setter - def opacity(self, alpha): - if alpha != self.color[3]: - self.color = list(map(int, (*self.color[:3], alpha))) - - @property - def font_name(self): - """Font family name. - - The font name, as passed to :py:func:`pyglet.font.load`. A list of names can - optionally be given: the first matching font will be used. - - :type: str or list - """ - return self.document.get_style('font_name') - - @font_name.setter - def font_name(self, font_name): - self.document.set_style(0, len(self.document.text), {'font_name': font_name}) - - @property - def font_size(self): - """Font size, in points. - - :type: float - """ - return self.document.get_style('font_size') - - @font_size.setter - def font_size(self, font_size): - self.document.set_style(0, len(self.document.text), {'font_size': font_size}) - - @property - def bold(self): - """Bold font style. - - :type: bool - """ - return self.document.get_style('bold') - - @bold.setter - def bold(self, bold): - self.document.set_style(0, len(self.document.text), {'bold': bold}) - - @property - def italic(self): - """Italic font style. - - :type: bool - """ - return self.document.get_style('italic') - - @italic.setter - def italic(self, italic): - self.document.set_style(0, len(self.document.text), {'italic': italic}) - - def get_style(self, name): - """Get a document style value by name. - - If the document has more than one value of the named style, - `pyglet.text.document.STYLE_INDETERMINATE` is returned. - - :Parameters: - `name` : str - Style name to query. See documentation for - `pyglet.text.layout` for known style names. - - :rtype: object - """ - return self.document.get_style_range(name, 0, len(self.document.text)) - - def set_style(self, name, value): - """Set a document style value by name over the whole document. - - :Parameters: - `name` : str - Name of the style to set. See documentation for - `pyglet.text.layout` for known style names. - `value` : object - Value of the style. - - """ - self.document.set_style(0, len(self.document.text), {name: value}) - - def __del__(self): - self.delete() - - -class Label(DocumentLabel): - """Plain text label. - """ - - def __init__(self, text='', - font_name=None, font_size=None, bold=False, italic=False, stretch=False, - color=(255, 255, 255, 255), - x=0, y=0, z=0, width=None, height=None, - anchor_x='left', anchor_y='baseline', - align='left', - multiline=False, dpi=None, batch=None, group=None, rotation=0): - """Create a plain text label. - - :Parameters: - `text` : str - Text to display. - `font_name` : str or list - Font family name(s). If more than one name is given, the - first matching name is used. - `font_size` : float - Font size, in points. - `bold` : bool/str - Bold font style. - `italic` : bool/str - Italic font style. - `stretch` : bool/str - Stretch font style. - `color` : (int, int, int, int) - Font colour, as RGBA components in range [0, 255]. 
- `x` : int - X coordinate of the label. - `y` : int - Y coordinate of the label. - `z` : int - Z coordinate of the label. - `width` : int - Width of the label in pixels, or None - `height` : int - Height of the label in pixels, or None - `anchor_x` : str - Anchor point of the X coordinate: one of ``"left"``, - ``"center"`` or ``"right"``. - `anchor_y` : str - Anchor point of the Y coordinate: one of ``"bottom"``, - ``"baseline"``, ``"center"`` or ``"top"``. - `align` : str - Horizontal alignment of text on a line, only applies if - a width is supplied. One of ``"left"``, ``"center"`` - or ``"right"``. - `multiline` : bool - If True, the label will be word-wrapped and accept newline - characters. You must also set the width of the label. - `dpi` : float - Resolution of the fonts in this layout. Defaults to 96. - `batch` : `~pyglet.graphics.Batch` - Optional graphics batch to add the label to. - `group` : `~pyglet.graphics.Group` - Optional graphics group to use. - `rotation`: float - The amount to rotate the label in degrees. A positive amount - will be a clockwise rotation, negative values will result in - counter-clockwise rotation. - - """ - doc = decode_text(text) - super().__init__(doc, x, y, z, width, height, anchor_x, anchor_y, multiline, dpi, batch, group, rotation) - - self.document.set_style(0, len(self.document.text), { - 'font_name': font_name, - 'font_size': font_size, - 'bold': bold, - 'italic': italic, - 'stretch': stretch, - 'color': color, - 'align': align, - }) - - -class HTMLLabel(DocumentLabel): - """HTML formatted text label. - - A subset of HTML 4.01 is supported. See `pyglet.text.formats.html` for - details. - """ - - def __init__(self, text='', location=None, - x=0, y=0, z=0, width=None, height=None, - anchor_x='left', anchor_y='baseline', - multiline=False, dpi=None, batch=None, group=None, rotation=0): - """Create a label with an HTML string. - - :Parameters: - `text` : str - HTML formatted text to display. - `location` : `Location` - Location object for loading images referred to in the - document. By default, the working directory is used. - `x` : int - X coordinate of the label. - `y` : int - Y coordinate of the label. - `z` : int - Z coordinate of the label. - `width` : int - Width of the label in pixels, or None - `height` : int - Height of the label in pixels, or None - `anchor_x` : str - Anchor point of the X coordinate: one of ``"left"``, - ``"center"`` or ``"right"``. - `anchor_y` : str - Anchor point of the Y coordinate: one of ``"bottom"``, - ``"baseline"``, ``"center"`` or ``"top"``. - `multiline` : bool - If True, the label will be word-wrapped and render paragraph - and line breaks. You must also set the width of the label. - `dpi` : float - Resolution of the fonts in this layout. Defaults to 96. - `batch` : `~pyglet.graphics.Batch` - Optional graphics batch to add the label to. - `group` : `~pyglet.graphics.Group` - Optional graphics group to use. - `rotation`: float - The amount to rotate the label in degrees. A positive amount - will be a clockwise rotation, negative values will result in - counter-clockwise rotation. - - """ - self._text = text - self._location = location - doc = decode_html(text, location) - super().__init__(doc, x, y, z, width, height, anchor_x, anchor_y, multiline, dpi, batch, group, rotation) - - @property - def text(self): - """HTML formatted text of the label. 
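        (Note, from the setter shown further down: assigning new text re-runs the HTML decoder and replaces the label's underlying document.)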
- - :type: str - """ - return self._text - - @text.setter - def text(self, text): - self._text = text - self.document = decode_html(text, self._location) diff --git a/spaces/adirik/stylemc-demo/helpers.py b/spaces/adirik/stylemc-demo/helpers.py deleted file mode 100644 index f388ecb420a7db06981b766b80b951554daed613..0000000000000000000000000000000000000000 --- a/spaces/adirik/stylemc-demo/helpers.py +++ /dev/null @@ -1,119 +0,0 @@ -from collections import namedtuple -import torch -from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module - -""" -ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) -""" - - -class Flatten(Module): - def forward(self, input): - return input.view(input.size(0), -1) - - -def l2_norm(input, axis=1): - norm = torch.norm(input, 2, axis, True) - output = torch.div(input, norm) - return output - - -class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): - """ A named tuple describing a ResNet block. """ - - -def get_block(in_channel, depth, num_units, stride=2): - return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)] - - -def get_blocks(num_layers): - if num_layers == 50: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=4), - get_block(in_channel=128, depth=256, num_units=14), - get_block(in_channel=256, depth=512, num_units=3) - ] - elif num_layers == 100: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=13), - get_block(in_channel=128, depth=256, num_units=30), - get_block(in_channel=256, depth=512, num_units=3) - ] - elif num_layers == 152: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=8), - get_block(in_channel=128, depth=256, num_units=36), - get_block(in_channel=256, depth=512, num_units=3) - ] - else: - raise ValueError("Invalid number of layers: {}. 
Must be one of [50, 100, 152]".format(num_layers)) - return blocks - - -class SEModule(Module): - def __init__(self, channels, reduction): - super(SEModule, self).__init__() - self.avg_pool = AdaptiveAvgPool2d(1) - self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) - self.relu = ReLU(inplace=True) - self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) - self.sigmoid = Sigmoid() - - def forward(self, x): - module_input = x - x = self.avg_pool(x) - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.sigmoid(x) - return module_input * x - - -class bottleneck_IR(Module): - def __init__(self, in_channel, depth, stride): - super(bottleneck_IR, self).__init__() - if in_channel == depth: - self.shortcut_layer = MaxPool2d(1, stride) - else: - self.shortcut_layer = Sequential( - Conv2d(in_channel, depth, (1, 1), stride, bias=False), - BatchNorm2d(depth) - ) - self.res_layer = Sequential( - BatchNorm2d(in_channel), - Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), - Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth) - ) - - def forward(self, x): - shortcut = self.shortcut_layer(x) - res = self.res_layer(x) - return res + shortcut - - -class bottleneck_IR_SE(Module): - def __init__(self, in_channel, depth, stride): - super(bottleneck_IR_SE, self).__init__() - if in_channel == depth: - self.shortcut_layer = MaxPool2d(1, stride) - else: - self.shortcut_layer = Sequential( - Conv2d(in_channel, depth, (1, 1), stride, bias=False), - BatchNorm2d(depth) - ) - self.res_layer = Sequential( - BatchNorm2d(in_channel), - Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), - PReLU(depth), - Conv2d(depth, depth, (3, 3), stride, 1, bias=False), - BatchNorm2d(depth), - SEModule(depth, 16) - ) - - def forward(self, x): - shortcut = self.shortcut_layer(x) - res = self.res_layer(x) - return res + shortcut \ No newline at end of file diff --git a/spaces/akashdhiman79830/MYGenAIVoice/README.md b/spaces/akashdhiman79830/MYGenAIVoice/README.md deleted file mode 100644 index 8c449003d7b5f40449d4c375887153aeb66476d9..0000000000000000000000000000000000000000 --- a/spaces/akashdhiman79830/MYGenAIVoice/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MYGenAIVoice -emoji: 🦀 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/akhaliq/Mask2Former/mask2former_video/modeling/matcher.py b/spaces/akhaliq/Mask2Former/mask2former_video/modeling/matcher.py deleted file mode 100644 index 642f36022fae7c380d316ae4897220b741904db2..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Mask2Former/mask2former_video/modeling/matcher.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/matcher.py -""" -Modules to compute the matching cost and solve the corresponding LSAP. -""" -import torch -import torch.nn.functional as F -from scipy.optimize import linear_sum_assignment -from torch import nn -from torch.cuda.amp import autocast - -from detectron2.projects.point_rend.point_features import point_sample - - -def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor): - """ - Compute the DICE loss, similar to generalized IOU for masks - Args: - inputs: A float tensor of arbitrary shape. 
-                The predictions for each example.
-        targets: A float tensor with the same shape as inputs. Stores the binary
-                 classification label for each element in inputs
-                (0 for the negative class and 1 for the positive class).
-    """
-    inputs = inputs.sigmoid()
-    inputs = inputs.flatten(1)
-    numerator = 2 * torch.einsum("nc,mc->nm", inputs, targets)
-    denominator = inputs.sum(-1)[:, None] + targets.sum(-1)[None, :]
-    loss = 1 - (numerator + 1) / (denominator + 1)
-    return loss
-
-
-batch_dice_loss_jit = torch.jit.script(
-    batch_dice_loss
-)  # type: torch.jit.ScriptModule
-
-
-def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor):
-    """
-    Args:
-        inputs: A float tensor of arbitrary shape.
-                The predictions for each example.
-        targets: A float tensor with the same shape as inputs. Stores the binary
-                 classification label for each element in inputs
-                (0 for the negative class and 1 for the positive class).
-    Returns:
-        Loss tensor
-    """
-    hw = inputs.shape[1]
-
-    pos = F.binary_cross_entropy_with_logits(
-        inputs, torch.ones_like(inputs), reduction="none"
-    )
-    neg = F.binary_cross_entropy_with_logits(
-        inputs, torch.zeros_like(inputs), reduction="none"
-    )
-
-    loss = torch.einsum("nc,mc->nm", pos, targets) + torch.einsum(
-        "nc,mc->nm", neg, (1 - targets)
-    )
-
-    return loss / hw
-
-
-batch_sigmoid_ce_loss_jit = torch.jit.script(
-    batch_sigmoid_ce_loss
-)  # type: torch.jit.ScriptModule
-
-
-class VideoHungarianMatcher(nn.Module):
-    """This class computes an assignment between the targets and the predictions of the network
-
-    For efficiency reasons, the targets don't include the no_object. Because of this, in general,
-    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
-    while the others are un-matched (and thus treated as non-objects).
-    """
-
-    def __init__(self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1, num_points: int = 0):
-        """Creates the matcher
-
-        Params:
-            cost_class: This is the relative weight of the classification error in the matching cost
-            cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost
-            cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost
-        """
-        super().__init__()
-        self.cost_class = cost_class
-        self.cost_mask = cost_mask
-        self.cost_dice = cost_dice
-
-        assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, "all costs can't be 0"
-
-        self.num_points = num_points
-
-    @torch.no_grad()
-    def memory_efficient_forward(self, outputs, targets):
-        """More memory-friendly matching"""
-        bs, num_queries = outputs["pred_logits"].shape[:2]
-
-        indices = []
-
-        # Iterate through batch size
-        for b in range(bs):
-
-            out_prob = outputs["pred_logits"][b].softmax(-1)  # [num_queries, num_classes]
-            tgt_ids = targets[b]["labels"]
-
-            # Compute the classification cost. Contrary to the loss, we don't use the NLL,
-            # but approximate it in 1 - proba[target class].
-            # The 1 is a constant that doesn't change the matching, it can be omitted.
-            cost_class = -out_prob[:, tgt_ids]
-
-            out_mask = outputs["pred_masks"][b]  # [num_queries, T, H_pred, W_pred]
-            # gt masks are already padded when preparing target
-            tgt_mask = targets[b]["masks"].to(out_mask)  # [num_gts, T, H_pred, W_pred]
-
-            # out_mask = out_mask[:, None]
-            # tgt_mask = tgt_mask[:, None]
-            # all masks share the same set of points for efficient matching!
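            # Note: `point_sample` below (borrowed from PointRend) evaluates each
            # mask only at these `num_points` random normalized (x, y) locations
            # rather than over the full T x H x W grid, so the [num_queries, num_gts]
            # cost matrices are built from N sampled values per mask. Reusing one
            # `point_coords` tensor for both predictions and targets keeps the two
            # sets of samples aligned, which is what makes the sampled costs comparable.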
-            point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)
-            # get gt labels
-            tgt_mask = point_sample(
-                tgt_mask,
-                point_coords.repeat(tgt_mask.shape[0], 1, 1),
-                align_corners=False,
-            ).flatten(1)
-
-            out_mask = point_sample(
-                out_mask,
-                point_coords.repeat(out_mask.shape[0], 1, 1),
-                align_corners=False,
-            ).flatten(1)
-
-            with autocast(enabled=False):
-                out_mask = out_mask.float()
-                tgt_mask = tgt_mask.float()
-                # Compute the focal loss between masks
-                cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)
-
-                # Compute the dice loss between masks
-                cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)
-
-            # Final cost matrix
-            C = (
-                self.cost_mask * cost_mask
-                + self.cost_class * cost_class
-                + self.cost_dice * cost_dice
-            )
-            C = C.reshape(num_queries, -1).cpu()
-
-            indices.append(linear_sum_assignment(C))
-
-        return [
-            (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))
-            for i, j in indices
-        ]
-
-    @torch.no_grad()
-    def forward(self, outputs, targets):
-        """Performs the matching
-
-        Params:
-            outputs: This is a dict that contains at least these entries:
-                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
-                 "pred_masks": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks
-
-            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
-                 "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
-                           objects in the target) containing the class labels
-                 "masks": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks
-
-        Returns:
-            A list of size batch_size, containing tuples of (index_i, index_j) where:
-                - index_i is the indices of the selected predictions (in order)
-                - index_j is the indices of the corresponding selected targets (in order)
-            For each batch element, it holds:
-                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
-        """
-        return self.memory_efficient_forward(outputs, targets)
-
-    def __repr__(self, _repr_indent=4):
-        head = "Matcher " + self.__class__.__name__
-        body = [
-            "cost_class: {}".format(self.cost_class),
-            "cost_mask: {}".format(self.cost_mask),
-            "cost_dice: {}".format(self.cost_dice),
-        ]
-        lines = [head] + [" " * _repr_indent + line for line in body]
-        return "\n".join(lines)
diff --git a/spaces/akhaliq/deeplab2/__init__.py b/spaces/akhaliq/deeplab2/__init__.py
deleted file mode 100644
index 35e4ce02ff422f3aa84ab644b88d65b13e0cbc03..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/deeplab2/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# coding=utf-8
-# Copyright 2021 The Deeplab2 Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
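An aside on the `VideoHungarianMatcher` above: once the weighted cost matrix `C` is assembled, the one-to-one assignment itself is delegated to SciPy's `linear_sum_assignment`. A minimal, self-contained sketch of that final step, with toy numbers standing in for the real class/mask/dice costs:

import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy cost matrix: rows are predicted queries, columns are ground-truth objects.
C = np.array([[0.9, 0.2, 0.4],
              [0.1, 0.7, 0.8]])

# Minimum-total-cost 1-to-1 matching: here query 0 -> gt 1 and query 1 -> gt 0,
# for a total cost of 0.3; the unmatched gt column is treated as no-object.
row_ind, col_ind = linear_sum_assignment(C)
print(list(zip(row_ind, col_ind)), C[row_ind, col_ind].sum())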
- diff --git a/spaces/akhaliq/deeplab2/trainer/vis_utils.py b/spaces/akhaliq/deeplab2/trainer/vis_utils.py deleted file mode 100644 index 1e4d96ffef18ecbc4943fe8aa3fccaaad26e5946..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/trainer/vis_utils.py +++ /dev/null @@ -1,613 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utility functions for the visualizer.""" -from absl import logging -import matplotlib.pyplot as plt -import numpy as np -import PIL -import tensorflow as tf - -from deeplab2.data import coco_constants - -# Amount of color perturbation added to colormap. -_COLOR_PERTURBATION = 60 - - -def bit_get(val, idx): - """Gets the bit value. - - Args: - val: Input value, int or numpy int array. - idx: Which bit of the input val. - - Returns: - The "idx"-th bit of input val. - """ - return (val >> idx) & 1 - - -def create_pascal_label_colormap(): - """Creates a label colormap used in PASCAL VOC segmentation benchmark. - - Returns: - A colormap for visualizing segmentation results. - """ - colormap = np.zeros((512, 3), dtype=int) - ind = np.arange(512, dtype=int) - - for shift in reversed(list(range(8))): - for channel in range(3): - colormap[:, channel] |= bit_get(ind, channel) << shift - ind >>= 3 - - return colormap - - -def create_rgb_from_instance_map(instance_map): - """Creates an RGB image from an instance map for visualization. - - To assign a color to each instance, if the maximum value of the instance - labels is smaller than the maximum allowed value of Pascal's colormap, we use - Pascal's colormap. Otherwise, we use random and non-repeated colors. - - Args: - instance_map: Numpy array of shape `[height, width]`, the instance map. - - Returns: - instance_image: Numpy array of shape `[height, width, 3]`, the visualized - RGB instance image. - """ - # pylint: disable=protected-access - if np.max(instance_map) < 512: - colormap = create_pascal_label_colormap() - instance_image = colormap[instance_map] - else: - np.random.seed(0) - - used_colors = [(0, 0, 0)] - instanc_map_shape = instance_map.shape - instance_image = np.zeros([instanc_map_shape[0], instanc_map_shape[1], 3], - np.uint8) - instance_ids = np.unique(instance_map) - for instance_id in instance_ids: - # We preserve the id "0" for stuff. - if instance_id == 0: - continue - r = np.random.randint(0, 256, dtype=np.uint8) - g = np.random.randint(0, 256, dtype=np.uint8) - b = np.random.randint(0, 256, dtype=np.uint8) - while (r, g, b) in used_colors: - r = np.random.randint(0, 256, dtype=np.uint8) - g = np.random.randint(0, 256, dtype=np.uint8) - b = np.random.randint(0, 256, dtype=np.uint8) - instance_image[instance_map == instance_id, :] = (r, g, b) - used_colors.append((r, g, b)) - instance_image[instance_map == 0, :] = (0, 0, 0) - - return instance_image - - -def _generate_color(used_colors): - """"Generates a non-repeated color. - - This function first uses the pascal colormap to generate the color. 
If more - colors are requested, it randomly generates a non-repeated color. - - Args: - used_colors: A list, where each element is a tuple in the format of - (r, g, b). - - Returns: - A tuple representing a color in the format of (r, g, b). - A list, which is the updated `used_colors` with the returned color tuple - appended to it. - """ - - pascal_colormap = create_pascal_label_colormap() - - if len(used_colors) < len(pascal_colormap): - color = tuple(pascal_colormap[len(used_colors)]) - else: - r = np.random.randint(0, 256, dtype=np.uint8) - g = np.random.randint(0, 256, dtype=np.uint8) - b = np.random.randint(0, 256, dtype=np.uint8) - while (r, g, b) in used_colors: - r = np.random.randint(0, 256, dtype=np.uint8) - g = np.random.randint(0, 256, dtype=np.uint8) - b = np.random.randint(0, 256, dtype=np.uint8) - color = (r, g, b) - used_colors.append(color) - - return color, used_colors - - -def overlay_heatmap_on_image(heatmap, - input_image, - dpi=80.0, - add_color_bar=False): - """Overlays a heatmap on top of an image. - - Args: - heatmap: A numpy array (float32) of shape `[height, width]`, - which is the heatmap of keypoints. - input_image: A numpy array (float32 or uint8) of shape - `[height, width, 3]`, which is an image and all the pixel values are in - the range of [0.0, 255.0]. - dpi: Float, the dpi of the output image. - add_color_bar: Boolean, whether to add a colorbar to the output image. - - Returns: - A numpy array (uint8) of the same shape as the `input_image`. - """ - - # Generate the cmap. - cmap = plt.cm.Reds - # pylint: disable=protected-access - cmap._init() - # pylint: disable=protected-access - cmap._lut[:, -1] = np.linspace(0, 1.0, 259) - - # Plot. - image = input_image.astype(np.float32) / 255.0 - image_height, image_width, _ = image.shape - fig, ax = plt.subplots( - 1, - 1, - facecolor='white', - figsize=(image_width / dpi, image_height / dpi), - dpi=dpi) - grid_y, grid_x = np.mgrid[0:image_height, 0:image_width] - cb = ax.contourf(grid_x, grid_y, heatmap, 10, cmap=cmap) - ax.imshow(image) - ax.grid(False) - plt.axis('off') - if add_color_bar: - plt.colorbar(cb) - fig.subplots_adjust(bottom=0) - fig.subplots_adjust(top=1) - fig.subplots_adjust(right=1) - fig.subplots_adjust(left=0) - - # Get the output image. - fig.canvas.draw() - # pylint: disable=protected-access - output_image = np.array(fig.canvas.renderer._renderer)[:, :, :-1] - plt.close() - - return output_image - - -# pylint: disable=invalid-name -def make_colorwheel(): - """Generates a color wheel for optical flow visualization. - - Reference implementation: - https://github.com/tomrunia/OpticalFlow_Visualization - - Returns: - flow_image: A numpy array of output image. 
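  (Note: the value returned here is the color wheel itself, with RY + YG + GC + CB + BM + MR = 15 + 6 + 4 + 11 + 13 + 6 = 55 rows of RGB values, matching the `[55x3]` shape noted at its use in `flow_compute_color` below.)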
- """ - - RY = 15 - YG = 6 - GC = 4 - CB = 11 - BM = 13 - MR = 6 - - ncols = RY + YG + GC + CB + BM + MR - colorwheel = np.zeros((ncols, 3)) - col = 0 - - # RY - colorwheel[0:RY, 0] = 255 - colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY) - col = col + RY - # YG - colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG) - colorwheel[col:col + YG, 1] = 255 - col = col + YG - # GC - colorwheel[col:col + GC, 1] = 255 - colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC) - col = col + GC - # CB - colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB) - colorwheel[col:col+CB, 2] = 255 - col = col+CB - # BM - colorwheel[col:col + BM, 2] = 255 - colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM) - col = col + BM - # MR - colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR) - colorwheel[col:col+MR, 0] = 255 - return colorwheel -# pylint: enable=invalid-name - - -def flow_compute_color(u, v): - """Computes color for 2D flow field. - - Reference implementation: - https://github.com/tomrunia/OpticalFlow_Visualization - - Args: - u: A numpy array of horizontal flow. - v: A numpy array of vertical flow. - - Returns: - flow_image: A numpy array of output image. - """ - - flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) - - colorwheel = make_colorwheel() # shape [55x3] - ncols = colorwheel.shape[0] - - rad = np.sqrt(np.square(u) + np.square(v)) - a = np.arctan2(-v, -u) / np.pi - - fk = (a + 1) / 2 * (ncols - 1) - k0 = np.floor(fk).astype(np.int32) - k1 = k0 + 1 - k1[k1 == ncols] = 0 - f = fk - k0 - - for i in range(colorwheel.shape[1]): - tmp = colorwheel[:, i] - color0 = tmp[k0] / 255.0 - color1 = tmp[k1] / 255.0 - color = (1 - f) * color0 + f * color1 - - idx = (rad <= 1) - color[idx] = 1 - rad[idx] * (1 - color[idx]) - color[~idx] = color[~idx] * 0.75 - - # The order is RGB. - ch_idx = i - flow_image[:, :, ch_idx] = np.floor(255 * color) - - return flow_image - - -def flow_to_color(flow_uv, clip_flow=None): - """Applies color to 2D flow field. - - Reference implementation: - https://github.com/tomrunia/OpticalFlow_Visualization - - Args: - flow_uv: A numpy array of flow with shape [Height, Width, 2]. - clip_flow: A float to clip the maximum value for the flow. - - Returns: - flow_image: A numpy array of output image. - - Raises: - ValueError: Input flow does not have dimension equals to 3. - ValueError: Input flow does not have shape [H, W, 2]. - """ - - if flow_uv.ndim != 3: - raise ValueError('Input flow must have three dimensions.') - if flow_uv.shape[2] != 2: - raise ValueError('Input flow must have shape [H, W, 2].') - - if clip_flow is not None: - flow_uv = np.clip(flow_uv, 0, clip_flow) - - u = flow_uv[:, :, 0] - v = flow_uv[:, :, 1] - - rad = np.sqrt(np.square(u) + np.square(v)) - rad_max = np.max(rad) - - epsilon = 1e-5 - u = u / (rad_max + epsilon) - v = v / (rad_max + epsilon) - - return flow_compute_color(u, v) - - -def squeeze_batch_dim_and_convert_to_numpy(input_dict): - for key in input_dict: - input_dict[key] = tf.squeeze(input_dict[key], axis=0).numpy() - return input_dict - - -def create_cityscapes_label_colormap(): - """Creates a label colormap used in CITYSCAPES segmentation benchmark. - - Returns: - A colormap for visualizing segmentation results. 
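  (Note: entries 0 through 18 follow the standard 19-class Cityscapes train-ID palette, from road at index 0 to bicycle at index 18; the remaining entries stay black.)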
- """ - colormap = np.zeros((256, 3), dtype=np.uint8) - colormap[0] = [128, 64, 128] - colormap[1] = [244, 35, 232] - colormap[2] = [70, 70, 70] - colormap[3] = [102, 102, 156] - colormap[4] = [190, 153, 153] - colormap[5] = [153, 153, 153] - colormap[6] = [250, 170, 30] - colormap[7] = [220, 220, 0] - colormap[8] = [107, 142, 35] - colormap[9] = [152, 251, 152] - colormap[10] = [70, 130, 180] - colormap[11] = [220, 20, 60] - colormap[12] = [255, 0, 0] - colormap[13] = [0, 0, 142] - colormap[14] = [0, 0, 70] - colormap[15] = [0, 60, 100] - colormap[16] = [0, 80, 100] - colormap[17] = [0, 0, 230] - colormap[18] = [119, 11, 32] - return colormap - - -def create_motchallenge_label_colormap(): - """Creates a label colormap used in MOTChallenge-STEP benchmark. - - Returns: - A colormap for visualizing segmentation results. - """ - colormap = np.zeros((256, 3), dtype=np.uint8) - colormap[0] = [244, 35, 232] - colormap[1] = [70, 70, 70] - colormap[2] = [107, 142, 35] - colormap[3] = [70, 130, 180] - colormap[4] = [220, 20, 60] - colormap[5] = [255, 0, 0] - colormap[6] = [119, 11, 32] - return colormap - - -def create_coco_label_colormap(): - """Creates a label colormap used in COCO dataset. - - Returns: - A colormap for visualizing segmentation results. - """ - # Obtain the dictionary mapping original category id to contiguous ones. - coco_categories = coco_constants.get_coco_reduced_meta() - colormap = np.zeros((256, 3), dtype=np.uint8) - for category in coco_categories: - colormap[category['id']] = category['color'] - return colormap - - -def label_to_color_image(label, colormap_name='cityscapes'): - """Adds color defined by the colormap derived from the dataset to the label. - - Args: - label: A 2D array with integer type, storing the segmentation label. - colormap_name: A string specifying the name of the dataset. Used for - choosing the right colormap. Currently supported: 'cityscapes', - 'motchallenge'. (Default: 'cityscapes') - - Returns: - result: A 2D array with floating type. The element of the array - is the color indexed by the corresponding element in the input label - to the cityscapes colormap. - - Raises: - ValueError: If label is not of rank 2 or its value is larger than color - map maximum entry. - """ - if label.ndim != 2: - raise ValueError('Expect 2-D input label. Got {}'.format(label.shape)) - - if np.max(label) >= 256: - raise ValueError( - 'label value too large: {} >= 256.'.format(np.max(label))) - - if colormap_name == 'cityscapes': - colormap = create_cityscapes_label_colormap() - elif colormap_name == 'motchallenge': - colormap = create_motchallenge_label_colormap() - elif colormap_name == 'coco': - colormap = create_coco_label_colormap() - else: - raise ValueError('Could not find a colormap for dataset %s.' % - colormap_name) - return colormap[label] - - -def save_parsing_result(parsing_result, - label_divisor, - thing_list, - save_dir, - filename, - id_to_colormap=None, - colormap_name='cityscapes'): - """Saves the parsing results. - - The parsing result encodes both semantic segmentation and instance - segmentation results. In order to visualize the parsing result with only - one png file, we adopt the following procedures, similar to the - `visualization.py` provided in the COCO panoptic segmentation evaluation - codes. - - 1. Pixels predicted as `stuff` will take the same semantic color defined - in the colormap. - 2. Pixels of a predicted `thing` instance will take similar semantic color - defined in the colormap. 
For example, `car` class takes blue color in - the colormap. Predicted car instance 1 will then be colored with the - blue color perturbed with a small amount of RGB noise. - - Args: - parsing_result: The numpy array to be saved. The data will be converted - to uint8 and saved as png image. - label_divisor: Integer, encoding the semantic segmentation and instance - segmentation results as value = semantic_label * label_divisor + - instance_label. - thing_list: A list containing the semantic indices of the thing classes. - save_dir: String, the directory to which the results will be saved. - filename: String, the image filename. - id_to_colormap: An optional mapping from track ID to color. - colormap_name: A string specifying the dataset to choose the corresponding - color map. Currently supported: 'cityscapes', 'motchallenge'. (Default: - 'cityscapes'). - - Raises: - ValueError: If parsing_result is not of rank 2 or its value in semantic - segmentation result is larger than color map maximum entry. - ValueError: If provided colormap_name is not supported. - - Returns: - If id_to_colormap is passed, the updated id_to_colormap will be returned. - """ - if parsing_result.ndim != 2: - raise ValueError('Expect 2-D parsing result. Got {}'.format( - parsing_result.shape)) - semantic_result = parsing_result // label_divisor - instance_result = parsing_result % label_divisor - colormap_max_value = 256 - if np.max(semantic_result) >= colormap_max_value: - raise ValueError('Predicted semantic value too large: {} >= {}.'.format( - np.max(semantic_result), colormap_max_value)) - height, width = parsing_result.shape - colored_output = np.zeros((height, width, 3), dtype=np.uint8) - if colormap_name == 'cityscapes': - colormap = create_cityscapes_label_colormap() - elif colormap_name == 'motchallenge': - colormap = create_motchallenge_label_colormap() - elif colormap_name == 'coco': - colormap = create_coco_label_colormap() - else: - raise ValueError('Could not find a colormap for dataset %s.' % - colormap_name) - # Keep track of used colors. - used_colors = set() - if id_to_colormap is not None: - used_colors = set([tuple(val) for val in id_to_colormap.values()]) - np_state = None - else: - # Use random seed 0 in order to reproduce the same visualization. - np_state = np.random.RandomState(0) - - unique_semantic_values = np.unique(semantic_result) - for semantic_value in unique_semantic_values: - semantic_mask = semantic_result == semantic_value - if semantic_value in thing_list: - # For `thing` class, we will add a small amount of random noise to its - # correspondingly predefined semantic segmentation colormap. - unique_instance_values = np.unique(instance_result[semantic_mask]) - for instance_value in unique_instance_values: - instance_mask = np.logical_and(semantic_mask, - instance_result == instance_value) - if id_to_colormap is not None: - if instance_value in id_to_colormap: - colored_output[instance_mask] = id_to_colormap[instance_value] - continue - random_color = perturb_color( - colormap[semantic_value], - _COLOR_PERTURBATION, - used_colors, - random_state=np_state) - colored_output[instance_mask] = random_color - if id_to_colormap is not None: - id_to_colormap[instance_value] = random_color - else: - # For `stuff` class, we use the defined semantic color. 
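      # Note: the two assignments below are what keep stuff regions visually
      # stable, since every stuff class always receives its fixed colormap
      # entry. Each thing instance above instead took `colormap[semantic_value]`
      # perturbed by up to _COLOR_PERTURBATION per RGB channel, so instances of
      # one class stay related in hue yet remain distinguishable.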
-      colored_output[semantic_mask] = colormap[semantic_value]
-      used_colors.add(tuple(colormap[semantic_value]))
-
-  pil_image = PIL.Image.fromarray(colored_output.astype(dtype=np.uint8))
-  with tf.io.gfile.GFile('{}/{}.png'.format(save_dir, filename), mode='w') as f:
-    pil_image.save(f, 'PNG')
-  if id_to_colormap is not None:
-    return id_to_colormap
-
-
-def perturb_color(color,
-                  noise,
-                  used_colors=None,
-                  max_trials=50,
-                  random_state=None):
-  """Perturbs the color with some noise.
-
-  If `used_colors` is not None, we will return the color that has
-  not appeared before in it.
-
-  Args:
-    color: A numpy array with three elements [R, G, B].
-    noise: Integer, specifying the amount of perturbing noise.
-    used_colors: A set, used to keep track of used colors.
-    max_trials: An integer, maximum trials to generate random color.
-    random_state: An optional np.random.RandomState. If passed, will be used to
-      generate random numbers.
-
-  Returns:
-    A perturbed color that has not appeared in used_colors.
-  """
-  for _ in range(max_trials):
-    if random_state is not None:
-      random_color = color + random_state.randint(
-          low=-noise, high=noise + 1, size=3)
-    else:
-      random_color = color + np.random.randint(low=-noise,
-                                               high=noise+1,
-                                               size=3)
-    random_color = np.maximum(0, np.minimum(255, random_color))
-    if used_colors is None:
-      return random_color
-    elif tuple(random_color) not in used_colors:
-      used_colors.add(tuple(random_color))
-      return random_color
-  logging.warning('Using duplicate random color.')
-  return random_color
-
-
-def save_annotation(label,
-                    save_dir,
-                    filename,
-                    add_colormap=True,
-                    normalize_to_unit_values=False,
-                    scale_values=False,
-                    colormap_name='cityscapes'):
-  """Saves the given label to image on disk.
-
-  Args:
-    label: The numpy array to be saved. The data will be converted
-      to uint8 and saved as png image.
-    save_dir: String, the directory to which the results will be saved.
-    filename: String, the image filename.
-    add_colormap: Boolean, add color map to the label or not.
-    normalize_to_unit_values: Boolean, normalize the input values to [0, 1].
-    scale_values: Boolean, scale the input values to [0, 255] for visualization.
-    colormap_name: A string specifying the dataset to choose the corresponding
-      color map. Currently supported: 'cityscapes', 'motchallenge'. (Default:
-      'cityscapes').
-  """
-  # Add colormap for visualizing the prediction.
-  if add_colormap:
-    colored_label = label_to_color_image(label, colormap_name)
-  else:
-    colored_label = label
-  if normalize_to_unit_values:
-    min_value = np.amin(colored_label)
-    max_value = np.amax(colored_label)
-    range_value = max_value - min_value
-    if range_value != 0:
-      colored_label = (colored_label - min_value) / range_value
-
-  if scale_values:
-    colored_label = 255.
* colored_label - - pil_image = PIL.Image.fromarray(colored_label.astype(dtype=np.uint8)) - with tf.io.gfile.GFile('%s/%s.png' % (save_dir, filename), mode='w') as f: - pil_image.save(f, 'PNG') diff --git a/spaces/akhaliq/mdetr/README.md b/spaces/akhaliq/mdetr/README.md deleted file mode 100644 index 552e82adf2defd9bece24670d9d62ded380b2c74..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/mdetr/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Mdetr -emoji: 🌍 -colorFrom: green -colorTo: pink -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/akhaliq/riffusion-riffusion-model-v1/README.md b/spaces/akhaliq/riffusion-riffusion-model-v1/README.md deleted file mode 100644 index 0839add59615da19ae7f1569dd01f3b2719b9631..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/riffusion-riffusion-model-v1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Riffusion Riffusion Model V1 -emoji: 📈 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.13.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/alamin655/websurfx/src/results/mod.rs b/spaces/alamin655/websurfx/src/results/mod.rs deleted file mode 100644 index 9ec3229308d4764f7a92e135fc5ec6868c609e94..0000000000000000000000000000000000000000 --- a/spaces/alamin655/websurfx/src/results/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! This module provides modules that handle the functionality to aggregate the fetched search -//! results from the upstream search engines and filters it if safe search is set to 3 or 4. Also, -//! provides various models to aggregate search results into a standardized form. - -pub mod aggregator; -pub mod user_agent; diff --git a/spaces/alex-mindspace/gpt-agents/app.py b/spaces/alex-mindspace/gpt-agents/app.py deleted file mode 100644 index 03782e56decdc0e323b794c7ae97964b1cb0fb1e..0000000000000000000000000000000000000000 --- a/spaces/alex-mindspace/gpt-agents/app.py +++ /dev/null @@ -1,14 +0,0 @@ -import sys -import gradio as gr -sys.path.append('.') - -from gradio_app.interface import create_gradio_interface - -def greet(name): - return "Hello " + name - -""" -Define the entry point for the application. 
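(Note: `create_gradio_interface` assembles the UI imported from `gradio_app.interface`; the `launch(share=False)` call below then serves it locally without creating a public share link.)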
-""" -demo = create_gradio_interface() -demo.launch(share=False) \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test203/README.md b/spaces/allknowingroger/Image-Models-Test203/README.md deleted file mode 100644 index f91e4b31ab345f987b425de029c057bfb69d9e1b..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test203/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test ---- - - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test61/app.py b/spaces/allknowingroger/Image-Models-Test61/app.py deleted file mode 100644 index e1e9dc8144fc52b4f1232f74b94ef019b6e7aa7c..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test61/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "stillerman/trdne250", - "stillerman/this-rug-does-not-exist", - "artificialguybr/360Redmond", - "cgburgos/sdxl-1-0-base", - "bharadwajkg/finetune-sd2-1-planogram-lora-nocrop-data7", - "digiplay/CounterMix_v1", - "digiplay/YabaLMixTrue25D_V2.0", - "digiplay/2-KWI", - "lberglund/sweep_final_0_20231013091146", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - 
start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/alvanlii/domain-expansion/expansion_utils/consts.py b/spaces/alvanlii/domain-expansion/expansion_utils/consts.py deleted file mode 100644 index a4c6835ce2fb8fdcc2caeee8f92f87e3d535e5bd..0000000000000000000000000000000000000000 --- a/spaces/alvanlii/domain-expansion/expansion_utils/consts.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright 2023 Adobe Research. All rights reserved. -# To view a copy of the license, visit LICENSE.md. - -LATENT_DIM = 512 -EVAL_SIZE = 100 \ No newline at end of file diff --git a/spaces/amsterdamNLP/CLIP-attention-rollout/clip_grounding/utils/image.py b/spaces/amsterdamNLP/CLIP-attention-rollout/clip_grounding/utils/image.py deleted file mode 100644 index 0406e52a1bc76cf1432205934eff84e0f8d43dd1..0000000000000000000000000000000000000000 --- a/spaces/amsterdamNLP/CLIP-attention-rollout/clip_grounding/utils/image.py +++ /dev/null @@ -1,46 +0,0 @@ -"""Image operations.""" -from copy import deepcopy -from PIL import Image - - -def center_crop(im: Image): - width, height = im.size - new_width = width if width < height else height - new_height = height if height < width else width - - left = (width - new_width)/2 - top = (height - new_height)/2 - right = (width + new_width)/2 - bottom = (height + new_height)/2 - - # Crop the center of the image - im = im.crop((left, top, right, bottom)) - - return im - - -def pad_to_square(im: Image, color=(0, 0, 0)): - im = deepcopy(im) - width, height = im.size - - vert_pad = (max(width, height) - height) // 2 - hor_pad = (max(width, height) - width) // 2 - - if len(im.mode) == 3: - color = (0, 0, 0) - elif len(im.mode) == 1: - color = 0 - else: - raise ValueError(f"Image mode not supported. 
Image has {im.mode} channels.") - - return add_margin(im, vert_pad, hor_pad, vert_pad, hor_pad, color=color) - - -def add_margin(pil_img, top, right, bottom, left, color=(0, 0, 0)): - """Ref: https://note.nkmk.me/en/python-pillow-add-margin-expand-canvas/""" - width, height = pil_img.size - new_width = width + right + left - new_height = height + top + bottom - result = Image.new(pil_img.mode, (new_width, new_height), color) - result.paste(pil_img, (left, top)) - return result diff --git a/spaces/aodianyun/stable-diffusion-webui/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/spaces/aodianyun/stable-diffusion-webui/extensions-builtin/LDSR/sd_hijack_autoencoder.py deleted file mode 100644 index 8e03c7f898988c237c714ed949610f5035b30b50..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/extensions-builtin/LDSR/sd_hijack_autoencoder.py +++ /dev/null @@ -1,286 +0,0 @@ -# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo -# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo -# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder - -import torch -import pytorch_lightning as pl -import torch.nn.functional as F -from contextlib import contextmanager -from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer -from ldm.modules.diffusionmodules.model import Encoder, Decoder -from ldm.util import instantiate_from_config - -import ldm.models.autoencoder - -class VQModel(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - n_embed, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - batch_resize_range=None, - scheduler_config=None, - lr_g_factor=1.0, - remap=None, - sane_index_shape=False, # tell vector quantizer to return indices as bhw - use_ema=False - ): - super().__init__() - self.embed_dim = embed_dim - self.n_embed = n_embed - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, - remap=remap, - sane_index_shape=sane_index_shape) - self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - self.batch_resize_range = batch_resize_range - if self.batch_resize_range is not None: - print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") - - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - self.scheduler_config = scheduler_config - self.lr_g_factor = lr_g_factor - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.parameters()) - self.model_ema.copy_to(self) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.parameters()) - if context is 
not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - print(f"Unexpected Keys: {unexpected}") - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self) - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - quant, emb_loss, info = self.quantize(h) - return quant, emb_loss, info - - def encode_to_prequant(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, quant): - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - def decode_code(self, code_b): - quant_b = self.quantize.embed_code(code_b) - dec = self.decode(quant_b) - return dec - - def forward(self, input, return_pred_indices=False): - quant, diff, (_,_,ind) = self.encode(input) - dec = self.decode(quant) - if return_pred_indices: - return dec, diff, ind - return dec, diff - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - if self.batch_resize_range is not None: - lower_size = self.batch_resize_range[0] - upper_size = self.batch_resize_range[1] - if self.global_step <= 4: - # do the first few batches with max size to avoid later oom - new_resize = upper_size - else: - new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) - if new_resize != x.shape[2]: - x = F.interpolate(x, size=new_resize, mode="bicubic") - x = x.detach() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - # https://github.com/pytorch/pytorch/issues/37142 - # try not to fool the heuristics - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - - if optimizer_idx == 0: - # autoencode - aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train", - predicted_indices=ind) - - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return aeloss - - if optimizer_idx == 1: - # discriminator - discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return discloss - - def validation_step(self, batch, batch_idx): - log_dict = self._validation_step(batch, batch_idx) - with self.ema_scope(): - log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") - return log_dict - - def _validation_step(self, batch, batch_idx, suffix=""): - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - - discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - rec_loss = 
log_dict_ae[f"val{suffix}/rec_loss"] - self.log(f"val{suffix}/rec_loss", rec_loss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - self.log(f"val{suffix}/aeloss", aeloss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - if version.parse(pl.__version__) >= version.parse('1.4.0'): - del log_dict_ae[f"val{suffix}/rec_loss"] - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr_d = self.learning_rate - lr_g = self.lr_g_factor*self.learning_rate - print("lr_d", lr_d) - print("lr_g", lr_g) - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quantize.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr_g, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr_d, betas=(0.5, 0.9)) - - if self.scheduler_config is not None: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - { - 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - ] - return [opt_ae, opt_disc], scheduler - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if only_inputs: - log["inputs"] = x - return log - xrec, _ = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["inputs"] = x - log["reconstructions"] = xrec - if plot_ema: - with self.ema_scope(): - xrec_ema, _ = self(x) - if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) - log["reconstructions_ema"] = xrec_ema - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. - return x - - -class VQModelInterface(VQModel): - def __init__(self, embed_dim, *args, **kwargs): - super().__init__(embed_dim=embed_dim, *args, **kwargs) - self.embed_dim = embed_dim - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, h, force_not_quantize=False): - # also go through quantization layer - if not force_not_quantize: - quant, emb_loss, info = self.quantize(h) - else: - quant = h - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - -setattr(ldm.models.autoencoder, "VQModel", VQModel) -setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Signature/pkcs1_15.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Signature/pkcs1_15.py deleted file mode 100644 index 54a4bf7cc9d1e568d0a6f9a95226c6f2087c2a35..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Signature/pkcs1_15.py +++ /dev/null @@ -1,222 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2014, Legrandin -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -import Crypto.Util.number -from Crypto.Util.number import ceil_div, bytes_to_long, long_to_bytes -from Crypto.Util.asn1 import DerSequence, DerNull, DerOctetString, DerObjectId - -class PKCS115_SigScheme: - """A signature object for ``RSASSA-PKCS1-v1_5``. - Do not instantiate directly. - Use :func:`Crypto.Signature.pkcs1_15.new`. - """ - - def __init__(self, rsa_key): - """Initialize this PKCS#1 v1.5 signature scheme object. - - :Parameters: - rsa_key : an RSA key object - Creation of signatures is only possible if this is a *private* - RSA key. Verification of signatures is always possible. - """ - self._key = rsa_key - - def can_sign(self): - """Return ``True`` if this object can be used to sign messages.""" - return self._key.has_private() - - def sign(self, msg_hash): - """Create the PKCS#1 v1.5 signature of a message. - - This function is also called ``RSASSA-PKCS1-V1_5-SIGN`` and - it is specified in - `section 8.2.1 of RFC8017 `_. - - :parameter msg_hash: - This is an object from the :mod:`Crypto.Hash` package. - It has been used to digest the message to sign. - :type msg_hash: hash object - - :return: the signature encoded as a *byte string*. - :raise ValueError: if the RSA key is not long enough for the given hash algorithm. - :raise TypeError: if the RSA key has no private half. - """ - - # See 8.2.1 in RFC3447 - modBits = Crypto.Util.number.size(self._key.n) - k = ceil_div(modBits,8) # Convert from bits to bytes - - # Step 1 - em = _EMSA_PKCS1_V1_5_ENCODE(msg_hash, k) - # Step 2a (OS2IP) - em_int = bytes_to_long(em) - # Step 2b (RSASP1) - m_int = self._key._decrypt(em_int) - # Step 2c (I2OSP) - signature = long_to_bytes(m_int, k) - return signature - - def verify(self, msg_hash, signature): - """Check if the PKCS#1 v1.5 signature over a message is valid. - - This function is also called ``RSASSA-PKCS1-V1_5-VERIFY`` and - it is specified in - `section 8.2.2 of RFC8037 `_. - - :parameter msg_hash: - The hash that was carried out over the message. This is an object - belonging to the :mod:`Crypto.Hash` module. - :type parameter: hash object - - :parameter signature: - The signature that needs to be validated. 
-    def verify(self, msg_hash, signature):
-        """Check if the PKCS#1 v1.5 signature over a message is valid.
-
-        This function is also called ``RSASSA-PKCS1-V1_5-VERIFY`` and
-        it is specified in
-        `section 8.2.2 of RFC8017 <https://tools.ietf.org/html/rfc8017>`_.
-
-        :parameter msg_hash:
-            The hash that was carried out over the message. This is an object
-            belonging to the :mod:`Crypto.Hash` module.
-        :type msg_hash: hash object
-
-        :parameter signature:
-            The signature that needs to be validated.
-        :type signature: byte string
-
-        :raise ValueError: if the signature is not valid.
-        """
-
-        # See 8.2.2 in RFC3447
-        modBits = Crypto.Util.number.size(self._key.n)
-        k = ceil_div(modBits, 8)    # Convert from bits to bytes
-
-        # Step 1
-        if len(signature) != k:
-            raise ValueError("Invalid signature")
-        # Step 2a (OS2IP)
-        signature_int = bytes_to_long(signature)
-        # Step 2b (RSAVP1)
-        em_int = self._key._encrypt(signature_int)
-        # Step 2c (I2OSP)
-        em1 = long_to_bytes(em_int, k)
-        # Step 3
-        try:
-            possible_em1 = [ _EMSA_PKCS1_V1_5_ENCODE(msg_hash, k, True) ]
-            # MD2/4/5 hashes always require NULL params in AlgorithmIdentifier.
-            # For all others, it is optional.
-            try:
-                algorithm_is_md = msg_hash.oid.startswith('1.2.840.113549.2.')
-            except AttributeError:
-                algorithm_is_md = False
-            if not algorithm_is_md:  # MD2/MD4/MD5
-                possible_em1.append(_EMSA_PKCS1_V1_5_ENCODE(msg_hash, k, False))
-        except ValueError:
-            raise ValueError("Invalid signature")
-        # Step 4
-        # By comparing the full encodings (as opposed to checking each
-        # of its components one at a time) we avoid attacks to the padding
-        # scheme like Bleichenbacher's (see http://www.mail-archive.com/cryptography@metzdowd.com/msg06537).
-        #
-        if em1 not in possible_em1:
-            raise ValueError("Invalid signature")
-
-
-def _EMSA_PKCS1_V1_5_ENCODE(msg_hash, emLen, with_hash_parameters=True):
-    """
-    Implement the ``EMSA-PKCS1-V1_5-ENCODE`` function, as defined
-    in PKCS#1 v2.1 (RFC3447, 9.2).
-
-    ``_EMSA-PKCS1-V1_5-ENCODE`` actually accepts the message ``M`` as input,
-    and hashes it internally. Here, we expect that the message has already
-    been hashed instead.
-
-    :Parameters:
-     msg_hash : hash object
-            The hash object that holds the digest of the message being signed.
-     emLen : int
-            The length the final encoding must have, in bytes.
-     with_hash_parameters : bool
-            If True (default), include NULL parameters for the hash
-            algorithm in the ``digestAlgorithm`` SEQUENCE.
-
-    :attention: the early standard (RFC2313) stated that ``DigestInfo``
-        had to be BER-encoded. This means that old signatures
-        might have length tags in indefinite form, which
-        is not supported in DER. Such encoding cannot be
-        reproduced by this function.
-
-    :Return: An ``emLen`` byte long string that encodes the hash.
-    """
-
-    # First, build the ASN.1 DER object DigestInfo:
-    #
-    #   DigestInfo ::= SEQUENCE {
-    #       digestAlgorithm AlgorithmIdentifier,
-    #       digest OCTET STRING
-    #   }
-    #
-    # where digestAlgorithm identifies the hash function and shall be an
-    # algorithm ID with an OID in the set PKCS1-v1-5DigestAlgorithms.
-    #
-    #   PKCS1-v1-5DigestAlgorithms    ALGORITHM-IDENTIFIER ::= {
-    #       { OID id-md2      PARAMETERS NULL }|
-    #       { OID id-md5     PARAMETERS NULL }|
-    #       { OID id-sha1    PARAMETERS NULL }|
-    #       { OID id-sha256  PARAMETERS NULL }|
-    #       { OID id-sha384  PARAMETERS NULL }|
-    #       { OID id-sha512  PARAMETERS NULL }
-    #   }
-    #
-    # Appendix B.1 also says that for SHA-1/-2 algorithms, the parameters
-    # should be omitted. They may be present, but when they are, they shall
-    # have NULL value.
-
-    digestAlgo = DerSequence([ DerObjectId(msg_hash.oid).encode() ])
-
-    if with_hash_parameters:
-        digestAlgo.append(DerNull().encode())
-
-    digest      = DerOctetString(msg_hash.digest())
-    digestInfo  = DerSequence([
-                    digestAlgo.encode(),
-                    digest.encode()
-                    ]).encode()
-
-    # We need at least 11 bytes for the remaining data: 3 fixed bytes and
-    # at least 8 bytes of padding).
-    if emLen < len(digestInfo) + 11:
-        raise TypeError("Selected hash algorithm has a too long digest.")
-
-    PS = b'\xFF' * (emLen - len(digestInfo) - 3)
-    return b'\x00\x01' + PS + b'\x00' + digestInfo
-
-
-def new(rsa_key):
-    """Create a signature object for creating or verifying
-    PKCS#1 v1.5 signatures.
-
-    :parameter rsa_key:
-        The RSA key to use for signing or verifying the message.
-        This is a :class:`Crypto.PublicKey.RSA` object.
-        Signing is only possible when ``rsa_key`` is a **private** RSA key.
-    :type rsa_key: RSA object
-
-    :return: a :class:`PKCS115_SigScheme` signature object
-    """
-    return PKCS115_SigScheme(rsa_key)
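A matching verification sketch (the names mirror the signing example above; ``key.publickey()`` is the corresponding public half, since verification never needs the private key):

    from Crypto.Hash import SHA256
    from Crypto.PublicKey import RSA
    from Crypto.Signature import pkcs1_15

    key = RSA.generate(2048)
    msg_hash = SHA256.new(b'message to sign')
    signature = pkcs1_15.new(key).sign(msg_hash)

    verifier = pkcs1_15.new(key.publickey())   # public key is enough to verify
    try:
        verifier.verify(SHA256.new(b'message to sign'), signature)
        print('The signature is valid.')
    except ValueError:
        print('The signature is not valid.')

Note the contract: ``verify`` signals failure by raising ``ValueError``, not by returning a boolean, which makes it harder to accidentally ignore a bad signature.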
-#if PY_MAJOR_VERSION >= 3
-    bi = PyImport_ImportModule("builtins");
-#else
-    bi = PyImport_ImportModule("__builtin__");
-#endif
-    if (bi == NULL)
-        goto error;
-    PyDict_SetItemString(result, "__builtins__", bi);
-    Py_DECREF(bi);
-
-    x = PyRun_String(
-        "import sys\n"
-        "class FileLike:\n"
-        "  def write(self, x):\n"
-        "    try:\n"
-        "      of.write(x)\n"
-        "    except: pass\n"
-        "    self.buf += x\n"
-        "  def flush(self):\n"
-        "    pass\n"
-        "fl = FileLike()\n"
-        "fl.buf = ''\n"
-        "of = sys.stderr\n"
-        "sys.stderr = fl\n"
-        "def done():\n"
-        "  sys.stderr = of\n"
-        "  return fl.buf\n",   /* make sure the returned value stays alive */
-        Py_file_input,
-        result, result);
-    Py_XDECREF(x);
-
- error:
-    if (PyErr_Occurred())
-    {
-        PyErr_WriteUnraisable(Py_None);
-        PyErr_Clear();
-    }
-    return result;
-}
-
-#pragma comment(lib, "user32.lib")
-
-static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored)
-{
-    Sleep(666);    /* may be interrupted if the whole process is closing */
-#if PY_MAJOR_VERSION >= 3
-    MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text,
-                L"Python-CFFI error",
-                MB_OK | MB_ICONERROR);
-#else
-    MessageBoxA(NULL, (char *)_cffi_bootstrap_text,
-                "Python-CFFI error",
-                MB_OK | MB_ICONERROR);
-#endif
-    _cffi_bootstrap_text = NULL;
-    return 0;
-}
-
-static void _cffi_stop_error_capture(PyObject *ecap)
-{
-    PyObject *s;
-    void *text;
-
-    if (ecap == (PyObject *)1)
-        return;
-
-    if (ecap == NULL)
-        goto error;
-
-    s = PyRun_String("done()", Py_eval_input, ecap, ecap);
-    if (s == NULL)
-        goto error;
-
-    /* Show a dialog box, but in a background thread, and
-       never show multiple dialog boxes at once. */
-#if PY_MAJOR_VERSION >= 3
-    text = PyUnicode_AsWideCharString(s, NULL);
-#else
-    text = PyString_AsString(s);
-#endif
-
-    _cffi_bootstrap_text = text;
-
-    if (text != NULL)
-    {
-        HANDLE h;
-        h = CreateThread(NULL, 0, _cffi_bootstrap_dialog,
-                         NULL, 0, NULL);
-        if (h != NULL)
-            CloseHandle(h);
-    }
-    /* decref the string, but it should stay alive as 'fl.buf'
-       in the small module above. It will really be freed only if
-       we later get another similar error. So it's a leak of at
-       most one copy of the small module. That's fine for this
-       situation which is usually a "fatal error" anyway. */
-    Py_DECREF(s);
-    PyErr_Clear();
-    return;
-
- error:
-    _cffi_bootstrap_text = NULL;
-    PyErr_Clear();
-}
-
-#else
-
-static PyObject *_cffi_start_error_capture(void) { return NULL; }
-static void _cffi_stop_error_capture(PyObject *ecap) { }
-
-#endif
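The embedded snippet run by ``PyRun_String`` above is the whole trick: ``sys.stderr`` is swapped for a buffering file-like object so that a later ``done()`` call can return everything that was printed while the capture was active. A standalone Python sketch of the same idea (``capture`` and ``done`` are illustrative names, not part of cffi's API):

    import sys

    class FileLike:
        # Buffer writes while still forwarding them to the original stream.
        def __init__(self, of):
            self.of = of
            self.buf = ''
        def write(self, x):
            try:
                self.of.write(x)
            except Exception:
                pass
            self.buf += x
        def flush(self):
            pass

    def capture():
        fl = FileLike(sys.stderr)
        sys.stderr = fl
        return fl

    def done(fl):
        sys.stderr = fl.of   # restore the real stderr
        return fl.buf        # everything written during the capture

The C code then hands the captured text to a background-thread message box, so a fatal embedding error is shown to the user even when no console is attached.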
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/click/core.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/click/core.py
deleted file mode 100644
index 5abfb0f3c2f872275962732b370fed1202f1144a..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/click/core.py
+++ /dev/null
@@ -1,2998 +0,0 @@
-import enum
-import errno
-import inspect
-import os
-import sys
-import typing as t
-from collections import abc
-from contextlib import contextmanager
-from contextlib import ExitStack
-from functools import partial
-from functools import update_wrapper
-from gettext import gettext as _
-from gettext import ngettext
-from itertools import repeat
-
-from . import types
-from .exceptions import Abort
-from .exceptions import BadParameter
-from .exceptions import ClickException
-from .exceptions import Exit
-from .exceptions import MissingParameter
-from .exceptions import UsageError
-from .formatting import HelpFormatter
-from .formatting import join_options
-from .globals import pop_context
-from .globals import push_context
-from .parser import _flag_needs_value
-from .parser import OptionParser
-from .parser import split_opt
-from .termui import confirm
-from .termui import prompt
-from .termui import style
-from .utils import _detect_program_name
-from .utils import _expand_args
-from .utils import echo
-from .utils import make_default_short_help
-from .utils import make_str
-from .utils import PacifyFlushWrapper
-
-if t.TYPE_CHECKING:
-    import typing_extensions as te
-    from .shell_completion import CompletionItem
-
-F = t.TypeVar("F", bound=t.Callable[..., t.Any])
-V = t.TypeVar("V")
-
-
-def _complete_visible_commands(
-    ctx: "Context", incomplete: str
-) -> t.Iterator[t.Tuple[str, "Command"]]:
-    """List all the subcommands of a group that start with the
-    incomplete value and aren't hidden.
-
-    :param ctx: Invocation context for the group.
-    :param incomplete: Value being completed. May be empty.
-    """
-    multi = t.cast(MultiCommand, ctx.command)
-
-    for name in multi.list_commands(ctx):
-        if name.startswith(incomplete):
-            command = multi.get_command(ctx, name)
-
-            if command is not None and not command.hidden:
-                yield name, command
-
-
-def _check_multicommand(
-    base_command: "MultiCommand", cmd_name: str, cmd: "Command", register: bool = False
-) -> None:
-    if not base_command.chain or not isinstance(cmd, MultiCommand):
-        return
-    if register:
-        hint = (
-            "It is not possible to add multi commands as children to"
-            " another multi command that is in chain mode."
-        )
-    else:
-        hint = (
-            "Found a multi command as subcommand to a multi command"
-            " that is in chain mode. This is not supported."
-        )
-    raise RuntimeError(
-        f"{hint}. Command {base_command.name!r} is set to chain and"
-        f" {cmd_name!r} was added as a subcommand, but is itself a"
-        f" multi command. ({cmd_name!r} is a {type(cmd).__name__}"
-        f" within a chained {type(base_command).__name__} named"
-        f" {base_command.name!r})."
-    )
-
-
-def batch(iterable: t.Iterable[V], batch_size: int) -> t.List[t.Tuple[V, ...]]:
-    return list(zip(*repeat(iter(iterable), batch_size)))
-
-
-@contextmanager
-def augment_usage_errors(
-    ctx: "Context", param: t.Optional["Parameter"] = None
-) -> t.Iterator[None]:
-    """Context manager that attaches extra information to exceptions."""
-    try:
-        yield
-    except BadParameter as e:
-        if e.ctx is None:
-            e.ctx = ctx
-        if param is not None and e.param is None:
-            e.param = param
-        raise
-    except UsageError as e:
-        if e.ctx is None:
-            e.ctx = ctx
-        raise
-
-
-def iter_params_for_processing(
-    invocation_order: t.Sequence["Parameter"],
-    declaration_order: t.Sequence["Parameter"],
-) -> t.List["Parameter"]:
-    """Given a sequence of parameters in the order they were invoked and
-    the sequence of parameters as declared, return the parameters in the
-    order they should be processed: eager parameters first, then by
-    position on the command line.
-    """
-
-    def sort_key(item: "Parameter") -> t.Tuple[bool, float]:
-        try:
-            idx: float = invocation_order.index(item)
-        except ValueError:
-            idx = float("inf")
-
-        return not item.is_eager, idx
-
-    return sorted(declaration_order, key=sort_key)
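A toy illustration of the resulting order (the stand-in class ``P`` is hypothetical; real code passes ``click.Parameter`` instances, but the function only reads ``is_eager`` and compares identities):

    class P:
        def __init__(self, name, is_eager=False):
            self.name, self.is_eager = name, is_eager

    version = P('version', is_eager=True)
    name = P('name')
    declared = [name, version]
    invoked = [name]               # only --name appeared on the command line

    # Eager parameters win, then command-line position breaks ties.
    print([p.name for p in iter_params_for_processing(invoked, declared)])
    # -> ['version', 'name']

This ordering is why an eager ``--version`` flag can short-circuit before other options are validated.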
- """ - - def sort_key(item: "Parameter") -> t.Tuple[bool, float]: - try: - idx: float = invocation_order.index(item) - except ValueError: - idx = float("inf") - - return not item.is_eager, idx - - return sorted(declaration_order, key=sort_key) - - -class ParameterSource(enum.Enum): - """This is an :class:`~enum.Enum` that indicates the source of a - parameter's value. - - Use :meth:`click.Context.get_parameter_source` to get the - source for a parameter by name. - - .. versionchanged:: 8.0 - Use :class:`~enum.Enum` and drop the ``validate`` method. - - .. versionchanged:: 8.0 - Added the ``PROMPT`` value. - """ - - COMMANDLINE = enum.auto() - """The value was provided by the command line args.""" - ENVIRONMENT = enum.auto() - """The value was provided with an environment variable.""" - DEFAULT = enum.auto() - """Used the default specified by the parameter.""" - DEFAULT_MAP = enum.auto() - """Used a default provided by :attr:`Context.default_map`.""" - PROMPT = enum.auto() - """Used a prompt to confirm a default or provide a value.""" - - -class Context: - """The context is a special internal object that holds state relevant - for the script execution at every single level. It's normally invisible - to commands unless they opt-in to getting access to it. - - The context is useful as it can pass internal objects around and can - control special execution features such as reading data from - environment variables. - - A context can be used as context manager in which case it will call - :meth:`close` on teardown. - - :param command: the command class for this context. - :param parent: the parent context. - :param info_name: the info name for this invocation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it is usually - the name of the script, for commands below it it's - the name of the script. - :param obj: an arbitrary object of user data. - :param auto_envvar_prefix: the prefix to use for automatic environment - variables. If this is `None` then reading - from environment variables is disabled. This - does not affect manually set environment - variables which are always read. - :param default_map: a dictionary (like object) with default values - for parameters. - :param terminal_width: the width of the terminal. The default is - inherit from parent context. If no context - defines the terminal width then auto - detection will be applied. - :param max_content_width: the maximum width for content rendered by - Click (this currently only affects help - pages). This defaults to 80 characters if - not overridden. In other words: even if the - terminal is larger than that, Click will not - format things wider than 80 characters by - default. In addition to that, formatters might - add some safety mapping on the right. - :param resilient_parsing: if this flag is enabled then Click will - parse without any interactivity or callback - invocation. Default values will also be - ignored. This is useful for implementing - things such as completion support. - :param allow_extra_args: if this is set to `True` then extra arguments - at the end will not raise an error and will be - kept on the context. The default is to inherit - from the command. - :param allow_interspersed_args: if this is set to `False` then options - and arguments cannot be mixed. The - default is to inherit from the command. - :param ignore_unknown_options: instructs click to ignore options it does - not know and keeps them for later - processing. 
-
-
-class Context:
-    """The context is a special internal object that holds state relevant
-    for the script execution at every single level. It's normally invisible
-    to commands unless they opt in to getting access to it.
-
-    The context is useful as it can pass internal objects around and can
-    control special execution features such as reading data from
-    environment variables.
-
-    A context can be used as context manager in which case it will call
-    :meth:`close` on teardown.
-
-    :param command: the command class for this context.
-    :param parent: the parent context.
-    :param info_name: the info name for this invocation. Generally this
-                      is the most descriptive name for the script or
-                      command. For the toplevel script it is usually
-                      the name of the script, for commands below it it's
-                      the name of the command.
-    :param obj: an arbitrary object of user data.
-    :param auto_envvar_prefix: the prefix to use for automatic environment
-                               variables. If this is `None` then reading
-                               from environment variables is disabled. This
-                               does not affect manually set environment
-                               variables which are always read.
-    :param default_map: a dictionary (like object) with default values
-                        for parameters.
-    :param terminal_width: the width of the terminal. The default is
-                           inherited from the parent context. If no context
-                           defines the terminal width then auto
-                           detection will be applied.
-    :param max_content_width: the maximum width for content rendered by
-                              Click (this currently only affects help
-                              pages). This defaults to 80 characters if
-                              not overridden. In other words: even if the
-                              terminal is larger than that, Click will not
-                              format things wider than 80 characters by
-                              default. In addition to that, formatters might
-                              add some safety mapping on the right.
-    :param resilient_parsing: if this flag is enabled then Click will
-                              parse without any interactivity or callback
-                              invocation. Default values will also be
-                              ignored. This is useful for implementing
-                              things such as completion support.
-    :param allow_extra_args: if this is set to `True` then extra arguments
-                             at the end will not raise an error and will be
-                             kept on the context. The default is to inherit
-                             from the command.
-    :param allow_interspersed_args: if this is set to `False` then options
-                                    and arguments cannot be mixed. The
-                                    default is to inherit from the command.
-    :param ignore_unknown_options: instructs click to ignore options it does
-                                   not know and keeps them for later
-                                   processing.
-    :param help_option_names: optionally a list of strings that define how
-                              the default help parameter is named. The
-                              default is ``['--help']``.
-    :param token_normalize_func: an optional function that is used to
-                                 normalize tokens (options, choices,
-                                 etc.). This for instance can be used to
-                                 implement case insensitive behavior.
-    :param color: controls if the terminal supports ANSI colors or not. The
-                  default is autodetection. This is only needed if ANSI
-                  codes are used in texts that Click prints which is by
-                  default not the case. This for instance would affect
-                  help output.
-    :param show_default: Show the default value for commands. If this
-        value is not set, it defaults to the value from the parent
-        context. ``Command.show_default`` overrides this default for the
-        specific command.
-
-    .. versionchanged:: 8.1
-        The ``show_default`` parameter is overridden by
-        ``Command.show_default``, instead of the other way around.
-
-    .. versionchanged:: 8.0
-        The ``show_default`` parameter defaults to the value from the
-        parent context.
-
-    .. versionchanged:: 7.1
-       Added the ``show_default`` parameter.
-
-    .. versionchanged:: 4.0
-        Added the ``color``, ``ignore_unknown_options``, and
-        ``max_content_width`` parameters.
-
-    .. versionchanged:: 3.0
-        Added the ``allow_extra_args`` and ``allow_interspersed_args``
-        parameters.
-
-    .. versionchanged:: 2.0
-        Added the ``resilient_parsing``, ``help_option_names``, and
-        ``token_normalize_func`` parameters.
-    """
-
-    #: The formatter class to create with :meth:`make_formatter`.
-    #:
-    #: .. versionadded:: 8.0
-    formatter_class: t.Type["HelpFormatter"] = HelpFormatter
-
-    def __init__(
-        self,
-        command: "Command",
-        parent: t.Optional["Context"] = None,
-        info_name: t.Optional[str] = None,
-        obj: t.Optional[t.Any] = None,
-        auto_envvar_prefix: t.Optional[str] = None,
-        default_map: t.Optional[t.Dict[str, t.Any]] = None,
-        terminal_width: t.Optional[int] = None,
-        max_content_width: t.Optional[int] = None,
-        resilient_parsing: bool = False,
-        allow_extra_args: t.Optional[bool] = None,
-        allow_interspersed_args: t.Optional[bool] = None,
-        ignore_unknown_options: t.Optional[bool] = None,
-        help_option_names: t.Optional[t.List[str]] = None,
-        token_normalize_func: t.Optional[t.Callable[[str], str]] = None,
-        color: t.Optional[bool] = None,
-        show_default: t.Optional[bool] = None,
-    ) -> None:
-        #: the parent context or `None` if none exists.
-        self.parent = parent
-        #: the :class:`Command` for this context.
-        self.command = command
-        #: the descriptive information name
-        self.info_name = info_name
-        #: Map of parameter names to their parsed values. Parameters
-        #: with ``expose_value=False`` are not stored.
-        self.params: t.Dict[str, t.Any] = {}
-        #: the leftover arguments.
-        self.args: t.List[str] = []
-        #: protected arguments. These are arguments that are prepended
-        #: to `args` when certain parsing scenarios are encountered, but
-        #: must never be propagated to other arguments. This is used
-        #: to implement nested parsing.
-        self.protected_args: t.List[str] = []
-        #: the collected prefixes of the command's options.
-        self._opt_prefixes: t.Set[str] = set(parent._opt_prefixes) if parent else set()
-
-        if obj is None and parent is not None:
-            obj = parent.obj
-
-        #: the user object stored.
-        self.obj: t.Any = obj
-        self._meta: t.Dict[str, t.Any] = getattr(parent, "meta", {})
-
-        #: A dictionary (-like object) with defaults for parameters.
-        if (
-            default_map is None
-            and info_name is not None
-            and parent is not None
-            and parent.default_map is not None
-        ):
-            default_map = parent.default_map.get(info_name)
-
-        self.default_map: t.Optional[t.Dict[str, t.Any]] = default_map
-
-        #: This flag indicates if a subcommand is going to be executed. A
-        #: group callback can use this information to figure out if it's
-        #: being executed directly or because the execution flow passes
-        #: onwards to a subcommand. By default it's None, but it can be
-        #: the name of the subcommand to execute.
-        #:
-        #: If chaining is enabled this will be set to ``'*'`` in case
-        #: any commands are executed. It is however not possible to
-        #: figure out which ones. If you require this knowledge you
-        #: should use a :func:`result_callback`.
-        self.invoked_subcommand: t.Optional[str] = None
-
-        if terminal_width is None and parent is not None:
-            terminal_width = parent.terminal_width
-
-        #: The width of the terminal (None is autodetection).
-        self.terminal_width: t.Optional[int] = terminal_width
-
-        if max_content_width is None and parent is not None:
-            max_content_width = parent.max_content_width
-
-        #: The maximum width of formatted content (None implies a sensible
-        #: default which is 80 for most things).
-        self.max_content_width: t.Optional[int] = max_content_width
-
-        if allow_extra_args is None:
-            allow_extra_args = command.allow_extra_args
-
-        #: Indicates if the context allows extra args or if it should
-        #: fail on parsing.
-        #:
-        #: .. versionadded:: 3.0
-        self.allow_extra_args = allow_extra_args
-
-        if allow_interspersed_args is None:
-            allow_interspersed_args = command.allow_interspersed_args
-
-        #: Indicates if the context allows mixing of arguments and
-        #: options or not.
-        #:
-        #: .. versionadded:: 3.0
-        self.allow_interspersed_args: bool = allow_interspersed_args
-
-        if ignore_unknown_options is None:
-            ignore_unknown_options = command.ignore_unknown_options
-
-        #: Instructs click to ignore options that a command does not
-        #: understand and will store it on the context for later
-        #: processing. This is primarily useful for situations where you
-        #: want to call into external programs. Generally this pattern is
-        #: strongly discouraged because it's not possible to losslessly
-        #: forward all arguments.
-        #:
-        #: .. versionadded:: 4.0
-        self.ignore_unknown_options: bool = ignore_unknown_options
-
-        if help_option_names is None:
-            if parent is not None:
-                help_option_names = parent.help_option_names
-            else:
-                help_option_names = ["--help"]
-
-        #: The names for the help options.
-        self.help_option_names: t.List[str] = help_option_names
-
-        if token_normalize_func is None and parent is not None:
-            token_normalize_func = parent.token_normalize_func
-
-        #: An optional normalization function for tokens. This is
-        #: options, choices, commands etc.
-        self.token_normalize_func: t.Optional[
-            t.Callable[[str], str]
-        ] = token_normalize_func
-
-        #: Indicates if resilient parsing is enabled. In that case Click
-        #: will do its best to not cause any failures and default values
-        #: will be ignored. Useful for completion.
-        self.resilient_parsing: bool = resilient_parsing
-
-        # If there is no envvar prefix yet, but the parent has one and
-        # the command on this level has a name, we can expand the envvar
-        # prefix automatically.
- if auto_envvar_prefix is None: - if ( - parent is not None - and parent.auto_envvar_prefix is not None - and self.info_name is not None - ): - auto_envvar_prefix = ( - f"{parent.auto_envvar_prefix}_{self.info_name.upper()}" - ) - else: - auto_envvar_prefix = auto_envvar_prefix.upper() - - if auto_envvar_prefix is not None: - auto_envvar_prefix = auto_envvar_prefix.replace("-", "_") - - self.auto_envvar_prefix: t.Optional[str] = auto_envvar_prefix - - if color is None and parent is not None: - color = parent.color - - #: Controls if styling output is wanted or not. - self.color: t.Optional[bool] = color - - if show_default is None and parent is not None: - show_default = parent.show_default - - #: Show option default values when formatting help text. - self.show_default: t.Optional[bool] = show_default - - self._close_callbacks: t.List[t.Callable[[], t.Any]] = [] - self._depth = 0 - self._parameter_source: t.Dict[str, ParameterSource] = {} - self._exit_stack = ExitStack() - - def to_info_dict(self) -> t.Dict[str, t.Any]: - """Gather information that could be useful for a tool generating - user-facing documentation. This traverses the entire CLI - structure. - - .. code-block:: python - - with Context(cli) as ctx: - info = ctx.to_info_dict() - - .. versionadded:: 8.0 - """ - return { - "command": self.command.to_info_dict(self), - "info_name": self.info_name, - "allow_extra_args": self.allow_extra_args, - "allow_interspersed_args": self.allow_interspersed_args, - "ignore_unknown_options": self.ignore_unknown_options, - "auto_envvar_prefix": self.auto_envvar_prefix, - } - - def __enter__(self) -> "Context": - self._depth += 1 - push_context(self) - return self - - def __exit__(self, exc_type, exc_value, tb): # type: ignore - self._depth -= 1 - if self._depth == 0: - self.close() - pop_context() - - @contextmanager - def scope(self, cleanup: bool = True) -> t.Iterator["Context"]: - """This helper method can be used with the context object to promote - it to the current thread local (see :func:`get_current_context`). - The default behavior of this is to invoke the cleanup functions which - can be disabled by setting `cleanup` to `False`. The cleanup - functions are typically used for things such as closing file handles. - - If the cleanup is intended the context object can also be directly - used as a context manager. - - Example usage:: - - with ctx.scope(): - assert get_current_context() is ctx - - This is equivalent:: - - with ctx: - assert get_current_context() is ctx - - .. versionadded:: 5.0 - - :param cleanup: controls if the cleanup functions should be run or - not. The default is to run these functions. In - some situations the context only wants to be - temporarily pushed in which case this can be disabled. - Nested pushes automatically defer the cleanup. - """ - if not cleanup: - self._depth += 1 - try: - with self as rv: - yield rv - finally: - if not cleanup: - self._depth -= 1 - - @property - def meta(self) -> t.Dict[str, t.Any]: - """This is a dictionary which is shared with all the contexts - that are nested. It exists so that click utilities can store some - state here if they need to. It is however the responsibility of - that code to manage this dictionary well. - - The keys are supposed to be unique dotted strings. For instance - module paths are a good choice for it. What is stored in there is - irrelevant for the operation of click. However what is important is - that code that places data here adheres to the general semantics of - the system. 
- - Example usage:: - - LANG_KEY = f'{__name__}.lang' - - def set_language(value): - ctx = get_current_context() - ctx.meta[LANG_KEY] = value - - def get_language(): - return get_current_context().meta.get(LANG_KEY, 'en_US') - - .. versionadded:: 5.0 - """ - return self._meta - - def make_formatter(self) -> HelpFormatter: - """Creates the :class:`~click.HelpFormatter` for the help and - usage output. - - To quickly customize the formatter class used without overriding - this method, set the :attr:`formatter_class` attribute. - - .. versionchanged:: 8.0 - Added the :attr:`formatter_class` attribute. - """ - return self.formatter_class( - width=self.terminal_width, max_width=self.max_content_width - ) - - def with_resource(self, context_manager: t.ContextManager[V]) -> V: - """Register a resource as if it were used in a ``with`` - statement. The resource will be cleaned up when the context is - popped. - - Uses :meth:`contextlib.ExitStack.enter_context`. It calls the - resource's ``__enter__()`` method and returns the result. When - the context is popped, it closes the stack, which calls the - resource's ``__exit__()`` method. - - To register a cleanup function for something that isn't a - context manager, use :meth:`call_on_close`. Or use something - from :mod:`contextlib` to turn it into a context manager first. - - .. code-block:: python - - @click.group() - @click.option("--name") - @click.pass_context - def cli(ctx): - ctx.obj = ctx.with_resource(connect_db(name)) - - :param context_manager: The context manager to enter. - :return: Whatever ``context_manager.__enter__()`` returns. - - .. versionadded:: 8.0 - """ - return self._exit_stack.enter_context(context_manager) - - def call_on_close(self, f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: - """Register a function to be called when the context tears down. - - This can be used to close resources opened during the script - execution. Resources that support Python's context manager - protocol which would be used in a ``with`` statement should be - registered with :meth:`with_resource` instead. - - :param f: The function to execute on teardown. - """ - return self._exit_stack.callback(f) - - def close(self) -> None: - """Invoke all close callbacks registered with - :meth:`call_on_close`, and exit all context managers entered - with :meth:`with_resource`. - """ - self._exit_stack.close() - # In case the context is reused, create a new exit stack. - self._exit_stack = ExitStack() - - @property - def command_path(self) -> str: - """The computed command path. This is used for the ``usage`` - information on the help page. It's automatically created by - combining the info names of the chain of contexts to the root. 
- """ - rv = "" - if self.info_name is not None: - rv = self.info_name - if self.parent is not None: - parent_command_path = [self.parent.command_path] - - if isinstance(self.parent.command, Command): - for param in self.parent.command.get_params(self): - parent_command_path.extend(param.get_usage_pieces(self)) - - rv = f"{' '.join(parent_command_path)} {rv}" - return rv.lstrip() - - def find_root(self) -> "Context": - """Finds the outermost context.""" - node = self - while node.parent is not None: - node = node.parent - return node - - def find_object(self, object_type: t.Type[V]) -> t.Optional[V]: - """Finds the closest object of a given type.""" - node: t.Optional["Context"] = self - - while node is not None: - if isinstance(node.obj, object_type): - return node.obj - - node = node.parent - - return None - - def ensure_object(self, object_type: t.Type[V]) -> V: - """Like :meth:`find_object` but sets the innermost object to a - new instance of `object_type` if it does not exist. - """ - rv = self.find_object(object_type) - if rv is None: - self.obj = rv = object_type() - return rv - - @t.overload - def lookup_default( - self, name: str, call: "te.Literal[True]" = True - ) -> t.Optional[t.Any]: - ... - - @t.overload - def lookup_default( - self, name: str, call: "te.Literal[False]" = ... - ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: - ... - - def lookup_default(self, name: str, call: bool = True) -> t.Optional[t.Any]: - """Get the default for a parameter from :attr:`default_map`. - - :param name: Name of the parameter. - :param call: If the default is a callable, call it. Disable to - return the callable instead. - - .. versionchanged:: 8.0 - Added the ``call`` parameter. - """ - if self.default_map is not None: - value = self.default_map.get(name) - - if call and callable(value): - return value() - - return value - - return None - - def fail(self, message: str) -> "te.NoReturn": - """Aborts the execution of the program with a specific error - message. - - :param message: the error message to fail with. - """ - raise UsageError(message, self) - - def abort(self) -> "te.NoReturn": - """Aborts the script.""" - raise Abort() - - def exit(self, code: int = 0) -> "te.NoReturn": - """Exits the application with a given exit code.""" - raise Exit(code) - - def get_usage(self) -> str: - """Helper method to get formatted usage string for the current - context and command. - """ - return self.command.get_usage(self) - - def get_help(self) -> str: - """Helper method to get formatted help page for the current - context and command. - """ - return self.command.get_help(self) - - def _make_sub_context(self, command: "Command") -> "Context": - """Create a new context of the same type as this context, but - for a new command. - - :meta private: - """ - return type(self)(command, info_name=command.name, parent=self) - - def invoke( - __self, # noqa: B902 - __callback: t.Union["Command", t.Callable[..., t.Any]], - *args: t.Any, - **kwargs: t.Any, - ) -> t.Any: - """Invokes a command callback in exactly the way it expects. There - are two ways to invoke this method: - - 1. the first argument can be a callback and all other arguments and - keyword arguments are forwarded directly to the function. - 2. the first argument is a click command object. In that case all - arguments are forwarded as well but proper click parameters - (options and click arguments) must be keyword arguments and Click - will fill in defaults. 
- - Note that before Click 3.2 keyword arguments were not properly filled - in against the intention of this code and no context was created. For - more information about this change and why it was done in a bugfix - release see :ref:`upgrade-to-3.2`. - - .. versionchanged:: 8.0 - All ``kwargs`` are tracked in :attr:`params` so they will be - passed if :meth:`forward` is called at multiple levels. - """ - if isinstance(__callback, Command): - other_cmd = __callback - - if other_cmd.callback is None: - raise TypeError( - "The given command does not have a callback that can be invoked." - ) - else: - __callback = other_cmd.callback - - ctx = __self._make_sub_context(other_cmd) - - for param in other_cmd.params: - if param.name not in kwargs and param.expose_value: - kwargs[param.name] = param.type_cast_value( # type: ignore - ctx, param.get_default(ctx) - ) - - # Track all kwargs as params, so that forward() will pass - # them on in subsequent calls. - ctx.params.update(kwargs) - else: - ctx = __self - - with augment_usage_errors(__self): - with ctx: - return __callback(*args, **kwargs) - - def forward( - __self, __cmd: "Command", *args: t.Any, **kwargs: t.Any # noqa: B902 - ) -> t.Any: - """Similar to :meth:`invoke` but fills in default keyword - arguments from the current context if the other command expects - it. This cannot invoke callbacks directly, only other commands. - - .. versionchanged:: 8.0 - All ``kwargs`` are tracked in :attr:`params` so they will be - passed if ``forward`` is called at multiple levels. - """ - # Can only forward to other commands, not direct callbacks. - if not isinstance(__cmd, Command): - raise TypeError("Callback is not a command.") - - for param in __self.params: - if param not in kwargs: - kwargs[param] = __self.params[param] - - return __self.invoke(__cmd, *args, **kwargs) - - def set_parameter_source(self, name: str, source: ParameterSource) -> None: - """Set the source of a parameter. This indicates the location - from which the value of the parameter was obtained. - - :param name: The name of the parameter. - :param source: A member of :class:`~click.core.ParameterSource`. - """ - self._parameter_source[name] = source - - def get_parameter_source(self, name: str) -> t.Optional[ParameterSource]: - """Get the source of a parameter. This indicates the location - from which the value of the parameter was obtained. - - This can be useful for determining when a user specified a value - on the command line that is the same as the default value. It - will be :attr:`~click.core.ParameterSource.DEFAULT` only if the - value was actually taken from the default. - - :param name: The name of the parameter. - :rtype: ParameterSource - - .. versionchanged:: 8.0 - Returns ``None`` if the parameter was not provided from any - source. - """ - return self._parameter_source.get(name) - - -class BaseCommand: - """The base command implements the minimal API contract of commands. - Most code will never use this as it does not implement a lot of useful - functionality but it can act as the direct subclass of alternative - parsing methods that do not depend on the Click parser. - - For instance, this can be used to bridge Click and other systems like - argparse or docopt. - - Because base commands do not implement a lot of the API that other - parts of Click take for granted, they are not supported for all - operations. For instance, they cannot be used with the decorators - usually and they have no built-in callback system. - - .. 
versionchanged:: 2.0 - Added the `context_settings` parameter. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - """ - - #: The context class to create with :meth:`make_context`. - #: - #: .. versionadded:: 8.0 - context_class: t.Type[Context] = Context - #: the default for the :attr:`Context.allow_extra_args` flag. - allow_extra_args = False - #: the default for the :attr:`Context.allow_interspersed_args` flag. - allow_interspersed_args = True - #: the default for the :attr:`Context.ignore_unknown_options` flag. - ignore_unknown_options = False - - def __init__( - self, - name: t.Optional[str], - context_settings: t.Optional[t.Dict[str, t.Any]] = None, - ) -> None: - #: the name the command thinks it has. Upon registering a command - #: on a :class:`Group` the group will default the command name - #: with this information. You should instead use the - #: :class:`Context`\'s :attr:`~Context.info_name` attribute. - self.name = name - - if context_settings is None: - context_settings = {} - - #: an optional dictionary with defaults passed to the context. - self.context_settings: t.Dict[str, t.Any] = context_settings - - def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: - """Gather information that could be useful for a tool generating - user-facing documentation. This traverses the entire structure - below this command. - - Use :meth:`click.Context.to_info_dict` to traverse the entire - CLI structure. - - :param ctx: A :class:`Context` representing this command. - - .. versionadded:: 8.0 - """ - return {"name": self.name} - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} {self.name}>" - - def get_usage(self, ctx: Context) -> str: - raise NotImplementedError("Base commands cannot get usage") - - def get_help(self, ctx: Context) -> str: - raise NotImplementedError("Base commands cannot get help") - - def make_context( - self, - info_name: t.Optional[str], - args: t.List[str], - parent: t.Optional[Context] = None, - **extra: t.Any, - ) -> Context: - """This function when given an info name and arguments will kick - off the parsing and create a new :class:`Context`. It does not - invoke the actual command callback though. - - To quickly customize the context class used without overriding - this method, set the :attr:`context_class` attribute. - - :param info_name: the info name for this invocation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it's usually - the name of the script, for commands below it it's - the name of the command. - :param args: the arguments to parse as list of strings. - :param parent: the parent context if available. - :param extra: extra keyword arguments forwarded to the context - constructor. - - .. versionchanged:: 8.0 - Added the :attr:`context_class` attribute. - """ - for key, value in self.context_settings.items(): - if key not in extra: - extra[key] = value - - ctx = self.context_class( - self, info_name=info_name, parent=parent, **extra # type: ignore - ) - - with ctx.scope(cleanup=False): - self.parse_args(ctx, args) - return ctx - - def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: - """Given a context and a list of arguments this creates the parser - and parses the arguments, then modifies the context as necessary. - This is automatically invoked by :meth:`make_context`. 
- """ - raise NotImplementedError("Base commands do not know how to parse arguments.") - - def invoke(self, ctx: Context) -> t.Any: - """Given a context, this invokes the command. The default - implementation is raising a not implemented error. - """ - raise NotImplementedError("Base commands are not invokable by default") - - def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: - """Return a list of completions for the incomplete value. Looks - at the names of chained multi-commands. - - Any command could be part of a chained multi-command, so sibling - commands are valid at any point during command completion. Other - command classes will return more completions. - - :param ctx: Invocation context for this command. - :param incomplete: Value being completed. May be empty. - - .. versionadded:: 8.0 - """ - from click.shell_completion import CompletionItem - - results: t.List["CompletionItem"] = [] - - while ctx.parent is not None: - ctx = ctx.parent - - if isinstance(ctx.command, MultiCommand) and ctx.command.chain: - results.extend( - CompletionItem(name, help=command.get_short_help_str()) - for name, command in _complete_visible_commands(ctx, incomplete) - if name not in ctx.protected_args - ) - - return results - - @t.overload - def main( - self, - args: t.Optional[t.Sequence[str]] = None, - prog_name: t.Optional[str] = None, - complete_var: t.Optional[str] = None, - standalone_mode: "te.Literal[True]" = True, - **extra: t.Any, - ) -> "te.NoReturn": - ... - - @t.overload - def main( - self, - args: t.Optional[t.Sequence[str]] = None, - prog_name: t.Optional[str] = None, - complete_var: t.Optional[str] = None, - standalone_mode: bool = ..., - **extra: t.Any, - ) -> t.Any: - ... - - def main( - self, - args: t.Optional[t.Sequence[str]] = None, - prog_name: t.Optional[str] = None, - complete_var: t.Optional[str] = None, - standalone_mode: bool = True, - windows_expand_args: bool = True, - **extra: t.Any, - ) -> t.Any: - """This is the way to invoke a script with all the bells and - whistles as a command line application. This will always terminate - the application after a call. If this is not wanted, ``SystemExit`` - needs to be caught. - - This method is also available by directly calling the instance of - a :class:`Command`. - - :param args: the arguments that should be used for parsing. If not - provided, ``sys.argv[1:]`` is used. - :param prog_name: the program name that should be used. By default - the program name is constructed by taking the file - name from ``sys.argv[0]``. - :param complete_var: the environment variable that controls the - bash completion support. The default is - ``"__COMPLETE"`` with prog_name in - uppercase. - :param standalone_mode: the default behavior is to invoke the script - in standalone mode. Click will then - handle exceptions and convert them into - error messages and the function will never - return but shut down the interpreter. If - this is set to `False` they will be - propagated to the caller and the return - value of this function is the return value - of :meth:`invoke`. - :param windows_expand_args: Expand glob patterns, user dir, and - env vars in command line args on Windows. - :param extra: extra keyword arguments are forwarded to the context - constructor. See :class:`Context` for more information. - - .. versionchanged:: 8.0.1 - Added the ``windows_expand_args`` parameter to allow - disabling command line arg expansion on Windows. - - .. 
versionchanged:: 8.0 - When taking arguments from ``sys.argv`` on Windows, glob - patterns, user dir, and env vars are expanded. - - .. versionchanged:: 3.0 - Added the ``standalone_mode`` parameter. - """ - if args is None: - args = sys.argv[1:] - - if os.name == "nt" and windows_expand_args: - args = _expand_args(args) - else: - args = list(args) - - if prog_name is None: - prog_name = _detect_program_name() - - # Process shell completion requests and exit early. - self._main_shell_completion(extra, prog_name, complete_var) - - try: - try: - with self.make_context(prog_name, args, **extra) as ctx: - rv = self.invoke(ctx) - if not standalone_mode: - return rv - # it's not safe to `ctx.exit(rv)` here! - # note that `rv` may actually contain data like "1" which - # has obvious effects - # more subtle case: `rv=[None, None]` can come out of - # chained commands which all returned `None` -- so it's not - # even always obvious that `rv` indicates success/failure - # by its truthiness/falsiness - ctx.exit() - except (EOFError, KeyboardInterrupt): - echo(file=sys.stderr) - raise Abort() from None - except ClickException as e: - if not standalone_mode: - raise - e.show() - sys.exit(e.exit_code) - except OSError as e: - if e.errno == errno.EPIPE: - sys.stdout = t.cast(t.TextIO, PacifyFlushWrapper(sys.stdout)) - sys.stderr = t.cast(t.TextIO, PacifyFlushWrapper(sys.stderr)) - sys.exit(1) - else: - raise - except Exit as e: - if standalone_mode: - sys.exit(e.exit_code) - else: - # in non-standalone mode, return the exit code - # note that this is only reached if `self.invoke` above raises - # an Exit explicitly -- thus bypassing the check there which - # would return its result - # the results of non-standalone execution may therefore be - # somewhat ambiguous: if there are codepaths which lead to - # `ctx.exit(1)` and to `return 1`, the caller won't be able to - # tell the difference between the two - return e.exit_code - except Abort: - if not standalone_mode: - raise - echo(_("Aborted!"), file=sys.stderr) - sys.exit(1) - - def _main_shell_completion( - self, - ctx_args: t.Dict[str, t.Any], - prog_name: str, - complete_var: t.Optional[str] = None, - ) -> None: - """Check if the shell is asking for tab completion, process - that, then exit early. Called from :meth:`main` before the - program is invoked. - - :param prog_name: Name of the executable in the shell. - :param complete_var: Name of the environment variable that holds - the completion instruction. Defaults to - ``_{PROG_NAME}_COMPLETE``. - """ - if complete_var is None: - complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper() - - instruction = os.environ.get(complete_var) - - if not instruction: - return - - from .shell_completion import shell_complete - - rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction) - sys.exit(rv) - - def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any: - """Alias for :meth:`main`.""" - return self.main(*args, **kwargs) - - -class Command(BaseCommand): - """Commands are the basic building block of command line interfaces in - Click. A basic command handles command line parsing and might dispatch - more parsing to commands nested below it. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - :param callback: the callback to invoke. This is optional. - :param params: the parameters to register with this command. 
This can - be either :class:`Option` or :class:`Argument` objects. - :param help: the help string to use for this command. - :param epilog: like the help string but it's printed at the end of the - help page after everything else. - :param short_help: the short help to use for this command. This is - shown on the command listing of the parent command. - :param add_help_option: by default each command registers a ``--help`` - option. This can be disabled by this parameter. - :param no_args_is_help: this controls what happens if no arguments are - provided. This option is disabled by default. - If enabled this will add ``--help`` as argument - if no arguments are passed - :param hidden: hide this command from help outputs. - - :param deprecated: issues a message indicating that - the command is deprecated. - - .. versionchanged:: 8.1 - ``help``, ``epilog``, and ``short_help`` are stored unprocessed, - all formatting is done when outputting help text, not at init, - and is done even if not using the ``@command`` decorator. - - .. versionchanged:: 8.0 - Added a ``repr`` showing the command name. - - .. versionchanged:: 7.1 - Added the ``no_args_is_help`` parameter. - - .. versionchanged:: 2.0 - Added the ``context_settings`` parameter. - """ - - def __init__( - self, - name: t.Optional[str], - context_settings: t.Optional[t.Dict[str, t.Any]] = None, - callback: t.Optional[t.Callable[..., t.Any]] = None, - params: t.Optional[t.List["Parameter"]] = None, - help: t.Optional[str] = None, - epilog: t.Optional[str] = None, - short_help: t.Optional[str] = None, - options_metavar: t.Optional[str] = "[OPTIONS]", - add_help_option: bool = True, - no_args_is_help: bool = False, - hidden: bool = False, - deprecated: bool = False, - ) -> None: - super().__init__(name, context_settings) - #: the callback to execute when the command fires. This might be - #: `None` in which case nothing happens. - self.callback = callback - #: the list of parameters for this command in the order they - #: should show up in the help page and execute. Eager parameters - #: will automatically be handled before non eager ones. - self.params: t.List["Parameter"] = params or [] - self.help = help - self.epilog = epilog - self.options_metavar = options_metavar - self.short_help = short_help - self.add_help_option = add_help_option - self.no_args_is_help = no_args_is_help - self.hidden = hidden - self.deprecated = deprecated - - def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: - info_dict = super().to_info_dict(ctx) - info_dict.update( - params=[param.to_info_dict() for param in self.get_params(ctx)], - help=self.help, - epilog=self.epilog, - short_help=self.short_help, - hidden=self.hidden, - deprecated=self.deprecated, - ) - return info_dict - - def get_usage(self, ctx: Context) -> str: - """Formats the usage line into a string and returns it. - - Calls :meth:`format_usage` internally. - """ - formatter = ctx.make_formatter() - self.format_usage(ctx, formatter) - return formatter.getvalue().rstrip("\n") - - def get_params(self, ctx: Context) -> t.List["Parameter"]: - rv = self.params - help_option = self.get_help_option(ctx) - - if help_option is not None: - rv = [*rv, help_option] - - return rv - - def format_usage(self, ctx: Context, formatter: HelpFormatter) -> None: - """Writes the usage line into the formatter. - - This is a low-level method called by :meth:`get_usage`. 
- """ - pieces = self.collect_usage_pieces(ctx) - formatter.write_usage(ctx.command_path, " ".join(pieces)) - - def collect_usage_pieces(self, ctx: Context) -> t.List[str]: - """Returns all the pieces that go into the usage line and returns - it as a list of strings. - """ - rv = [self.options_metavar] if self.options_metavar else [] - - for param in self.get_params(ctx): - rv.extend(param.get_usage_pieces(ctx)) - - return rv - - def get_help_option_names(self, ctx: Context) -> t.List[str]: - """Returns the names for the help option.""" - all_names = set(ctx.help_option_names) - for param in self.params: - all_names.difference_update(param.opts) - all_names.difference_update(param.secondary_opts) - return list(all_names) - - def get_help_option(self, ctx: Context) -> t.Optional["Option"]: - """Returns the help option object.""" - help_options = self.get_help_option_names(ctx) - - if not help_options or not self.add_help_option: - return None - - def show_help(ctx: Context, param: "Parameter", value: str) -> None: - if value and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - return Option( - help_options, - is_flag=True, - is_eager=True, - expose_value=False, - callback=show_help, - help=_("Show this message and exit."), - ) - - def make_parser(self, ctx: Context) -> OptionParser: - """Creates the underlying option parser for this command.""" - parser = OptionParser(ctx) - for param in self.get_params(ctx): - param.add_to_parser(parser, ctx) - return parser - - def get_help(self, ctx: Context) -> str: - """Formats the help into a string and returns it. - - Calls :meth:`format_help` internally. - """ - formatter = ctx.make_formatter() - self.format_help(ctx, formatter) - return formatter.getvalue().rstrip("\n") - - def get_short_help_str(self, limit: int = 45) -> str: - """Gets short help for the command or makes it by shortening the - long help string. - """ - if self.short_help: - text = inspect.cleandoc(self.short_help) - elif self.help: - text = make_default_short_help(self.help, limit) - else: - text = "" - - if self.deprecated: - text = _("(Deprecated) {text}").format(text=text) - - return text.strip() - - def format_help(self, ctx: Context, formatter: HelpFormatter) -> None: - """Writes the help into the formatter if it exists. - - This is a low-level method called by :meth:`get_help`. 
- - This calls the following methods: - - - :meth:`format_usage` - - :meth:`format_help_text` - - :meth:`format_options` - - :meth:`format_epilog` - """ - self.format_usage(ctx, formatter) - self.format_help_text(ctx, formatter) - self.format_options(ctx, formatter) - self.format_epilog(ctx, formatter) - - def format_help_text(self, ctx: Context, formatter: HelpFormatter) -> None: - """Writes the help text to the formatter if it exists.""" - text = self.help if self.help is not None else "" - - if self.deprecated: - text = _("(Deprecated) {text}").format(text=text) - - if text: - text = inspect.cleandoc(text).partition("\f")[0] - formatter.write_paragraph() - - with formatter.indentation(): - formatter.write_text(text) - - def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: - """Writes all the options into the formatter if they exist.""" - opts = [] - for param in self.get_params(ctx): - rv = param.get_help_record(ctx) - if rv is not None: - opts.append(rv) - - if opts: - with formatter.section(_("Options")): - formatter.write_dl(opts) - - def format_epilog(self, ctx: Context, formatter: HelpFormatter) -> None: - """Writes the epilog into the formatter if it exists.""" - if self.epilog: - epilog = inspect.cleandoc(self.epilog) - formatter.write_paragraph() - - with formatter.indentation(): - formatter.write_text(epilog) - - def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: - if not args and self.no_args_is_help and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - parser = self.make_parser(ctx) - opts, args, param_order = parser.parse_args(args=args) - - for param in iter_params_for_processing(param_order, self.get_params(ctx)): - value, args = param.handle_parse_result(ctx, opts, args) - - if args and not ctx.allow_extra_args and not ctx.resilient_parsing: - ctx.fail( - ngettext( - "Got unexpected extra argument ({args})", - "Got unexpected extra arguments ({args})", - len(args), - ).format(args=" ".join(map(str, args))) - ) - - ctx.args = args - ctx._opt_prefixes.update(parser._opt_prefixes) - return args - - def invoke(self, ctx: Context) -> t.Any: - """Given a context, this invokes the attached callback (if it exists) - in the right way. - """ - if self.deprecated: - message = _( - "DeprecationWarning: The command {name!r} is deprecated." - ).format(name=self.name) - echo(style(message, fg="red"), err=True) - - if self.callback is not None: - return ctx.invoke(self.callback, **ctx.params) - - def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: - """Return a list of completions for the incomplete value. Looks - at the names of options and chained multi-commands. - - :param ctx: Invocation context for this command. - :param incomplete: Value being completed. May be empty. - - .. 
versionadded:: 8.0 - """ - from click.shell_completion import CompletionItem - - results: t.List["CompletionItem"] = [] - - if incomplete and not incomplete[0].isalnum(): - for param in self.get_params(ctx): - if ( - not isinstance(param, Option) - or param.hidden - or ( - not param.multiple - and ctx.get_parameter_source(param.name) # type: ignore - is ParameterSource.COMMANDLINE - ) - ): - continue - - results.extend( - CompletionItem(name, help=param.help) - for name in [*param.opts, *param.secondary_opts] - if name.startswith(incomplete) - ) - - results.extend(super().shell_complete(ctx, incomplete)) - return results - - -class MultiCommand(Command): - """A multi command is the basic implementation of a command that - dispatches to subcommands. The most common version is the - :class:`Group`. - - :param invoke_without_command: this controls how the multi command itself - is invoked. By default it's only invoked - if a subcommand is provided. - :param no_args_is_help: this controls what happens if no arguments are - provided. This option is enabled by default if - `invoke_without_command` is disabled or disabled - if it's enabled. If enabled this will add - ``--help`` as argument if no arguments are - passed. - :param subcommand_metavar: the string that is used in the documentation - to indicate the subcommand place. - :param chain: if this is set to `True` chaining of multiple subcommands - is enabled. This restricts the form of commands in that - they cannot have optional arguments but it allows - multiple commands to be chained together. - :param result_callback: The result callback to attach to this multi - command. This can be set or changed later with the - :meth:`result_callback` decorator. - """ - - allow_extra_args = True - allow_interspersed_args = False - - def __init__( - self, - name: t.Optional[str] = None, - invoke_without_command: bool = False, - no_args_is_help: t.Optional[bool] = None, - subcommand_metavar: t.Optional[str] = None, - chain: bool = False, - result_callback: t.Optional[t.Callable[..., t.Any]] = None, - **attrs: t.Any, - ) -> None: - super().__init__(name, **attrs) - - if no_args_is_help is None: - no_args_is_help = not invoke_without_command - - self.no_args_is_help = no_args_is_help - self.invoke_without_command = invoke_without_command - - if subcommand_metavar is None: - if chain: - subcommand_metavar = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..." - else: - subcommand_metavar = "COMMAND [ARGS]..." - - self.subcommand_metavar = subcommand_metavar - self.chain = chain - # The result callback that is stored. This can be set or - # overridden with the :func:`result_callback` decorator. - self._result_callback = result_callback - - if self.chain: - for param in self.params: - if isinstance(param, Argument) and not param.required: - raise RuntimeError( - "Multi commands in chain mode cannot have" - " optional arguments." 
- ) - - def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: - info_dict = super().to_info_dict(ctx) - commands = {} - - for name in self.list_commands(ctx): - command = self.get_command(ctx, name) - - if command is None: - continue - - sub_ctx = ctx._make_sub_context(command) - - with sub_ctx.scope(cleanup=False): - commands[name] = command.to_info_dict(sub_ctx) - - info_dict.update(commands=commands, chain=self.chain) - return info_dict - - def collect_usage_pieces(self, ctx: Context) -> t.List[str]: - rv = super().collect_usage_pieces(ctx) - rv.append(self.subcommand_metavar) - return rv - - def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: - super().format_options(ctx, formatter) - self.format_commands(ctx, formatter) - - def result_callback(self, replace: bool = False) -> t.Callable[[F], F]: - """Adds a result callback to the command. By default if a - result callback is already registered this will chain them but - this can be disabled with the `replace` parameter. The result - callback is invoked with the return value of the subcommand - (or the list of return values from all subcommands if chaining - is enabled) as well as the parameters as they would be passed - to the main callback. - - Example:: - - @click.group() - @click.option('-i', '--input', default=23) - def cli(input): - return 42 - - @cli.result_callback() - def process_result(result, input): - return result + input - - :param replace: if set to `True` an already existing result - callback will be removed. - - .. versionchanged:: 8.0 - Renamed from ``resultcallback``. - - .. versionadded:: 3.0 - """ - - def decorator(f: F) -> F: - old_callback = self._result_callback - - if old_callback is None or replace: - self._result_callback = f - return f - - def function(__value, *args, **kwargs): # type: ignore - inner = old_callback(__value, *args, **kwargs) # type: ignore - return f(inner, *args, **kwargs) - - self._result_callback = rv = update_wrapper(t.cast(F, function), f) - return rv - - return decorator - - def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None: - """Extra format methods for multi methods that adds all the commands - after the options. - """ - commands = [] - for subcommand in self.list_commands(ctx): - cmd = self.get_command(ctx, subcommand) - # What is this, the tool lied about a command. 
Ignore it - if cmd is None: - continue - if cmd.hidden: - continue - - commands.append((subcommand, cmd)) - - # allow for 3 times the default spacing - if len(commands): - limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) - - rows = [] - for subcommand, cmd in commands: - help = cmd.get_short_help_str(limit) - rows.append((subcommand, help)) - - if rows: - with formatter.section(_("Commands")): - formatter.write_dl(rows) - - def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: - if not args and self.no_args_is_help and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - rest = super().parse_args(ctx, args) - - if self.chain: - ctx.protected_args = rest - ctx.args = [] - elif rest: - ctx.protected_args, ctx.args = rest[:1], rest[1:] - - return ctx.args - - def invoke(self, ctx: Context) -> t.Any: - def _process_result(value: t.Any) -> t.Any: - if self._result_callback is not None: - value = ctx.invoke(self._result_callback, value, **ctx.params) - return value - - if not ctx.protected_args: - if self.invoke_without_command: - # No subcommand was invoked, so the result callback is - # invoked with the group return value for regular - # groups, or an empty list for chained groups. - with ctx: - rv = super().invoke(ctx) - return _process_result([] if self.chain else rv) - ctx.fail(_("Missing command.")) - - # Fetch args back out - args = [*ctx.protected_args, *ctx.args] - ctx.args = [] - ctx.protected_args = [] - - # If we're not in chain mode, we only allow the invocation of a - # single command but we also inform the current context about the - # name of the command to invoke. - if not self.chain: - # Make sure the context is entered so we do not clean up - # resources until the result processor has worked. - with ctx: - cmd_name, cmd, args = self.resolve_command(ctx, args) - assert cmd is not None - ctx.invoked_subcommand = cmd_name - super().invoke(ctx) - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) - with sub_ctx: - return _process_result(sub_ctx.command.invoke(sub_ctx)) - - # In chain mode we create the contexts step by step, but after the - # base command has been invoked. Because at that point we do not - # know the subcommands yet, the invoked subcommand attribute is - # set to ``*`` to inform the command that subcommands are executed - # but nothing else. - with ctx: - ctx.invoked_subcommand = "*" if args else None - super().invoke(ctx) - - # Otherwise we make every single context and invoke them in a - # chain. In that case the return value to the result processor - # is the list of all invoked subcommand's results. - contexts = [] - while args: - cmd_name, cmd, args = self.resolve_command(ctx, args) - assert cmd is not None - sub_ctx = cmd.make_context( - cmd_name, - args, - parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False, - ) - contexts.append(sub_ctx) - args, sub_ctx.args = sub_ctx.args, [] - - rv = [] - for sub_ctx in contexts: - with sub_ctx: - rv.append(sub_ctx.command.invoke(sub_ctx)) - return _process_result(rv) - - def resolve_command( - self, ctx: Context, args: t.List[str] - ) -> t.Tuple[t.Optional[str], t.Optional[Command], t.List[str]]: - cmd_name = make_str(args[0]) - original_cmd_name = cmd_name - - # Get the command - cmd = self.get_command(ctx, cmd_name) - - # If we can't find the command but there is a normalization - # function available, we try with that one. 
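The retry-with-normalization path mentioned above is what enables case-insensitive command matching. A minimal sketch, assuming Click 8.x, using the documented `token_normalize_func` context setting; the command name is hypothetical:

import click

CONTEXT_SETTINGS = dict(token_normalize_func=lambda token: token.lower())

@click.group(context_settings=CONTEXT_SETTINGS)
def cli():
    pass

@cli.command()
def sync():
    click.echo("synced")

# `cli SYNC` resolves to `sync`: get_command() misses on the raw token,
# then resolve_command() retries with the normalized one.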
- if cmd is None and ctx.token_normalize_func is not None: - cmd_name = ctx.token_normalize_func(cmd_name) - cmd = self.get_command(ctx, cmd_name) - - # If we don't find the command we want to show an error message - # to the user that it was not provided. However, there is - # something else we should do: if the first argument looks like - # an option we want to kick off parsing again for arguments to - # resolve things like --help which now should go to the main - # place. - if cmd is None and not ctx.resilient_parsing: - if split_opt(cmd_name)[0]: - self.parse_args(ctx, ctx.args) - ctx.fail(_("No such command {name!r}.").format(name=original_cmd_name)) - return cmd_name if cmd else None, cmd, args[1:] - - def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]: - """Given a context and a command name, this returns a - :class:`Command` object if it exists or returns `None`. - """ - raise NotImplementedError - - def list_commands(self, ctx: Context) -> t.List[str]: - """Returns a list of subcommand names in the order they should - appear. - """ - return [] - - def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: - """Return a list of completions for the incomplete value. Looks - at the names of options, subcommands, and chained - multi-commands. - - :param ctx: Invocation context for this command. - :param incomplete: Value being completed. May be empty. - - .. versionadded:: 8.0 - """ - from click.shell_completion import CompletionItem - - results = [ - CompletionItem(name, help=command.get_short_help_str()) - for name, command in _complete_visible_commands(ctx, incomplete) - ] - results.extend(super().shell_complete(ctx, incomplete)) - return results - - -class Group(MultiCommand): - """A group allows a command to have subcommands attached. This is - the most common way to implement nesting in Click. - - :param name: The name of the group command. - :param commands: A dict mapping names to :class:`Command` objects. - Can also be a list of :class:`Command`, which will use - :attr:`Command.name` to create the dict. - :param attrs: Other command arguments described in - :class:`MultiCommand`, :class:`Command`, and - :class:`BaseCommand`. - - .. versionchanged:: 8.0 - The ``commands`` argument can be a list of command objects. - """ - - #: If set, this is used by the group's :meth:`command` decorator - #: as the default :class:`Command` class. This is useful to make all - #: subcommands use a custom command class. - #: - #: .. versionadded:: 8.0 - command_class: t.Optional[t.Type[Command]] = None - - #: If set, this is used by the group's :meth:`group` decorator - #: as the default :class:`Group` class. This is useful to make all - #: subgroups use a custom group class. - #: - #: If set to the special value :class:`type` (literally - #: ``group_class = type``), this group's class will be used as the - #: default class. This makes a custom group class continue to make - #: custom groups. - #: - #: ..
versionadded:: 8.0 - group_class: t.Optional[t.Union[t.Type["Group"], t.Type[type]]] = None - # Literal[type] isn't valid, so use Type[type] - - def __init__( - self, - name: t.Optional[str] = None, - commands: t.Optional[t.Union[t.Dict[str, Command], t.Sequence[Command]]] = None, - **attrs: t.Any, - ) -> None: - super().__init__(name, **attrs) - - if commands is None: - commands = {} - elif isinstance(commands, abc.Sequence): - commands = {c.name: c for c in commands if c.name is not None} - - #: The registered subcommands by their exported names. - self.commands: t.Dict[str, Command] = commands - - def add_command(self, cmd: Command, name: t.Optional[str] = None) -> None: - """Registers another :class:`Command` with this group. If the name - is not provided, the name of the command is used. - """ - name = name or cmd.name - if name is None: - raise TypeError("Command has no name.") - _check_multicommand(self, name, cmd, register=True) - self.commands[name] = cmd - - @t.overload - def command(self, __func: t.Callable[..., t.Any]) -> Command: - ... - - @t.overload - def command( - self, *args: t.Any, **kwargs: t.Any - ) -> t.Callable[[t.Callable[..., t.Any]], Command]: - ... - - def command( - self, *args: t.Any, **kwargs: t.Any - ) -> t.Union[t.Callable[[t.Callable[..., t.Any]], Command], Command]: - """A shortcut decorator for declaring and attaching a command to - the group. This takes the same arguments as :func:`command` and - immediately registers the created command with this group by - calling :meth:`add_command`. - - To customize the command class used, set the - :attr:`command_class` attribute. - - .. versionchanged:: 8.1 - This decorator can be applied without parentheses. - - .. versionchanged:: 8.0 - Added the :attr:`command_class` attribute. - """ - from .decorators import command - - if self.command_class and kwargs.get("cls") is None: - kwargs["cls"] = self.command_class - - func: t.Optional[t.Callable] = None - - if args and callable(args[0]): - assert ( - len(args) == 1 and not kwargs - ), "Use 'command(**kwargs)(callable)' to provide arguments." - (func,) = args - args = () - - def decorator(f: t.Callable[..., t.Any]) -> Command: - cmd: Command = command(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - - if func is not None: - return decorator(func) - - return decorator - - @t.overload - def group(self, __func: t.Callable[..., t.Any]) -> "Group": - ... - - @t.overload - def group( - self, *args: t.Any, **kwargs: t.Any - ) -> t.Callable[[t.Callable[..., t.Any]], "Group"]: - ... - - def group( - self, *args: t.Any, **kwargs: t.Any - ) -> t.Union[t.Callable[[t.Callable[..., t.Any]], "Group"], "Group"]: - """A shortcut decorator for declaring and attaching a group to - the group. This takes the same arguments as :func:`group` and - immediately registers the created group with this group by - calling :meth:`add_command`. - - To customize the group class used, set the :attr:`group_class` - attribute. - - .. versionchanged:: 8.1 - This decorator can be applied without parentheses. - - .. versionchanged:: 8.0 - Added the :attr:`group_class` attribute. - """ - from .decorators import group - - func: t.Optional[t.Callable] = None - - if args and callable(args[0]): - assert ( - len(args) == 1 and not kwargs - ), "Use 'group(**kwargs)(callable)' to provide arguments." 
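A sketch of the `command_class` hook described above, assuming Click 8.x; `TimedCommand`, `TimedGroup`, and the subcommand name are hypothetical:

import time
import click

class TimedCommand(click.Command):
    def invoke(self, ctx):
        start = time.perf_counter()
        try:
            return super().invoke(ctx)
        finally:
            click.echo(f"took {time.perf_counter() - start:.3f}s")

class TimedGroup(click.Group):
    command_class = TimedCommand  # every @cli.command() becomes a TimedCommand

@click.group(cls=TimedGroup)
def cli():
    pass

@cli.command()
def build():
    click.echo("building")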
- (func,) = args - args = () - - if self.group_class is not None and kwargs.get("cls") is None: - if self.group_class is type: - kwargs["cls"] = type(self) - else: - kwargs["cls"] = self.group_class - - def decorator(f: t.Callable[..., t.Any]) -> "Group": - cmd: Group = group(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - - if func is not None: - return decorator(func) - - return decorator - - def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]: - return self.commands.get(cmd_name) - - def list_commands(self, ctx: Context) -> t.List[str]: - return sorted(self.commands) - - -class CommandCollection(MultiCommand): - """A command collection is a multi command that merges multiple multi - commands together into one. This is a straightforward implementation - that accepts a list of different multi commands as sources and - provides all the commands for each of them. - """ - - def __init__( - self, - name: t.Optional[str] = None, - sources: t.Optional[t.List[MultiCommand]] = None, - **attrs: t.Any, - ) -> None: - super().__init__(name, **attrs) - #: The list of registered multi commands. - self.sources: t.List[MultiCommand] = sources or [] - - def add_source(self, multi_cmd: MultiCommand) -> None: - """Adds a new multi command to the chain dispatcher.""" - self.sources.append(multi_cmd) - - def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]: - for source in self.sources: - rv = source.get_command(ctx, cmd_name) - - if rv is not None: - if self.chain: - _check_multicommand(self, cmd_name, rv) - - return rv - - return None - - def list_commands(self, ctx: Context) -> t.List[str]: - rv: t.Set[str] = set() - - for source in self.sources: - rv.update(source.list_commands(ctx)) - - return sorted(rv) - - -def _check_iter(value: t.Any) -> t.Iterator[t.Any]: - """Check if the value is iterable but not a string. Raises a type - error, or returns an iterator over the value. - """ - if isinstance(value, str): - raise TypeError - - return iter(value) - - -class Parameter: - r"""A parameter to a command comes in two versions: they are either - :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently - not supported by design as some of the internals for parsing are - intentionally not finalized. - - Some settings are supported by both options and arguments. - - :param param_decls: the parameter declarations for this option or - argument. This is a list of flags or argument - names. - :param type: the type that should be used. Either a :class:`ParamType` - or a Python type. The latter is converted into the former - automatically if supported. - :param required: controls if this is optional or not. - :param default: the default value if omitted. This can also be a callable, - in which case it's invoked when the default is needed - without any arguments. - :param callback: A function to further process or validate the value - after type conversion. It is called as ``f(ctx, param, value)`` - and must return the value. It is called for all sources, - including prompts. - :param nargs: the number of arguments to match. If not ``1`` the return - value is a tuple instead of single value. The default for - nargs is ``1`` (except if the type is a tuple, then it's - the arity of the tuple). If ``nargs=-1``, all remaining - parameters are collected. - :param metavar: how the value is represented in the help page.
- :param expose_value: if this is `True` then the value is passed onwards - to the command callback and stored on the context, - otherwise it's skipped. - :param is_eager: eager values are processed before non eager ones. This - should not be set for arguments or it will inverse the - order of processing. - :param envvar: a string or list of strings that are environment variables - that should be checked. - :param shell_complete: A function that returns custom shell - completions. Used instead of the param's type completion if - given. Takes ``ctx, param, incomplete`` and must return a list - of :class:`~click.shell_completion.CompletionItem` or a list of - strings. - - .. versionchanged:: 8.0 - ``process_value`` validates required parameters and bounded - ``nargs``, and invokes the parameter callback before returning - the value. This allows the callback to validate prompts. - ``full_process_value`` is removed. - - .. versionchanged:: 8.0 - ``autocompletion`` is renamed to ``shell_complete`` and has new - semantics described above. The old name is deprecated and will - be removed in 8.1, until then it will be wrapped to match the - new requirements. - - .. versionchanged:: 8.0 - For ``multiple=True, nargs>1``, the default must be a list of - tuples. - - .. versionchanged:: 8.0 - Setting a default is no longer required for ``nargs>1``, it will - default to ``None``. ``multiple=True`` or ``nargs=-1`` will - default to ``()``. - - .. versionchanged:: 7.1 - Empty environment variables are ignored rather than taking the - empty string value. This makes it possible for scripts to clear - variables if they can't unset them. - - .. versionchanged:: 2.0 - Changed signature for parameter callback to also be passed the - parameter. The old callback format will still work, but it will - raise a warning to give you a chance to migrate the code easier. - """ - - param_type_name = "parameter" - - def __init__( - self, - param_decls: t.Optional[t.Sequence[str]] = None, - type: t.Optional[t.Union[types.ParamType, t.Any]] = None, - required: bool = False, - default: t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]] = None, - callback: t.Optional[t.Callable[[Context, "Parameter", t.Any], t.Any]] = None, - nargs: t.Optional[int] = None, - multiple: bool = False, - metavar: t.Optional[str] = None, - expose_value: bool = True, - is_eager: bool = False, - envvar: t.Optional[t.Union[str, t.Sequence[str]]] = None, - shell_complete: t.Optional[ - t.Callable[ - [Context, "Parameter", str], - t.Union[t.List["CompletionItem"], t.List[str]], - ] - ] = None, - ) -> None: - self.name, self.opts, self.secondary_opts = self._parse_decls( - param_decls or (), expose_value - ) - self.type = types.convert_type(type, default) - - # Default nargs to what the type tells us if we have that - # information available. - if nargs is None: - if self.type.is_composite: - nargs = self.type.arity - else: - nargs = 1 - - self.required = required - self.callback = callback - self.nargs = nargs - self.multiple = multiple - self.expose_value = expose_value - self.default = default - self.is_eager = is_eager - self.metavar = metavar - self.envvar = envvar - self._custom_shell_complete = shell_complete - - if __debug__: - if self.type.is_composite and nargs != self.type.arity: - raise ValueError( - f"'nargs' must be {self.type.arity} (or None) for" - f" type {self.type!r}, but it was {nargs}." - ) - - # Skip no default or callable default. 
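The default-shape rules noted above are easiest to see with a concrete option. A minimal sketch, assuming Click 8.x; with `multiple=True` and `nargs=2` the default must be a list of 2-tuples, which is what the validation that follows enforces. The option name is hypothetical:

import click

@click.command()
@click.option("--point", "points", multiple=True, nargs=2, type=float,
              default=[(0.0, 0.0), (1.0, 1.0)])  # list of 2-tuples
def cli(points):
    for x, y in points:
        click.echo(f"({x}, {y})")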
- check_default = default if not callable(default) else None - - if check_default is not None: - if multiple: - try: - # Only check the first value against nargs. - check_default = next(_check_iter(check_default), None) - except TypeError: - raise ValueError( - "'default' must be a list when 'multiple' is true." - ) from None - - # Can be None for multiple with empty default. - if nargs != 1 and check_default is not None: - try: - _check_iter(check_default) - except TypeError: - if multiple: - message = ( - "'default' must be a list of lists when 'multiple' is" - " true and 'nargs' != 1." - ) - else: - message = "'default' must be a list when 'nargs' != 1." - - raise ValueError(message) from None - - if nargs > 1 and len(check_default) != nargs: - subject = "item length" if multiple else "length" - raise ValueError( - f"'default' {subject} must match nargs={nargs}." - ) - - def to_info_dict(self) -> t.Dict[str, t.Any]: - """Gather information that could be useful for a tool generating - user-facing documentation. - - Use :meth:`click.Context.to_info_dict` to traverse the entire - CLI structure. - - .. versionadded:: 8.0 - """ - return { - "name": self.name, - "param_type_name": self.param_type_name, - "opts": self.opts, - "secondary_opts": self.secondary_opts, - "type": self.type.to_info_dict(), - "required": self.required, - "nargs": self.nargs, - "multiple": self.multiple, - "default": self.default, - "envvar": self.envvar, - } - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} {self.name}>" - - def _parse_decls( - self, decls: t.Sequence[str], expose_value: bool - ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: - raise NotImplementedError() - - @property - def human_readable_name(self) -> str: - """Returns the human readable name of this parameter. This is the - same as the name for options, but the metavar for arguments. - """ - return self.name # type: ignore - - def make_metavar(self) -> str: - if self.metavar is not None: - return self.metavar - - metavar = self.type.get_metavar(self) - - if metavar is None: - metavar = self.type.name.upper() - - if self.nargs != 1: - metavar += "..." - - return metavar - - @t.overload - def get_default( - self, ctx: Context, call: "te.Literal[True]" = True - ) -> t.Optional[t.Any]: - ... - - @t.overload - def get_default( - self, ctx: Context, call: bool = ... - ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: - ... - - def get_default( - self, ctx: Context, call: bool = True - ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: - """Get the default for the parameter. Tries - :meth:`Context.lookup_default` first, then the local default. - - :param ctx: Current context. - :param call: If the default is a callable, call it. Disable to - return the callable instead. - - .. versionchanged:: 8.0.2 - Type casting is no longer performed when getting a default. - - .. versionchanged:: 8.0.1 - Type casting can fail in resilient parsing mode. Invalid - defaults will not prevent showing help text. - - .. versionchanged:: 8.0 - Looks at ``ctx.default_map`` first. - - .. versionchanged:: 8.0 - Added the ``call`` parameter. 
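A short sketch of a callable default, assuming Click 8.x; `get_default` invokes the callable lazily, while `call=False` returns the callable itself (useful when rendering help). The option name and the `show_default` string are hypothetical:

import os
import click

@click.command()
@click.option("--user",
              default=lambda: os.environ.get("USER", "nobody"),
              show_default="current $USER")  # shown instead of the callable
def whoami(user):
    click.echo(user)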
- """ - value = ctx.lookup_default(self.name, call=False) # type: ignore - - if value is None: - value = self.default - - if call and callable(value): - value = value() - - return value - - def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: - raise NotImplementedError() - - def consume_value( - self, ctx: Context, opts: t.Mapping[str, t.Any] - ) -> t.Tuple[t.Any, ParameterSource]: - value = opts.get(self.name) # type: ignore - source = ParameterSource.COMMANDLINE - - if value is None: - value = self.value_from_envvar(ctx) - source = ParameterSource.ENVIRONMENT - - if value is None: - value = ctx.lookup_default(self.name) # type: ignore - source = ParameterSource.DEFAULT_MAP - - if value is None: - value = self.get_default(ctx) - source = ParameterSource.DEFAULT - - return value, source - - def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any: - """Convert and validate a value against the option's - :attr:`type`, :attr:`multiple`, and :attr:`nargs`. - """ - if value is None: - return () if self.multiple or self.nargs == -1 else None - - def check_iter(value: t.Any) -> t.Iterator: - try: - return _check_iter(value) - except TypeError: - # This should only happen when passing in args manually, - # the parser should construct an iterable when parsing - # the command line. - raise BadParameter( - _("Value must be an iterable."), ctx=ctx, param=self - ) from None - - if self.nargs == 1 or self.type.is_composite: - convert: t.Callable[[t.Any], t.Any] = partial( - self.type, param=self, ctx=ctx - ) - elif self.nargs == -1: - - def convert(value: t.Any) -> t.Tuple: - return tuple(self.type(x, self, ctx) for x in check_iter(value)) - - else: # nargs > 1 - - def convert(value: t.Any) -> t.Tuple: - value = tuple(check_iter(value)) - - if len(value) != self.nargs: - raise BadParameter( - ngettext( - "Takes {nargs} values but 1 was given.", - "Takes {nargs} values but {len} were given.", - len(value), - ).format(nargs=self.nargs, len=len(value)), - ctx=ctx, - param=self, - ) - - return tuple(self.type(x, self, ctx) for x in value) - - if self.multiple: - return tuple(convert(x) for x in check_iter(value)) - - return convert(value) - - def value_is_missing(self, value: t.Any) -> bool: - if value is None: - return True - - if (self.nargs != 1 or self.multiple) and value == (): - return True - - return False - - def process_value(self, ctx: Context, value: t.Any) -> t.Any: - value = self.type_cast_value(ctx, value) - - if self.required and self.value_is_missing(value): - raise MissingParameter(ctx=ctx, param=self) - - if self.callback is not None: - value = self.callback(ctx, self, value) - - return value - - def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]: - if self.envvar is None: - return None - - if isinstance(self.envvar, str): - rv = os.environ.get(self.envvar) - - if rv: - return rv - else: - for envvar in self.envvar: - rv = os.environ.get(envvar) - - if rv: - return rv - - return None - - def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]: - rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx) - - if rv is not None and self.nargs != 1: - rv = self.type.split_envvar_value(rv) - - return rv - - def handle_parse_result( - self, ctx: Context, opts: t.Mapping[str, t.Any], args: t.List[str] - ) -> t.Tuple[t.Any, t.List[str]]: - with augment_usage_errors(ctx, param=self): - value, source = self.consume_value(ctx, opts) - ctx.set_parameter_source(self.name, source) # type: ignore - - try: - value = self.process_value(ctx, value) - except Exception: 
- if not ctx.resilient_parsing: - raise - - value = None - - if self.expose_value: - ctx.params[self.name] = value # type: ignore - - return value, args - - def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]: - pass - - def get_usage_pieces(self, ctx: Context) -> t.List[str]: - return [] - - def get_error_hint(self, ctx: Context) -> str: - """Get a stringified version of the param for use in error messages to - indicate which param caused the error. - """ - hint_list = self.opts or [self.human_readable_name] - return " / ".join(f"'{x}'" for x in hint_list) - - def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: - """Return a list of completions for the incomplete value. If a - ``shell_complete`` function was given during init, it is used. - Otherwise, the :attr:`type` - :meth:`~click.types.ParamType.shell_complete` function is used. - - :param ctx: Invocation context for this command. - :param incomplete: Value being completed. May be empty. - - .. versionadded:: 8.0 - """ - if self._custom_shell_complete is not None: - results = self._custom_shell_complete(ctx, self, incomplete) - - if results and isinstance(results[0], str): - from click.shell_completion import CompletionItem - - results = [CompletionItem(c) for c in results] - - return t.cast(t.List["CompletionItem"], results) - - return self.type.shell_complete(ctx, self, incomplete) - - -class Option(Parameter): - """Options are usually optional values on the command line and - have some extra features that arguments don't have. - - All other parameters are passed onwards to the parameter constructor. - - :param show_default: Show the default value for this option in its - help text. Values are not shown by default, unless - :attr:`Context.show_default` is ``True``. If this value is a - string, it shows that string in parentheses instead of the - actual value. This is particularly useful for dynamic options. - For single option boolean flags, the default remains hidden if - its value is ``False``. - :param show_envvar: Controls if an environment variable should be - shown on the help page. Normally, environment variables are not - shown. - :param prompt: If set to ``True`` or a non empty string then the - user will be prompted for input. If set to ``True`` the prompt - will be the option name capitalized. - :param confirmation_prompt: Prompt a second time to confirm the - value if it was prompted for. Can be set to a string instead of - ``True`` to customize the message. - :param prompt_required: If set to ``False``, the user will be - prompted for input only when the option was specified as a flag - without a value. - :param hide_input: If this is ``True`` then the input on the prompt - will be hidden from the user. This is useful for password input. - :param is_flag: forces this option to act as a flag. The default is - auto detection. - :param flag_value: which value should be used for this flag if it's - enabled. This is set to a boolean automatically if - the option string contains a slash to mark two options. - :param multiple: if this is set to `True` then the argument is accepted - multiple times and recorded. This is similar to ``nargs`` - in how it works but supports arbitrary number of - arguments. - :param count: this flag makes an option increment an integer. - :param allow_from_autoenv: if this is enabled then the value of this - parameter will be pulled from an environment - variable in case a prefix is defined on the - context. - :param help: the help string. 
- :param hidden: hide this option from help outputs. - - .. versionchanged:: 8.1.0 - Help text indentation is cleaned here instead of only in the - ``@option`` decorator. - - .. versionchanged:: 8.1.0 - The ``show_default`` parameter overrides - ``Context.show_default``. - - .. versionchanged:: 8.1.0 - The default of a single option boolean flag is not shown if the - default value is ``False``. - - .. versionchanged:: 8.0.1 - ``type`` is detected from ``flag_value`` if given. - """ - - param_type_name = "option" - - def __init__( - self, - param_decls: t.Optional[t.Sequence[str]] = None, - show_default: t.Union[bool, str, None] = None, - prompt: t.Union[bool, str] = False, - confirmation_prompt: t.Union[bool, str] = False, - prompt_required: bool = True, - hide_input: bool = False, - is_flag: t.Optional[bool] = None, - flag_value: t.Optional[t.Any] = None, - multiple: bool = False, - count: bool = False, - allow_from_autoenv: bool = True, - type: t.Optional[t.Union[types.ParamType, t.Any]] = None, - help: t.Optional[str] = None, - hidden: bool = False, - show_choices: bool = True, - show_envvar: bool = False, - **attrs: t.Any, - ) -> None: - if help: - help = inspect.cleandoc(help) - - default_is_missing = "default" not in attrs - super().__init__(param_decls, type=type, multiple=multiple, **attrs) - - if prompt is True: - if self.name is None: - raise TypeError("'name' is required with 'prompt=True'.") - - prompt_text: t.Optional[str] = self.name.replace("_", " ").capitalize() - elif prompt is False: - prompt_text = None - else: - prompt_text = prompt - - self.prompt = prompt_text - self.confirmation_prompt = confirmation_prompt - self.prompt_required = prompt_required - self.hide_input = hide_input - self.hidden = hidden - - # If prompt is enabled but not required, then the option can be - # used as a flag to indicate using prompt or flag_value. - self._flag_needs_value = self.prompt is not None and not self.prompt_required - - if is_flag is None: - if flag_value is not None: - # Implicitly a flag because flag_value was set. - is_flag = True - elif self._flag_needs_value: - # Not a flag, but when used as a flag it shows a prompt. - is_flag = False - else: - # Implicitly a flag because flag options were given. - is_flag = bool(self.secondary_opts) - elif is_flag is False and not self._flag_needs_value: - # Not a flag, and prompt is not enabled, can be used as a - # flag if flag_value is set. - self._flag_needs_value = flag_value is not None - - if is_flag and default_is_missing and not self.required: - self.default: t.Union[t.Any, t.Callable[[], t.Any]] = False - - if flag_value is None: - flag_value = not self.default - - if is_flag and type is None: - # Re-guess the type from the flag value instead of the - # default. 
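The flag inference above also covers non-boolean flags: several options can share one destination via `flag_value`, and the one whose default is truthy wins when nothing is passed. A minimal sketch of that documented pattern, assuming Click 8.x:

import click

@click.command()
@click.option("--upper", "transform", flag_value="upper", default=True)
@click.option("--lower", "transform", flag_value="lower")
def cli(transform):
    click.echo(transform)  # "upper" by default, "lower" with --lower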
- self.type = types.convert_type(None, flag_value) - - self.is_flag: bool = is_flag - self.is_bool_flag = is_flag and isinstance(self.type, types.BoolParamType) - self.flag_value: t.Any = flag_value - - # Counting - self.count = count - if count: - if type is None: - self.type = types.IntRange(min=0) - if default_is_missing: - self.default = 0 - - self.allow_from_autoenv = allow_from_autoenv - self.help = help - self.show_default = show_default - self.show_choices = show_choices - self.show_envvar = show_envvar - - if __debug__: - if self.nargs == -1: - raise TypeError("nargs=-1 is not supported for options.") - - if self.prompt and self.is_flag and not self.is_bool_flag: - raise TypeError("'prompt' is not valid for non-boolean flag.") - - if not self.is_bool_flag and self.secondary_opts: - raise TypeError("Secondary flag is not valid for non-boolean flag.") - - if self.is_bool_flag and self.hide_input and self.prompt is not None: - raise TypeError( - "'prompt' with 'hide_input' is not valid for boolean flag." - ) - - if self.count: - if self.multiple: - raise TypeError("'count' is not valid with 'multiple'.") - - if self.is_flag: - raise TypeError("'count' is not valid with 'is_flag'.") - - if self.multiple and self.is_flag: - raise TypeError("'multiple' is not valid with 'is_flag', use 'count'.") - - def to_info_dict(self) -> t.Dict[str, t.Any]: - info_dict = super().to_info_dict() - info_dict.update( - help=self.help, - prompt=self.prompt, - is_flag=self.is_flag, - flag_value=self.flag_value, - count=self.count, - hidden=self.hidden, - ) - return info_dict - - def _parse_decls( - self, decls: t.Sequence[str], expose_value: bool - ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: - opts = [] - secondary_opts = [] - name = None - possible_names = [] - - for decl in decls: - if decl.isidentifier(): - if name is not None: - raise TypeError(f"Name '{name}' defined twice") - name = decl - else: - split_char = ";" if decl[:1] == "/" else "/" - if split_char in decl: - first, second = decl.split(split_char, 1) - first = first.rstrip() - if first: - possible_names.append(split_opt(first)) - opts.append(first) - second = second.lstrip() - if second: - secondary_opts.append(second.lstrip()) - if first == second: - raise ValueError( - f"Boolean option {decl!r} cannot use the" - " same flag for true/false." - ) - else: - possible_names.append(split_opt(decl)) - opts.append(decl) - - if name is None and possible_names: - possible_names.sort(key=lambda x: -len(x[0])) # group long options first - name = possible_names[0][1].replace("-", "_").lower() - if not name.isidentifier(): - name = None - - if name is None: - if not expose_value: - return None, opts, secondary_opts - raise TypeError("Could not determine name for option") - - if not opts and not secondary_opts: - raise TypeError( - f"No options defined but a name was passed ({name})." - " Did you mean to declare an argument instead? Did" - f" you mean to pass '--{name}'?" 
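The `/`-splitting in `_parse_decls` above is what drives boolean on/off flags. A minimal sketch, assuming Click 8.x; the flag names are hypothetical:

import click

@click.command()
@click.option("--shout/--no-shout", default=False)
def greet(shout):
    click.echo("HELLO" if shout else "hello")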
- ) - - return name, opts, secondary_opts - - def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: - if self.multiple: - action = "append" - elif self.count: - action = "count" - else: - action = "store" - - if self.is_flag: - action = f"{action}_const" - - if self.is_bool_flag and self.secondary_opts: - parser.add_option( - obj=self, opts=self.opts, dest=self.name, action=action, const=True - ) - parser.add_option( - obj=self, - opts=self.secondary_opts, - dest=self.name, - action=action, - const=False, - ) - else: - parser.add_option( - obj=self, - opts=self.opts, - dest=self.name, - action=action, - const=self.flag_value, - ) - else: - parser.add_option( - obj=self, - opts=self.opts, - dest=self.name, - action=action, - nargs=self.nargs, - ) - - def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]: - if self.hidden: - return None - - any_prefix_is_slash = False - - def _write_opts(opts: t.Sequence[str]) -> str: - nonlocal any_prefix_is_slash - - rv, any_slashes = join_options(opts) - - if any_slashes: - any_prefix_is_slash = True - - if not self.is_flag and not self.count: - rv += f" {self.make_metavar()}" - - return rv - - rv = [_write_opts(self.opts)] - - if self.secondary_opts: - rv.append(_write_opts(self.secondary_opts)) - - help = self.help or "" - extra = [] - - if self.show_envvar: - envvar = self.envvar - - if envvar is None: - if ( - self.allow_from_autoenv - and ctx.auto_envvar_prefix is not None - and self.name is not None - ): - envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" - - if envvar is not None: - var_str = ( - envvar - if isinstance(envvar, str) - else ", ".join(str(d) for d in envvar) - ) - extra.append(_("env var: {var}").format(var=var_str)) - - # Temporarily enable resilient parsing to avoid type casting - # failing for the default. Might be possible to extend this to - # help formatting in general. - resilient = ctx.resilient_parsing - ctx.resilient_parsing = True - - try: - default_value = self.get_default(ctx, call=False) - finally: - ctx.resilient_parsing = resilient - - show_default = False - show_default_is_str = False - - if self.show_default is not None: - if isinstance(self.show_default, str): - show_default_is_str = show_default = True - else: - show_default = self.show_default - elif ctx.show_default is not None: - show_default = ctx.show_default - - if show_default_is_str or (show_default and (default_value is not None)): - if show_default_is_str: - default_string = f"({self.show_default})" - elif isinstance(default_value, (list, tuple)): - default_string = ", ".join(str(d) for d in default_value) - elif inspect.isfunction(default_value): - default_string = _("(dynamic)") - elif self.is_bool_flag and self.secondary_opts: - # For boolean flags that have distinct True/False opts, - # use the opt without prefix instead of the value. 
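A sketch of how the extras assembled in `get_help_record` render, assuming Click 8.x; `APP` is a hypothetical auto-envvar prefix and the exact help wrapping may differ:

import click

@click.command(context_settings=dict(auto_envvar_prefix="APP"))
@click.option("--workers", default=4, show_default=True, show_envvar=True,
              help="Number of worker processes.")
def serve(workers):
    click.echo(workers)

# --help shows roughly:
#   --workers INTEGER  Number of worker processes.  [env var: APP_WORKERS; default: 4]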
- default_string = split_opt( - (self.opts if self.default else self.secondary_opts)[0] - )[1] - elif self.is_bool_flag and not self.secondary_opts and not default_value: - default_string = "" - else: - default_string = str(default_value) - - if default_string: - extra.append(_("default: {default}").format(default=default_string)) - - if ( - isinstance(self.type, types._NumberRangeBase) - # skip count with default range type - and not (self.count and self.type.min == 0 and self.type.max is None) - ): - range_str = self.type._describe_range() - - if range_str: - extra.append(range_str) - - if self.required: - extra.append(_("required")) - - if extra: - extra_str = "; ".join(extra) - help = f"{help} [{extra_str}]" if help else f"[{extra_str}]" - - return ("; " if any_prefix_is_slash else " / ").join(rv), help - - @t.overload - def get_default( - self, ctx: Context, call: "te.Literal[True]" = True - ) -> t.Optional[t.Any]: - ... - - @t.overload - def get_default( - self, ctx: Context, call: bool = ... - ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: - ... - - def get_default( - self, ctx: Context, call: bool = True - ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: - # If we're a non boolean flag our default is more complex because - # we need to look at all flags in the same group to figure out - # if we're the default one in which case we return the flag - # value as default. - if self.is_flag and not self.is_bool_flag: - for param in ctx.command.params: - if param.name == self.name and param.default: - return param.flag_value # type: ignore - - return None - - return super().get_default(ctx, call=call) - - def prompt_for_value(self, ctx: Context) -> t.Any: - """This is an alternative flow that can be activated in the full - value processing if a value does not exist. It will prompt the - user until a valid value exists and then returns the processed - value as result. - """ - assert self.prompt is not None - - # Calculate the default before prompting anything to be stable. - default = self.get_default(ctx) - - # If this is a prompt for a flag we need to handle this - # differently. - if self.is_bool_flag: - return confirm(self.prompt, default) - - return prompt( - self.prompt, - default=default, - type=self.type, - hide_input=self.hide_input, - show_choices=self.show_choices, - confirmation_prompt=self.confirmation_prompt, - value_proc=lambda x: self.process_value(ctx, x), - ) - - def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]: - rv = super().resolve_envvar_value(ctx) - - if rv is not None: - return rv - - if ( - self.allow_from_autoenv - and ctx.auto_envvar_prefix is not None - and self.name is not None - ): - envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" - rv = os.environ.get(envvar) - - if rv: - return rv - - return None - - def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]: - rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx) - - if rv is None: - return None - - value_depth = (self.nargs != 1) + bool(self.multiple) - - if value_depth > 0: - rv = self.type.split_envvar_value(rv) - - if self.multiple and self.nargs != 1: - rv = batch(rv, self.nargs) - - return rv - - def consume_value( - self, ctx: Context, opts: t.Mapping[str, "Parameter"] - ) -> t.Tuple[t.Any, ParameterSource]: - value, source = super().consume_value(ctx, opts) - - # The parser will emit a sentinel value if the option can be - # given as a flag without a value. This is different from None - # to distinguish from the flag not being given at all. 
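The `_flag_needs_value` sentinel handled below is what enables options that only prompt when given as a bare flag. A minimal sketch of that documented behavior, assuming Click 8.x:

import click

@click.command()
@click.option("--name", prompt=True, prompt_required=False, default="guest")
def hello(name):
    click.echo(f"hello {name}")

# hello             -> "hello guest"   (default, no prompt)
# hello --name      -> prompts for a value
# hello --name bob  -> "hello bob"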
- if value is _flag_needs_value: - if self.prompt is not None and not ctx.resilient_parsing: - value = self.prompt_for_value(ctx) - source = ParameterSource.PROMPT - else: - value = self.flag_value - source = ParameterSource.COMMANDLINE - - elif ( - self.multiple - and value is not None - and any(v is _flag_needs_value for v in value) - ): - value = [self.flag_value if v is _flag_needs_value else v for v in value] - source = ParameterSource.COMMANDLINE - - # The value wasn't set, or used the param's default, prompt if - # prompting is enabled. - elif ( - source in {None, ParameterSource.DEFAULT} - and self.prompt is not None - and (self.required or self.prompt_required) - and not ctx.resilient_parsing - ): - value = self.prompt_for_value(ctx) - source = ParameterSource.PROMPT - - return value, source - - -class Argument(Parameter): - """Arguments are positional parameters to a command. They generally - provide fewer features than options but can have infinite ``nargs`` - and are required by default. - - All parameters are passed onwards to the parameter constructor. - """ - - param_type_name = "argument" - - def __init__( - self, - param_decls: t.Sequence[str], - required: t.Optional[bool] = None, - **attrs: t.Any, - ) -> None: - if required is None: - if attrs.get("default") is not None: - required = False - else: - required = attrs.get("nargs", 1) > 0 - - if "multiple" in attrs: - raise TypeError("__init__() got an unexpected keyword argument 'multiple'.") - - super().__init__(param_decls, required=required, **attrs) - - if __debug__: - if self.default is not None and self.nargs == -1: - raise TypeError("'default' is not supported for nargs=-1.") - - @property - def human_readable_name(self) -> str: - if self.metavar is not None: - return self.metavar - return self.name.upper() # type: ignore - - def make_metavar(self) -> str: - if self.metavar is not None: - return self.metavar - var = self.type.get_metavar(self) - if not var: - var = self.name.upper() # type: ignore - if not self.required: - var = f"[{var}]" - if self.nargs != 1: - var += "..." - return var - - def _parse_decls( - self, decls: t.Sequence[str], expose_value: bool - ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: - if not decls: - if not expose_value: - return None, [], [] - raise TypeError("Could not determine name for argument") - if len(decls) == 1: - name = arg = decls[0] - name = name.replace("-", "_").lower() - else: - raise TypeError( - "Arguments take exactly one parameter declaration, got" - f" {len(decls)}." 
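A minimal sketch of `Argument` semantics, assuming Click 8.x: a variadic `nargs=-1` argument collects everything remaining and is not required by default, while a trailing `nargs=1` argument stays required. The names are hypothetical:

import click

@click.command()
@click.argument("srcs", nargs=-1, type=click.Path(exists=True))
@click.argument("dst", nargs=1)
def copy(srcs, dst):
    for src in srcs:
        click.echo(f"{src} -> {dst}")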
- ) - return name, [arg], [] - - def get_usage_pieces(self, ctx: Context) -> t.List[str]: - return [self.make_metavar()] - - def get_error_hint(self, ctx: Context) -> str: - return f"'{self.make_metavar()}'" - - def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: - parser.add_argument(dest=self.name, nargs=self.nargs, obj=self) diff --git a/spaces/arxnov/anotest/monotonic_align/setup.py b/spaces/arxnov/anotest/monotonic_align/setup.py deleted file mode 100644 index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000 --- a/spaces/arxnov/anotest/monotonic_align/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/auto-academic/auto-draft/tools.py b/spaces/auto-academic/auto-draft/tools.py deleted file mode 100644 index cfee7fc8b6e9f24bb85b47600fde9016280bd52a..0000000000000000000000000000000000000000 --- a/spaces/auto-academic/auto-draft/tools.py +++ /dev/null @@ -1,206 +0,0 @@ -import os -import openai -from utils.references import References -from utils.gpt_interaction import GPTModel -from utils.prompts import SYSTEM -from langchain.tools import BaseTool -from pydantic import BaseModel, Field -from typing import Optional, Type - -MAX_TOKENS = 2048 - -openai.api_key = os.getenv("OPENAI_API_KEY") -default_model = os.getenv("DEFAULT_MODEL") -if default_model is None: - default_model = "gpt-3.5-turbo-16k" -llm = GPTModel(model=default_model, delay=1) - -paper_system_prompt = '''You are an assistant designed to propose choices of research direction. -The user will input questions or some keywords of a field. You need to generate some paper titles and main contributions. Ensure you follow the instructions below: -Instructions: -- Your response should follow the JSON format. -- Your response should have the following structure: -{ - "your suggested paper title": - { - "summary": "an overview introducing what this paper will include", - "contributions": { - "contribution1": {"statement": "briefly describe this contribution", "reason": "reason why this contribution can make this paper outstanding"}, - "contribution2": {"statement": "briefly describe this contribution", "reason": "reason why this contribution can make this paper outstanding"}, - ... - } - } - "your suggested paper title": - { - "summary": "an overview introducing what this paper will include", - "contributions": { - "contribution1": {"statement": "briefly describe this contribution", "reason": "reason why this contribution can make this paper outstanding"}, - "contribution2": {"statement": "briefly describe this contribution", "reason": "reason why this contribution can make this paper outstanding"}, - ... - } - } - ... -} -- Please list three to five suggested titles and at least three contributions for each paper. -''' - - -contribution_system_prompt = '''You are an assistant designed to criticize the contributions of a paper. You will be provided with the paper's Title, References and Contributions. Ensure you follow the instructions below: -Instructions: -- Your response should follow the JSON format.
-- Your response should have the following structure: -{ - "title": "the title provided by the user", - "comment": "your thoughts on whether this title clearly reflects the key ideas of this paper, and why", - "contributions": { - "contribution1": {"statement": "briefly describe what the contribution is", - "reason": "reason why the user claims it is a contribution", - "judge": "your thoughts on whether this is a novel contribution, and why", - "suggestion": "your suggestion on how to modify the research direction to enhance its novelty"}, - "contribution2": {"statement": "briefly describe what the contribution is", - "reason": "reason why the user claims it is a contribution", - "judge": "your thoughts on whether this is a novel contribution, and why", - "suggestion": "your suggestion on how to modify the research direction to enhance its novelty"}, - ... - } -} -- You need to carefully check whether a claimed contribution has already been made in the provided references, which would make that contribution not novel. -- You also need to raise concerns if any of the contributions could be incremental or just a mild modification of an existing work. -''' - - -def find_research_directions(research_field): - output, _ = llm(systems=paper_system_prompt, prompts=research_field, return_json=False) - return output - -def find_references(title, contributions): - max_tokens = MAX_TOKENS - ref = References(title=title, description=f"{contributions}") - keywords, _ = llm(systems=SYSTEM["keywords"], prompts=title, return_json=True) - keywords = {keyword: 10 for keyword in keywords} - ref.collect_papers(keywords) - ref_prompt = ref.to_prompts(max_tokens=max_tokens) - return ref_prompt - - -def judge_novelty(title, contributions): - max_tokens = MAX_TOKENS - ref = References(title=title, description=f"{contributions}") - keywords, _ = llm(systems=SYSTEM["keywords"], prompts=title, return_json=True) - keywords = {keyword: 10 for keyword in keywords} - ref.collect_papers(keywords) - ref_prompt = ref.to_prompts(max_tokens=max_tokens) - prompt = f"Title: {title}\n References: {ref_prompt}\n Contributions: {contributions}" - output, _ = llm(systems=contribution_system_prompt, prompts=prompt, return_json=False) - return output - - -functions = [ - { - "name": "find_research_directions", - "description": "when your student has already shown interest in a specific topic and provided a rough description of potential contributions, help your student explore this direction more deeply", - "parameters": { - "type": "object", - "properties": { - "research_description": { - "type": "string", - "description": "a paragraph with details in English describing " - "(1) what is the main problem you are trying to solve " - "(2) what is the main novelty of this idea (3) how to complete this research." - } - }, - "required": ["research_description"], - }, - }, - { - "name": "find_references", - "description": "find references for given details of a paper", - "parameters": { - "type": "object", - "properties": { - "title": { - "type": "string", - "description": "the title (in English) of the academic paper your student will write.", - }, - "contributions": {"type": "string", - "description": "a general description of the contributions of this paper in English."
- "If there are multiple contributions, index them with numbers."}, - }, - "required": ["title", "contributions"], - }, - }, - { - "name": "judge_novelty", - "description": "evaluate the novelty of a paper given its title and main contributions", - "parameters": { - "type": "object", - "properties": { - "title": { - "type": "string", - "description": "the title (in English) of the academic paper your student will write.", - }, - "contributions": {"type": "string", - "description": "a general description of the contributions of this paper in English." - "If there are multiple contributions, index them with numbers."}, - }, - "required": ["title", "contributions"], - }, - } -] - -TOOLS = {"find_research_directions": find_research_directions, "find_references": find_references, "judge_novelty": judge_novelty} - -class FindResearchDirectionsCheckInput(BaseModel): - research_description: str = Field(..., description="a paragraph with details in English describing (1) what is the main problem you are trying to solve " - "(2) what is the main novelty of this idea (3) how to complete this research.") - -class TitleDescriptionCheckInput(BaseModel): - title: str = Field(..., description="the title of the academic paper your student will write in English.") - contributions: str = Field(..., description="a general description of the contributions of this paper in English." - "If there are multiple contributions, index them with numbers.") - - -class FindResearchDirectionsTool(BaseTool): - name = "find_research_directions" - description = """Useful when your student has already shown interest in a specific topic and provided a rough description of - potential contributions, and you need to help your student explore this direction more deeply. - - """ - def _run(self, research_description: str): - response = find_research_directions(research_description) - return response - - def _arun(self, research_field: str): - raise NotImplementedError("This tool does not support async") - - args_schema: Optional[Type[BaseModel]] = FindResearchDirectionsCheckInput - - -class JudgeNoveltyTool(BaseTool): - name = "judge_novelty" - description = """Useful when you need to evaluate the novelty of your student's idea. - - """ - def _run(self, title: str, contributions: str): - response = judge_novelty(title, contributions) - return response - - def _arun(self, title: str, contributions: str): - raise NotImplementedError("This tool does not support async") - - args_schema: Optional[Type[BaseModel]] = TitleDescriptionCheckInput - -class FindReferencesTool(BaseTool): - name = "find_references" - description = """Useful when you need to find references for a paper.
- - """ - def _run(self, title: str, contributions: str): - response = find_references(title, contributions) - return response - - def _arun(self, title: str, contributions: str): - raise NotImplementedError("This tool does not support async") - - args_schema: Optional[Type[BaseModel]] = TitleDescriptionCheckInput - diff --git a/spaces/autoevaluate/model-evaluator/evaluation.py b/spaces/autoevaluate/model-evaluator/evaluation.py deleted file mode 100644 index a10c86cd0e068e277ecfdc8581db262b74a5d2f0..0000000000000000000000000000000000000000 --- a/spaces/autoevaluate/model-evaluator/evaluation.py +++ /dev/null @@ -1,63 +0,0 @@ -import copy -from dataclasses import dataclass - -import streamlit as st -from huggingface_hub import DatasetFilter, HfApi -from huggingface_hub.hf_api import DatasetInfo - - -@dataclass(frozen=True, eq=True) -class EvaluationInfo: - task: str - model: str - dataset_name: str - dataset_config: str - dataset_split: str - metrics: set - - -def create_evaluation_info(dataset_info: DatasetInfo) -> int: - if dataset_info.cardData is not None: - metadata = dataset_info.cardData["eval_info"] - metadata.pop("col_mapping", None) - # TODO(lewtun): populate dataset cards with metric info - if "metrics" not in metadata: - metadata["metrics"] = frozenset() - else: - metadata["metrics"] = frozenset(metadata["metrics"]) - return EvaluationInfo(**metadata) - - -def get_evaluation_infos(): - evaluation_datasets = [] - filt = DatasetFilter(author="autoevaluate") - autoevaluate_datasets = HfApi().list_datasets(filter=filt, full=True) - for dset in autoevaluate_datasets: - try: - evaluation_datasets.append(create_evaluation_info(dset)) - except Exception as e: - print(f"Error processing dataset {dset}: {e}") - return evaluation_datasets - - -def filter_evaluated_models(models, task, dataset_name, dataset_config, dataset_split, metrics): - evaluation_infos = get_evaluation_infos() - models_to_filter = copy.copy(models) - - for model in models_to_filter: - evaluation_info = EvaluationInfo( - task=task, - model=model, - dataset_name=dataset_name, - dataset_config=dataset_config, - dataset_split=dataset_split, - metrics=frozenset(metrics), - ) - if evaluation_info in evaluation_infos: - st.info( - f"Model [`{model}`](https://huggingface.co/{model}) has already been evaluated on this configuration. \ - This model will be excluded from the evaluation job..." 
- ) - models.remove(model) - - return models diff --git a/spaces/awacke1/ASR-High-Accuracy-Test/README.md b/spaces/awacke1/ASR-High-Accuracy-Test/README.md deleted file mode 100644 index b9a4301b8ed317fc768c0fbccb6c8431f9749528..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ASR-High-Accuracy-Test/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: 🗣️ASR Wav2Vec2 GRadio Multilingual📄 -emoji: 🗣️ASR💻 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.0.17 -app_file: app.py -pinned: false -license: mit ---- - -More on wav2vec2: https://www.youtube.com/watch?v=fMqYul2TvBE -4.8/8.2 Word Error Rate (WER) - diff --git a/spaces/awacke1/GroupSimilarDataCluster/README.md b/spaces/awacke1/GroupSimilarDataCluster/README.md deleted file mode 100644 index 44534829c46343eb4d0d30417865cd111d0cefd7..0000000000000000000000000000000000000000 --- a/spaces/awacke1/GroupSimilarDataCluster/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 🦜GroupSimilarDataCluster🦚 -emoji: 🦜🦚 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.0.24 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/MistralCoder/README.md b/spaces/awacke1/MistralCoder/README.md deleted file mode 100644 index 489709292242c6259f511fc5b55d9b62f230c13e..0000000000000000000000000000000000000000 --- a/spaces/awacke1/MistralCoder/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: MistralCoder -emoji: 👁 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awsaf49/gcvit-tf/gcvit/layers/drop.py b/spaces/awsaf49/gcvit-tf/gcvit/layers/drop.py deleted file mode 100644 index 340f7e54f556237f9da792c27ead86424fb8f502..0000000000000000000000000000000000000000 --- a/spaces/awsaf49/gcvit-tf/gcvit/layers/drop.py +++ /dev/null @@ -1,40 +0,0 @@ -import tensorflow as tf - - -@tf.keras.utils.register_keras_serializable(package="gcvit") -class Identity(tf.keras.layers.Layer): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def call(self, x): - return tf.identity(x) - - def get_config(self): - config = super().get_config() - return config - -@tf.keras.utils.register_keras_serializable(package="gcvit") -class DropPath(tf.keras.layers.Layer): - def __init__(self, drop_prob=0., scale_by_keep=True, **kwargs): - super().__init__(**kwargs) - self.drop_prob = drop_prob - self.scale_by_keep = scale_by_keep - - def call(self, x, training=None): - if self.drop_prob==0. 
or not training: - return x - keep_prob = 1 - self.drop_prob - shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) - random_tensor = keep_prob + tf.random.uniform(shape, 0, 1) - random_tensor = tf.floor(random_tensor) - if keep_prob > 0.0 and self.scale_by_keep: - x = (x / keep_prob) - return x * random_tensor - - def get_config(self): - config = super().get_config() - config.update({ - "drop_prob": self.drop_prob, - "scale_by_keep": self.scale_by_keep - }) - return config \ No newline at end of file diff --git a/spaces/badongtakla/ithaca/ithaca/models/model.py b/spaces/badongtakla/ithaca/ithaca/models/model.py deleted file mode 100644 index 5d19ac425e0c68f079834ffc492b9981e2c501a9..0000000000000000000000000000000000000000 --- a/spaces/badongtakla/ithaca/ithaca/models/model.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright 2021 the Ithaca Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Ithaca model.""" - -from . import bigbird -from . import common_layers - -import flax.linen as nn -import jax -import jax.numpy as jnp - - -class Model(nn.Module): - """Transformer Model for sequence tagging.""" - vocab_char_size: int = 164 - vocab_word_size: int = 100004 - output_subregions: int = 85 - output_date: int = 160 - output_date_dist: bool = True - output_return_emb: bool = False - use_output_mlp: bool = True - num_heads: int = 8 - num_layers: int = 6 - word_char_emb_dim: int = 192 - emb_dim: int = 512 - qkv_dim: int = 512 - mlp_dim: int = 2048 - max_len: int = 1024 - causal_mask: bool = False - feature_combine_type: str = 'concat' - posemb_combine_type: str = 'add' - region_date_pooling: str = 'first' - learn_pos_emb: bool = True - use_bfloat16: bool = False - dropout_rate: float = 0.1 - attention_dropout_rate: float = 0.1 - activation_fn: str = 'gelu' - model_type: str = 'bigbird' - - def setup(self): - self.text_char_emb = nn.Embed( - num_embeddings=self.vocab_char_size, - features=self.word_char_emb_dim, - embedding_init=nn.initializers.normal(stddev=1.0), - name='char_embeddings') - self.text_word_emb = nn.Embed( - num_embeddings=self.vocab_word_size, - features=self.word_char_emb_dim, - embedding_init=nn.initializers.normal(stddev=1.0), - name='word_embeddings') - - @nn.compact - def __call__(self, - text_char=None, - text_word=None, - text_char_onehot=None, - text_word_onehot=None, - text_char_emb=None, - text_word_emb=None, - padding=None, - is_training=True): - """Applies Ithaca model on the inputs.""" - - if text_char is not None and padding is None: - padding = jnp.where(text_char > 0, 1, 0) - elif text_char_onehot is not None and padding is None: - padding = jnp.where(text_char_onehot.argmax(-1) > 0, 1, 0) - padding_mask = padding[..., jnp.newaxis] - text_len = jnp.sum(padding, 1) - - if self.posemb_combine_type == 'add': - posemb_dim = None - elif self.posemb_combine_type == 'concat': - posemb_dim = self.word_char_emb_dim - else: - raise ValueError('Wrong feature_combine_type value.') - - # Character embeddings - if text_char is not None: - x = 
self.text_char_emb(text_char) - elif text_char_onehot is not None: - x = self.text_char_emb.attend(text_char_onehot) - elif text_char_emb is not None: - x = text_char_emb - else: - raise ValueError('Wrong inputs.') - - # Word embeddings - if text_word is not None: - text_word_emb_x = self.text_word_emb(text_word) - elif text_word_onehot is not None: - text_word_emb_x = self.text_word_emb.attend(text_word_onehot) - elif text_word_emb is not None: - text_word_emb_x = text_word_emb - else: - raise ValueError('Wrong inputs.') - - if self.feature_combine_type == 'add': - x = x + text_word_emb_x - elif self.feature_combine_type == 'concat': - x = jax.lax.concatenate([x, text_word_emb_x], 2) - else: - raise ValueError('Wrong feature_combine_type value.') - - # Positional embeddings - pe_init = common_layers.sinusoidal_init( - max_len=self.max_len) if self.learn_pos_emb else None - x = common_layers.AddPositionEmbs( - posemb_dim=posemb_dim, - posemb_init=pe_init, - max_len=self.max_len, - combine_type=self.posemb_combine_type, - name='posembed_input', - )( - x) - x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=not is_training) - - # Set floating point - if self.use_bfloat16: - x = x.astype(jnp.bfloat16) - dtype = jnp.bfloat16 - else: - dtype = jnp.float32 - - if self.model_type == 'bigbird': - model_block = bigbird.BigBirdBlock - else: - raise ValueError('Wrong model type specified.') - - for lyr in range(self.num_layers): - x = model_block( - qkv_dim=self.qkv_dim, - mlp_dim=self.mlp_dim, - num_heads=self.num_heads, - dtype=dtype, - causal_mask=self.causal_mask, - dropout_rate=self.dropout_rate, - attention_dropout_rate=self.attention_dropout_rate, - deterministic=not is_training, - activation_fn=self.activation_fn, - connectivity_seed=lyr, - name=f'encoderblock_{lyr}', - )( - x, - padding_mask=padding_mask, - ) - x = common_layers.LayerNorm(dtype=dtype, name='encoder_norm')(x) - torso_output = x - - # Bert logits - if self.use_output_mlp: - x_mask = common_layers.MlpBlock( - out_dim=self.word_char_emb_dim, - mlp_dim=self.emb_dim, - dtype=dtype, - out_dropout=False, - dropout_rate=self.dropout_rate, - deterministic=not is_training, - activation_fn=self.activation_fn)( - x) - else: - x_mask = nn.Dense(self.word_char_emb_dim)(x) - - char_embeddings = self.text_char_emb.embedding - char_embeddings = nn.Dropout(rate=self.dropout_rate)( - char_embeddings, deterministic=not is_training) - logits_mask = jnp.matmul(x_mask, jnp.transpose(char_embeddings)) - - # Next sentence prediction - if self.use_output_mlp: - logits_nsp = common_layers.MlpBlock( - out_dim=2, - mlp_dim=self.emb_dim, - dtype=dtype, - out_dropout=False, - dropout_rate=self.dropout_rate, - deterministic=not is_training, - activation_fn=self.activation_fn)( - x) - else: - logits_nsp = nn.Dense(2)(x) - - # Average over temporal dimension - if self.region_date_pooling == 'average': - x = jnp.multiply(padding_mask.astype(jnp.float32), x) - x = jnp.sum(x, 1) / text_len.astype(jnp.float32)[..., None] - elif self.region_date_pooling == 'sum': - x = jnp.multiply(padding_mask.astype(jnp.float32), x) - x = jnp.sum(x, 1) - elif self.region_date_pooling == 'first': - x = x[:, 0, :] - else: - raise ValueError('Wrong pooling type specified.') - - # Date pred - if self.output_date_dist: - output_date_dim = self.output_date - else: - output_date_dim = 1 - - if self.use_output_mlp: - pred_date = common_layers.MlpBlock( - out_dim=output_date_dim, - mlp_dim=self.emb_dim, - dtype=dtype, - out_dropout=False, - dropout_rate=self.dropout_rate, - 
deterministic=not is_training, - activation_fn=self.activation_fn)( - x) - else: - pred_date = nn.Dense(output_date_dim)(x) - - # Region logits - if self.use_output_mlp: - logits_subregion = common_layers.MlpBlock( - out_dim=self.output_subregions, - mlp_dim=self.emb_dim, - dtype=dtype, - out_dropout=False, - dropout_rate=self.dropout_rate, - deterministic=not is_training, - activation_fn=self.activation_fn)( - x) - else: - logits_subregion = nn.Dense(self.output_subregions)(x) - - outputs = (pred_date, logits_subregion, logits_mask, logits_nsp) - if self.output_return_emb: - return outputs, torso_output - else: - return outputs diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/AnimationClipCreator.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/AnimationClipCreator.js deleted file mode 100644 index b87f7153e47ea6b41496bcf0ccefd66b5389b22b..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/AnimationClipCreator.js +++ /dev/null @@ -1,114 +0,0 @@ -/** - * - * Creator of typical test AnimationClips / KeyframeTracks - * - * @author Ben Houston / http://clara.io/ - * @author David Sarno / http://lighthaus.us/ - */ - -THREE.AnimationClipCreator = function () {}; - -THREE.AnimationClipCreator.CreateRotationAnimation = function ( period, axis ) { - - var times = [ 0, period ], values = [ 0, 360 ]; - - axis = axis || 'x'; - var trackName = '.rotation[' + axis + ']'; - - var track = new THREE.NumberKeyframeTrack( trackName, times, values ); - - return new THREE.AnimationClip( null, period, [ track ] ); - -}; - -THREE.AnimationClipCreator.CreateScaleAxisAnimation = function ( period, axis ) { - - var times = [ 0, period ], values = [ 0, 1 ]; - - axis = axis || 'x'; - var trackName = '.scale[' + axis + ']'; - - var track = new THREE.NumberKeyframeTrack( trackName, times, values ); - - return new THREE.AnimationClip( null, period, [ track ] ); - -}; - -THREE.AnimationClipCreator.CreateShakeAnimation = function ( duration, shakeScale ) { - - var times = [], values = [], tmp = new THREE.Vector3(); - - for ( var i = 0; i < duration * 10; i ++ ) { - - times.push( i / 10 ); - - tmp.set( Math.random() * 2.0 - 1.0, Math.random() * 2.0 - 1.0, Math.random() * 2.0 - 1.0 ). - multiply( shakeScale ). - toArray( values, values.length ); - - } - - var trackName = '.position'; - - var track = new THREE.VectorKeyframeTrack( trackName, times, values ); - - return new THREE.AnimationClip( null, duration, [ track ] ); - -}; - - -THREE.AnimationClipCreator.CreatePulsationAnimation = function ( duration, pulseScale ) { - - var times = [], values = [], tmp = new THREE.Vector3(); - - for ( var i = 0; i < duration * 10; i ++ ) { - - times.push( i / 10 ); - - var scaleFactor = Math.random() * pulseScale; - tmp.set( scaleFactor, scaleFactor, scaleFactor ). 
- toArray( values, values.length ); - - } - - var trackName = '.scale'; - - var track = new THREE.VectorKeyframeTrack( trackName, times, values ); - - return new THREE.AnimationClip( null, duration, [ track ] ); - -}; - - -THREE.AnimationClipCreator.CreateVisibilityAnimation = function ( duration ) { - - var times = [ 0, duration / 2, duration ], values = [ true, false, true ]; - - var trackName = '.visible'; - - var track = new THREE.BooleanKeyframeTrack( trackName, times, values ); - - return new THREE.AnimationClip( null, duration, [ track ] ); - -}; - - -THREE.AnimationClipCreator.CreateMaterialColorAnimation = function ( duration, colors ) { - - var times = [], values = [], - timeStep = duration / colors.length; - - for ( var i = 0; i <= colors.length; i ++ ) { - - times.push( i * timeStep ); - values.push( colors[ i % colors.length ] ); - - } - - var trackName = '.material[0].color'; - - var track = new THREE.ColorKeyframeTrack( trackName, times, values ); - - return new THREE.AnimationClip( null, duration, [ track ] ); - -}; diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/postprocessing/AdaptiveToneMappingPass.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/postprocessing/AdaptiveToneMappingPass.js deleted file mode 100644 index 93d81504f35dddd7bcc94054f71d41bab406fd71..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/postprocessing/AdaptiveToneMappingPass.js +++ /dev/null @@ -1,348 +0,0 @@ -/** - * @author miibond - * Generate a texture that represents the luminosity of the current scene, adapted over time - * to simulate the optic nerve responding to the amount of light it is receiving. - * Based on a GDC2007 presentation by Wolfgang Engel titled "Post-Processing Pipeline" - * - * Full-screen tone-mapping shader based on http://www.graphics.cornell.edu/~jaf/publications/sig02_paper.pdf - */ - -THREE.AdaptiveToneMappingPass = function ( adaptive, resolution ) { - - THREE.Pass.call( this ); - - this.resolution = ( resolution !== undefined ) ? resolution : 256; - this.needsInit = true; - this.adaptive = adaptive !== undefined ? !! 
adaptive : true; - - this.luminanceRT = null; - this.previousLuminanceRT = null; - this.currentLuminanceRT = null; - - if ( THREE.CopyShader === undefined ) - console.error( "THREE.AdaptiveToneMappingPass relies on THREE.CopyShader" ); - - var copyShader = THREE.CopyShader; - - this.copyUniforms = THREE.UniformsUtils.clone( copyShader.uniforms ); - - this.materialCopy = new THREE.ShaderMaterial( { - - uniforms: this.copyUniforms, - vertexShader: copyShader.vertexShader, - fragmentShader: copyShader.fragmentShader, - blending: THREE.NoBlending, - depthTest: false - - } ); - - if ( THREE.LuminosityShader === undefined ) - console.error( "THREE.AdaptiveToneMappingPass relies on THREE.LuminosityShader" ); - - this.materialLuminance = new THREE.ShaderMaterial( { - - uniforms: THREE.UniformsUtils.clone( THREE.LuminosityShader.uniforms ), - vertexShader: THREE.LuminosityShader.vertexShader, - fragmentShader: THREE.LuminosityShader.fragmentShader, - blending: THREE.NoBlending - } ); - - this.adaptLuminanceShader = { - defines: { - "MIP_LEVEL_1X1": ( Math.log( this.resolution ) / Math.log( 2.0 ) ).toFixed( 1 ) - }, - uniforms: { - "lastLum": { value: null }, - "currentLum": { value: null }, - "minLuminance": { value: 0.01 }, - "delta": { value: 0.016 }, - "tau": { value: 1.0 } - }, - vertexShader: [ - "varying vec2 vUv;", - - "void main() {", - - " vUv = uv;", - " gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );", - - "}" - ].join( '\n' ), - fragmentShader: [ - "varying vec2 vUv;", - - "uniform sampler2D lastLum;", - "uniform sampler2D currentLum;", - "uniform float minLuminance;", - "uniform float delta;", - "uniform float tau;", - - "void main() {", - - " vec4 lastLum = texture2D( lastLum, vUv, MIP_LEVEL_1X1 );", - " vec4 currentLum = texture2D( currentLum, vUv, MIP_LEVEL_1X1 );", - - " float fLastLum = max( minLuminance, lastLum.r );", - " float fCurrentLum = max( minLuminance, currentLum.r );", - - //The adaption seems to work better in extreme lighting differences - //if the input luminance is squared. 
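- 			//(Squaring exaggerates the gap between bright and dark frames, so the smoothed
- 			//value computed below reacts more strongly to large swings in scene luminance.)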
- " fCurrentLum *= fCurrentLum;", - - // Adapt the luminance using Pattanaik's technique - " float fAdaptedLum = fLastLum + (fCurrentLum - fLastLum) * (1.0 - exp(-delta * tau));", - // "fAdaptedLum = sqrt(fAdaptedLum);", - " gl_FragColor.r = fAdaptedLum;", - "}" - ].join( '\n' ) - }; - - this.materialAdaptiveLum = new THREE.ShaderMaterial( { - - uniforms: THREE.UniformsUtils.clone( this.adaptLuminanceShader.uniforms ), - vertexShader: this.adaptLuminanceShader.vertexShader, - fragmentShader: this.adaptLuminanceShader.fragmentShader, - defines: Object.assign( {}, this.adaptLuminanceShader.defines ), - blending: THREE.NoBlending - } ); - - if ( THREE.ToneMapShader === undefined ) - console.error( "THREE.AdaptiveToneMappingPass relies on THREE.ToneMapShader" ); - - this.materialToneMap = new THREE.ShaderMaterial( { - - uniforms: THREE.UniformsUtils.clone( THREE.ToneMapShader.uniforms ), - vertexShader: THREE.ToneMapShader.vertexShader, - fragmentShader: THREE.ToneMapShader.fragmentShader, - blending: THREE.NoBlending - } ); - - this.fsQuad = new THREE.Pass.FullScreenQuad( null ); - -}; - -THREE.AdaptiveToneMappingPass.prototype = Object.assign( Object.create( THREE.Pass.prototype ), { - - constructor: THREE.AdaptiveToneMappingPass, - - render: function ( renderer, writeBuffer, readBuffer, deltaTime, maskActive ) { - - if ( this.needsInit ) { - - this.reset( renderer ); - - this.luminanceRT.texture.type = readBuffer.texture.type; - this.previousLuminanceRT.texture.type = readBuffer.texture.type; - this.currentLuminanceRT.texture.type = readBuffer.texture.type; - this.needsInit = false; - - } - - if ( this.adaptive ) { - - //Render the luminance of the current scene into a render target with mipmapping enabled - this.fsQuad.material = this.materialLuminance; - this.materialLuminance.uniforms.tDiffuse.value = readBuffer.texture; - renderer.setRenderTarget( this.currentLuminanceRT ); - this.fsQuad.render( renderer ); - - //Use the new luminance values, the previous luminance and the frame delta to - //adapt the luminance over time. - this.fsQuad.material = this.materialAdaptiveLum; - this.materialAdaptiveLum.uniforms.delta.value = deltaTime; - this.materialAdaptiveLum.uniforms.lastLum.value = this.previousLuminanceRT.texture; - this.materialAdaptiveLum.uniforms.currentLum.value = this.currentLuminanceRT.texture; - renderer.setRenderTarget( this.luminanceRT ); - this.fsQuad.render( renderer ); - - //Copy the new adapted luminance value so that it can be used by the next frame. - this.fsQuad.material = this.materialCopy; - this.copyUniforms.tDiffuse.value = this.luminanceRT.texture; - renderer.setRenderTarget( this.previousLuminanceRT ); - this.fsQuad.render( renderer ); - - } - - this.fsQuad.material = this.materialToneMap; - this.materialToneMap.uniforms.tDiffuse.value = readBuffer.texture; - - if ( this.renderToScreen ) { - - renderer.setRenderTarget( null ); - this.fsQuad.render( renderer ); - - } else { - - renderer.setRenderTarget( writeBuffer ); - - if ( this.clear ) renderer.clear(); - - this.fsQuad.render( renderer ); - - } - - }, - - reset: function ( renderer ) { - - // render targets - if ( this.luminanceRT ) { - - this.luminanceRT.dispose(); - - } - if ( this.currentLuminanceRT ) { - - this.currentLuminanceRT.dispose(); - - } - if ( this.previousLuminanceRT ) { - - this.previousLuminanceRT.dispose(); - - } - - var pars = { minFilter: THREE.LinearFilter, magFilter: THREE.LinearFilter, format: THREE.RGBAFormat }; // was RGB format. changed to RGBA format. 
see discussion in #8415 / #8450 - - this.luminanceRT = new THREE.WebGLRenderTarget( this.resolution, this.resolution, pars ); - this.luminanceRT.texture.name = "AdaptiveToneMappingPass.l"; - this.luminanceRT.texture.generateMipmaps = false; - - this.previousLuminanceRT = new THREE.WebGLRenderTarget( this.resolution, this.resolution, pars ); - this.previousLuminanceRT.texture.name = "AdaptiveToneMappingPass.pl"; - this.previousLuminanceRT.texture.generateMipmaps = false; - - // We only need mipmapping for the current luminosity because we want a down-sampled version to sample in our adaptive shader - pars.minFilter = THREE.LinearMipMapLinearFilter; - pars.generateMipmaps = true; - this.currentLuminanceRT = new THREE.WebGLRenderTarget( this.resolution, this.resolution, pars ); - this.currentLuminanceRT.texture.name = "AdaptiveToneMappingPass.cl"; - - if ( this.adaptive ) { - - this.materialToneMap.defines[ "ADAPTED_LUMINANCE" ] = ""; - this.materialToneMap.uniforms.luminanceMap.value = this.luminanceRT.texture; - - } - //Put something in the adaptive luminance texture so that the scene can render initially - this.fsQuad.material = new THREE.MeshBasicMaterial( { color: 0x777777 } ); - this.materialLuminance.needsUpdate = true; - this.materialAdaptiveLum.needsUpdate = true; - this.materialToneMap.needsUpdate = true; - // renderer.render( this.scene, this.camera, this.luminanceRT ); - // renderer.render( this.scene, this.camera, this.previousLuminanceRT ); - // renderer.render( this.scene, this.camera, this.currentLuminanceRT ); - - }, - - setAdaptive: function ( adaptive ) { - - if ( adaptive ) { - - this.adaptive = true; - this.materialToneMap.defines[ "ADAPTED_LUMINANCE" ] = ""; - this.materialToneMap.uniforms.luminanceMap.value = this.luminanceRT.texture; - - } else { - - this.adaptive = false; - delete this.materialToneMap.defines[ "ADAPTED_LUMINANCE" ]; - this.materialToneMap.uniforms.luminanceMap.value = null; - - } - this.materialToneMap.needsUpdate = true; - - }, - - setAdaptionRate: function ( rate ) { - - if ( rate ) { - - this.materialAdaptiveLum.uniforms.tau.value = Math.abs( rate ); - - } - - }, - - setMinLuminance: function ( minLum ) { - - if ( minLum ) { - - this.materialToneMap.uniforms.minLuminance.value = minLum; - this.materialAdaptiveLum.uniforms.minLuminance.value = minLum; - - } - - }, - - setMaxLuminance: function ( maxLum ) { - - if ( maxLum ) { - - this.materialToneMap.uniforms.maxLuminance.value = maxLum; - - } - - }, - - setAverageLuminance: function ( avgLum ) { - - if ( avgLum ) { - - this.materialToneMap.uniforms.averageLuminance.value = avgLum; - - } - - }, - - setMiddleGrey: function ( middleGrey ) { - - if ( middleGrey ) { - - this.materialToneMap.uniforms.middleGrey.value = middleGrey; - - } - - }, - - dispose: function () { - - if ( this.luminanceRT ) { - - this.luminanceRT.dispose(); - - } - if ( this.previousLuminanceRT ) { - - this.previousLuminanceRT.dispose(); - - } - if ( this.currentLuminanceRT ) { - - this.currentLuminanceRT.dispose(); - - } - if ( this.materialLuminance ) { - - this.materialLuminance.dispose(); - - } - if ( this.materialAdaptiveLum ) { - - this.materialAdaptiveLum.dispose(); - - } - if ( this.materialCopy ) { - - this.materialCopy.dispose(); - - } - if ( this.materialToneMap ) { - - this.materialToneMap.dispose(); - - } - - } - -} ); diff --git a/spaces/banana-projects/web3d/node_modules/three/src/animation/AnimationAction.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/animation/AnimationAction.d.ts deleted file mode 
100644 index a852880b3daaf0c66e70d1d8a9c6e3c7d6933c11..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/animation/AnimationAction.d.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { AnimationMixer } from './AnimationMixer'; -import { AnimationClip } from './AnimationClip'; -import { AnimationActionLoopStyles } from '../constants'; -// Animation //////////////////////////////////////////////////////////////////////////////////////// - -export class AnimationAction { - loop: AnimationActionLoopStyles; - time: number; - timeScale: number; - weight: number; - repetitions: number; - paused: boolean; - enabled: boolean; - clampWhenFinished: boolean; - zeroSlopeAtStart: boolean; - zeroSlopeAtEnd: boolean; - - play(): AnimationAction; - stop(): AnimationAction; - reset(): AnimationAction; - isRunning(): boolean; - startAt(time: number): AnimationAction; - setLoop( - mode: AnimationActionLoopStyles, - repetitions: number - ): AnimationAction; - setEffectiveWeight(weight: number): AnimationAction; - getEffectiveWeight(): number; - fadeIn(duration: number): AnimationAction; - fadeOut(duration: number): AnimationAction; - crossFadeFrom( - fadeOutAction: AnimationAction, - duration: number, - warp: boolean - ): AnimationAction; - crossFadeTo( - fadeInAction: AnimationAction, - duration: number, - warp: boolean - ): AnimationAction; - stopFading(): AnimationAction; - setEffectiveTimeScale(timeScale: number): AnimationAction; - getEffectiveTimeScale(): number; - setDuration(duration: number): AnimationAction; - syncWith(action: AnimationAction): AnimationAction; - halt(duration: number): AnimationAction; - warp( - startTimeScale: number, - endTimeScale: number, - duration: number - ): AnimationAction; - stopWarping(): AnimationAction; - getMixer(): AnimationMixer; - getClip(): AnimationClip; - getRoot(): any; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/objects/LineSegments.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/objects/LineSegments.d.ts deleted file mode 100644 index 67b32870ad7a3c5bde94dbbd8dc1b308c15491b4..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/objects/LineSegments.d.ts +++ /dev/null @@ -1,24 +0,0 @@ -import { Geometry } from './../core/Geometry'; -import { Material } from './../materials/Material'; -import { Line } from './Line'; -import { BufferGeometry } from '../core/BufferGeometry'; - -/** - * @deprecated - */ -export const LineStrip: number; -/** - * @deprecated - */ -export const LinePieces: number; - -export class LineSegments extends Line { - constructor( - geometry?: Geometry | BufferGeometry, - material?: Material | Material[], - mode?: number - ); - - type: 'LineSegments'; - isLineSegments: true; -} diff --git a/spaces/bigcode/bigcode-editor/style.css b/spaces/bigcode/bigcode-editor/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/bigcode/bigcode-editor/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git 
a/spaces/bigjoker/stable-diffusion-webui/modules/face_restoration.py b/spaces/bigjoker/stable-diffusion-webui/modules/face_restoration.py deleted file mode 100644 index 2c86c6ccce338a1411f4367a0bc6e4046ad67cae..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/modules/face_restoration.py +++ /dev/null @@ -1,19 +0,0 @@ -from modules import shared - - -class FaceRestoration: - def name(self): - return "None" - - def restore(self, np_image): - return np_image - - -def restore_faces(np_image): - face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None] - if len(face_restorers) == 0: - return np_image - - face_restorer = face_restorers[0] - - return face_restorer.restore(np_image) diff --git a/spaces/bigjoker/stable-diffusion-webui/run.py b/spaces/bigjoker/stable-diffusion-webui/run.py deleted file mode 100644 index a5a7b75245e46e3ad2f8783e4cc42d37bbe720a8..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/run.py +++ /dev/null @@ -1,6 +0,0 @@ -# import os - -# if not os.path.exists("extensions/deforum"): -# exec(open("deforum.sh").read()) - -exec(open("run.sh").read()) diff --git a/spaces/bino-ocle/audio-intelligence-dash/app/styles.css b/spaces/bino-ocle/audio-intelligence-dash/app/styles.css deleted file mode 100644 index 5e945905e9266e85587c93ff9277199b6e2e96e0..0000000000000000000000000000000000000000 --- a/spaces/bino-ocle/audio-intelligence-dash/app/styles.css +++ /dev/null @@ -1,134 +0,0 @@ -body { - font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Raleway, Helvetica, - Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; -} - -.logo { - width: 180px; -} - -.title { - font-weight: 600; - text-align: left; - color: black; - font-size: 18px; -} - -.alert, -#component-2, -#component-3 { - padding: 24px; - color: black; - background-color: #f4f8fb; - border: 1px solid #d6dce7; - border-radius: 8px; - box-shadow: 0px 6px 15px rgb(0 0 0 / 2%), 0px 2px 5px rgb(0 0 0 / 4%); -} - -ol { - list-style: disc; -} - -.alert__info { - background-color: #f4f8fb; - color: #323552; -} - -.alert__warning { - background-color: #fffae5; - color: #917115; - border: 1px solid #e4cf2b; -} - -#pw { - -webkit-text-security: disc; -} - -/* unvisited link */ -a:link { - color: #48DFDD; -} - -/* visited link */ -a:visited { - color: #48DFDD; -} - -/* mouse over link */ -a:hover { - color: #48DFDD; -} - -/* selected link */ -a:active { - color: #48DFDD; -} - -li { - margin-left: 1em; -} - -.apikey { -} - -.entity-list { - color: #48DFDD; - font-size: 16px -} - -.entity-elt { - color: black -}.istopic { -color: #48DFDD; -} - -.topic-L0 { -font-size: 30px; -text-indent: 0px; -} - -.topic-L1 { -font-size: 25px; -text-indent: 18px; -} - -.topic-L2 { -font-size: 20px; -text-indent: 36px; -} - -.topic-L3 { -font-size: 15px; -text-indent: 54px; -} - -.topic-L4 { -font-size: 15px; -text-indent: 72px; -} - -.topic-L5 { -font-size: 15px; -text-indent: 90px; -} - -.topic-L6 { -font-size: 15px; -text-indent: 108px; -} - -.topic-L7 { -font-size: 15px; -text-indent: 126px; -} - -.topic-L8 { -font-size: 15px; -text-indent: 144px; -} - -.topic-L9 { -font-size: 15px; -text-indent: 162px; -} - diff --git a/spaces/bioriAsaeru/text-to-voice/Dongle Emulator Eplan P8 2.2 18 Discover the Secrets of Using Eplan without a Dongle.md b/spaces/bioriAsaeru/text-to-voice/Dongle Emulator Eplan P8 2.2 18 Discover the Secrets of Using Eplan without a 
Dongle.md deleted file mode 100644 index 3b7e92361be5dedac7a3f5da0e75ee84dbdaa7f9..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Dongle Emulator Eplan P8 2.2 18 Discover the Secrets of Using Eplan without a Dongle.md +++ /dev/null @@ -1,5 +0,0 @@ - -

Nothing illegal here (no any file, crack, patch, codes, keys, generators, dongle emulators links here), this is only site links resource as thousands other internet sites. Information on this website represents some compatibility list and is for our own knowledge. This absolutely does not mean that we are selling the illegal copies of these programs.
-Dongle Emulator Eplan P8 2.2 18
-Download ☆☆☆ https://urloso.com/2uyRGM
-aaccfb2cb3
-
\ No newline at end of file diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/modules/diffusion_schedule.py b/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/modules/diffusion_schedule.py deleted file mode 100644 index 74ca6e3f2e7c4ff904d96dade315b0b46856778d..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/modules/diffusion_schedule.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Functions for the noise schedule: defines the diffusion process, the reverse process and the data processor. -""" - -from collections import namedtuple -import random -import typing as tp -import julius -import torch - -TrainingItem = namedtuple("TrainingItem", "noisy noise step") - - -def betas_from_alpha_bar(alpha_bar): - alphas = torch.cat([torch.Tensor([alpha_bar[0]]), alpha_bar[1:]/alpha_bar[:-1]]) - return 1 - alphas - - -class SampleProcessor(torch.nn.Module): - def project_sample(self, x: torch.Tensor): - """Project the original sample to the 'space' where the diffusion will happen.""" - return x - - def return_sample(self, z: torch.Tensor): - """Project back from diffusion space to the actual sample space.""" - return z - - -class MultiBandProcessor(SampleProcessor): - """ - MultiBand sample processor. The input audio is split across - frequency bands evenly distributed on the mel scale. - - Each band will be rescaled to match the power distribution - of Gaussian noise in that band, using online metrics - computed on the first few samples. - - Args: - n_bands (int): Number of mel-bands to split the signal over. - sample_rate (int): Sample rate of the audio. - num_samples (int): Number of samples to use to fit the rescaling - for each band. The processor won't be stable - until it has seen that many samples. - power_std (float or list/tensor): The rescaling factor computed to match the - power of Gaussian noise in each band is taken to - that power, i.e. `1.` means full correction of the energy - in each band, and values less than `1` mean only partial - correction. Can be used to balance the relative importance - of low vs. high frequencies in typical audio signals.
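- 
-     A minimal usage sketch (illustrative only; `x` stands for any (batch, channels, time)
-     waveform tensor, matching the `x.dim() == 3` asserts below):
- 
-         processor = MultiBandProcessor(n_bands=8, sample_rate=24_000)
-         z = processor.project_sample(x)      # rescale each band toward noise statistics
-         x_hat = processor.return_sample(z)   # undo the rescaling; same shape as x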
- """ - def __init__(self, n_bands: int = 8, sample_rate: float = 24_000, - num_samples: int = 10_000, power_std: tp.Union[float, tp.List[float], torch.Tensor] = 1.): - super().__init__() - self.n_bands = n_bands - self.split_bands = julius.SplitBands(sample_rate, n_bands=n_bands) - self.num_samples = num_samples - self.power_std = power_std - if isinstance(power_std, list): - assert len(power_std) == n_bands - power_std = torch.tensor(power_std) - self.register_buffer('counts', torch.zeros(1)) - self.register_buffer('sum_x', torch.zeros(n_bands)) - self.register_buffer('sum_x2', torch.zeros(n_bands)) - self.register_buffer('sum_target_x2', torch.zeros(n_bands)) - self.counts: torch.Tensor - self.sum_x: torch.Tensor - self.sum_x2: torch.Tensor - self.sum_target_x2: torch.Tensor - - @property - def mean(self): - mean = self.sum_x / self.counts - return mean - - @property - def std(self): - std = (self.sum_x2 / self.counts - self.mean**2).clamp(min=0).sqrt() - return std - - @property - def target_std(self): - target_std = self.sum_target_x2 / self.counts - return target_std - - def project_sample(self, x: torch.Tensor): - assert x.dim() == 3 - bands = self.split_bands(x) - if self.counts.item() < self.num_samples: - ref_bands = self.split_bands(torch.randn_like(x)) - self.counts += len(x) - self.sum_x += bands.mean(dim=(2, 3)).sum(dim=1) - self.sum_x2 += bands.pow(2).mean(dim=(2, 3)).sum(dim=1) - self.sum_target_x2 += ref_bands.pow(2).mean(dim=(2, 3)).sum(dim=1) - rescale = (self.target_std / self.std.clamp(min=1e-12)) ** self.power_std # same output size - bands = (bands - self.mean.view(-1, 1, 1, 1)) * rescale.view(-1, 1, 1, 1) - return bands.sum(dim=0) - - def return_sample(self, x: torch.Tensor): - assert x.dim() == 3 - bands = self.split_bands(x) - rescale = (self.std / self.target_std) ** self.power_std - bands = bands * rescale.view(-1, 1, 1, 1) + self.mean.view(-1, 1, 1, 1) - return bands.sum(dim=0) - - -class NoiseSchedule: - """Noise schedule for diffusion. - - Args: - beta_t0 (float): Variance of the first diffusion step. - beta_t1 (float): Variance of the last diffusion step. - beta_exp (float): Power schedule exponent - num_steps (int): Number of diffusion step. - variance (str): choice of the sigma value for the denoising eq. 
Choices: "beta" or "beta_tilde" - clip (float): clipping value for the denoising steps - rescale (float): rescaling value to avoid vanishing signals unused by default (i.e 1) - repartition (str): shape of the schedule only power schedule is supported - sample_processor (SampleProcessor): Module that normalize data to match better the gaussian distribution - noise_scale (float): Scaling factor for the noise - """ - def __init__(self, beta_t0: float = 1e-4, beta_t1: float = 0.02, num_steps: int = 1000, variance: str = 'beta', - clip: float = 5., rescale: float = 1., device='cuda', beta_exp: float = 1, - repartition: str = "power", alpha_sigmoid: dict = {}, n_bands: tp.Optional[int] = None, - sample_processor: SampleProcessor = SampleProcessor(), noise_scale: float = 1.0, **kwargs): - - self.beta_t0 = beta_t0 - self.beta_t1 = beta_t1 - self.variance = variance - self.num_steps = num_steps - self.clip = clip - self.sample_processor = sample_processor - self.rescale = rescale - self.n_bands = n_bands - self.noise_scale = noise_scale - assert n_bands is None - if repartition == "power": - self.betas = torch.linspace(beta_t0 ** (1 / beta_exp), beta_t1 ** (1 / beta_exp), num_steps, - device=device, dtype=torch.float) ** beta_exp - else: - raise RuntimeError('Not implemented') - self.rng = random.Random(1234) - - def get_beta(self, step: tp.Union[int, torch.Tensor]): - if self.n_bands is None: - return self.betas[step] - else: - return self.betas[:, step] # [n_bands, len(step)] - - def get_initial_noise(self, x: torch.Tensor): - if self.n_bands is None: - return torch.randn_like(x) - return torch.randn((x.size(0), self.n_bands, x.size(2))) - - def get_alpha_bar(self, step: tp.Optional[tp.Union[int, torch.Tensor]] = None) -> torch.Tensor: - """Return 'alpha_bar', either for a given step, or as a tensor with its value for each step.""" - if step is None: - return (1 - self.betas).cumprod(dim=-1) # works for simgle and multi bands - if type(step) is int: - return (1 - self.betas[:step + 1]).prod() - else: - return (1 - self.betas).cumprod(dim=0)[step].view(-1, 1, 1) - - def get_training_item(self, x: torch.Tensor, tensor_step: bool = False) -> TrainingItem: - """Create a noisy data item for diffusion model training: - - Args: - x (torch.Tensor): clean audio data torch.tensor(bs, 1, T) - tensor_step (bool): If tensor_step = false, only one step t is sample, - the whole batch is diffused to the same step and t is int. - If tensor_step = true, t is a tensor of size (x.size(0),) - every element of the batch is diffused to a independently sampled. - """ - step: tp.Union[int, torch.Tensor] - if tensor_step: - bs = x.size(0) - step = torch.randint(0, self.num_steps, size=(bs,), device=x.device) - else: - step = self.rng.randrange(self.num_steps) - alpha_bar = self.get_alpha_bar(step) # [batch_size, n_bands, 1] - - x = self.sample_processor.project_sample(x) - noise = torch.randn_like(x) - noisy = (alpha_bar.sqrt() / self.rescale) * x + (1 - alpha_bar).sqrt() * noise * self.noise_scale - return TrainingItem(noisy, noise, step) - - def generate(self, model: torch.nn.Module, initial: tp.Optional[torch.Tensor] = None, - condition: tp.Optional[torch.Tensor] = None, return_list: bool = False): - """Full ddpm reverse process. - - Args: - model (nn.Module): Diffusion model. - initial (tensor): Initial Noise. - condition (tensor): Input conditionning Tensor (e.g. encodec compressed representation). - return_list (bool): Whether to return the whole process or only the sampled point. 
- """ - alpha_bar = self.get_alpha_bar(step=self.num_steps - 1) - current = initial - iterates = [initial] - for step in range(self.num_steps)[::-1]: - with torch.no_grad(): - estimate = model(current, step, condition=condition).sample - alpha = 1 - self.betas[step] - previous = (current - (1 - alpha) / (1 - alpha_bar).sqrt() * estimate) / alpha.sqrt() - previous_alpha_bar = self.get_alpha_bar(step=step - 1) - if step == 0: - sigma2 = 0 - elif self.variance == 'beta': - sigma2 = 1 - alpha - elif self.variance == 'beta_tilde': - sigma2 = (1 - previous_alpha_bar) / (1 - alpha_bar) * (1 - alpha) - elif self.variance == 'none': - sigma2 = 0 - else: - raise ValueError(f'Invalid variance type {self.variance}') - - if sigma2 > 0: - previous += sigma2**0.5 * torch.randn_like(previous) * self.noise_scale - if self.clip: - previous = previous.clamp(-self.clip, self.clip) - current = previous - alpha_bar = previous_alpha_bar - if step == 0: - previous *= self.rescale - if return_list: - iterates.append(previous.cpu()) - - if return_list: - return iterates - else: - return self.sample_processor.return_sample(previous) - - def generate_subsampled(self, model: torch.nn.Module, initial: torch.Tensor, step_list: tp.Optional[list] = None, - condition: tp.Optional[torch.Tensor] = None, return_list: bool = False): - """Reverse process that only goes through Markov chain states in step_list.""" - if step_list is None: - step_list = list(range(1000))[::-50] + [0] - alpha_bar = self.get_alpha_bar(step=self.num_steps - 1) - alpha_bars_subsampled = (1 - self.betas).cumprod(dim=0)[list(reversed(step_list))].cpu() - betas_subsampled = betas_from_alpha_bar(alpha_bars_subsampled) - current = initial * self.noise_scale - iterates = [current] - for idx, step in enumerate(step_list[:-1]): - with torch.no_grad(): - estimate = model(current, step, condition=condition).sample * self.noise_scale - alpha = 1 - betas_subsampled[-1 - idx] - previous = (current - (1 - alpha) / (1 - alpha_bar).sqrt() * estimate) / alpha.sqrt() - previous_alpha_bar = self.get_alpha_bar(step_list[idx + 1]) - if step == step_list[-2]: - sigma2 = 0 - previous_alpha_bar = torch.tensor(1.0) - else: - sigma2 = (1 - previous_alpha_bar) / (1 - alpha_bar) * (1 - alpha) - if sigma2 > 0: - previous += sigma2**0.5 * torch.randn_like(previous) * self.noise_scale - if self.clip: - previous = previous.clamp(-self.clip, self.clip) - current = previous - alpha_bar = previous_alpha_bar - if step == 0: - previous *= self.rescale - if return_list: - iterates.append(previous.cpu()) - if return_list: - return iterates - else: - return self.sample_processor.return_sample(previous) diff --git a/spaces/brayden-gg/decoupled-style-descriptors/app.py b/spaces/brayden-gg/decoupled-style-descriptors/app.py deleted file mode 100644 index 0dd9c944844a8b401faf9753b19cfa3abd664603..0000000000000000000000000000000000000000 --- a/spaces/brayden-gg/decoupled-style-descriptors/app.py +++ /dev/null @@ -1,286 +0,0 @@ -import torch -import numpy as np -from helper import * -from config.GlobalVariables import * -from SynthesisNetwork import SynthesisNetwork -from DataLoader import DataLoader -import convenience -import gradio as gr - - -def update_chosen_writers(writer1, writer2, weight, words, all_loaded_data): - net.clamp_mdn = 0 - chosen_writers = [int(writer1.split(" ")[1]), int(writer2.split(" ")[1])] - - all_loaded_data = [] - for writer_id in chosen_writers: - loaded_data = dl.next_batch(TYPE='TRAIN', uid=writer_id, tids=list(range(num_samples))) - 
all_loaded_data.append(loaded_data) - - writer_mean_Ws = [] - for loaded_data in all_loaded_data: - mean_global_W = convenience.get_mean_global_W(net, loaded_data, device) - writer_mean_Ws.append(mean_global_W.detach()) - - return gr.Slider.update(label=f"{writer1} vs. {writer2}"), chosen_writers, writer_mean_Ws, *update_writer_word(" ".join(words), writer_mean_Ws, all_loaded_data, weight) - -def update_writer_word(target_word, writer_mean_Ws, all_loaded_data, writer_weight, device="cpu"): - words = [] - for word in target_word.split(" "): - if len(word) > 0: - words.append(word) - - word_Ws = [] - word_Cs = [] - for word in words: - writer_Ws, writer_Cs = convenience.get_DSD(net, word, writer_mean_Ws, all_loaded_data, device) - word_Ws.append(writer_Ws) - word_Cs.append(writer_Cs) - - if len(words) == 0: - word_Ws.append(torch.tensor([])) - word_Cs.append(torch.tensor([])) - - return words, word_Ws, word_Cs, *update_writer_slider(writer_weight, words, word_Ws, word_Cs) - -def update_writer_slider(weight, words, all_word_Ws, all_word_Cs): - weights = [1 - weight, weight] - net.clamp_mdn = 0 - svg = convenience.draw_words_svg(words, all_word_Ws, all_word_Cs, weights, net) - return gr.HTML.update(value=svg.tostring()), gr.File.update(visible=False), gr.Button.update(visible=True), weight, svg - - -def update_writer_download(writer_svg): - writer_svg.saveas("./DSD_writer_interpolation.svg") - return gr.File.update(value="./DSD_writer_interpolation.svg", visible=True), gr.Button.update(visible=False) - -# for character blend -def update_blend_chars(c1, c2, weight, char_Ws): - blend_chars = [c1, c2] - char_Cs = torch.zeros(1, 2, convenience.L, convenience.L) - for i in range(2): # get corners of grid - _, char_matrix = convenience.get_DSD(net, blend_chars[i], default_mean_global_W, [default_loaded_data], device) - char_Cs[:, i, :, :] = char_matrix - - return gr.Slider.update(label=f"'{c1}' vs. 
'{c2}'"), char_Cs.detach(), blend_chars, *update_char_slider(weight, char_Ws, char_Cs, blend_chars) - -def update_char_slider(weight, char_Ws, char_Cs, blend_chars): - """Generates an image of handwritten text based on target_sentence""" - net.clamp_mdn = 0 - character_weights = [1 - weight, weight] - - all_W_c = convenience.get_character_blend_W_c(character_weights, char_Ws, char_Cs) - all_commands = convenience.get_commands(net, blend_chars[0], all_W_c) - svg = convenience.commands_to_svg(all_commands, 750, 160, 375) - return gr.HTML.update(value=svg.tostring()), gr.File.update(visible=False), gr.Button.update(visible=True), weight, svg - -def update_char_download(char_svg): - char_svg.saveas("./DSD_char_interpolation.svg") - return gr.File.update(value="./DSD_char_interpolation.svg", visible=True), gr.Button.update(visible=False) - -# for MDN -def update_mdn_word(target_word, scale_sd, clamp_mdn): - mdn_words = [] - for word in target_word.split(" "): - mdn_words.append(word) - - all_word_mdn_Ws = [] - all_word_mdn_Cs = [] - for word in mdn_words: - all_writer_Ws, all_writer_Cs = convenience.get_DSD(net, word, default_mean_global_W, [default_loaded_data], device) - all_word_mdn_Ws.append(all_writer_Ws) - all_word_mdn_Cs.append(all_writer_Cs) - - return mdn_words, all_word_mdn_Ws, all_word_mdn_Cs, *sample_mdn(scale_sd, clamp_mdn, mdn_words, all_word_mdn_Ws, all_word_mdn_Cs) - - -def sample_mdn(maxs, maxr, mdn_words, all_word_mdn_Ws, all_word_mdn_Cs): - net.clamp_mdn = maxr - net.scale_sd = maxs - svg = convenience.draw_words_svg(mdn_words, all_word_mdn_Ws, all_word_mdn_Cs, [1], net) - return gr.HTML.update(value=svg.tostring()), gr.File.update(visible=False), gr.Button.update(visible=True), maxr, maxs, svg - -def update_mdn_download(mdn_svg): - mdn_svg.saveas("./DSD_add_randomness.svg") - return gr.File.update(value="./DSD_add_randomness.svg", visible=True), gr.Button.update(visible=False) - -device = 'cpu' -num_samples = 10 - -net = SynthesisNetwork(weight_dim=256, num_layers=3).to(device) - -if not torch.cuda.is_available(): - net.load_state_dict(torch.load('./model/250000.pt', map_location=torch.device(device))["model_state_dict"]) - - -dl = DataLoader(num_writer=1, num_samples=10, divider=5.0, datadir='./data/writers') - -writer_options = [5, 14, 15, 16, 17, 22, 25, 80, 120, 137, 147, 151] -all_loaded_data_DEFAULT = [] -chosen_writers_DEFAULT = [120, 80] -avail_char = "0 1 2 3 4 5 6 7 8 9 a b c d e f g h i j k l m n o p q r s t u v w x y z A B C D E F G H I J K L M N O P Q R S T U V W X Y Z ! ? \" ' * + - = : ; , . 
< > \ / [ ] ( ) # $ % &" -avail_char_list = avail_char.split(" ") -for writer_id in chosen_writers_DEFAULT: - loaded_data = dl.next_batch(TYPE='TRAIN', uid=writer_id, tids=list(range(num_samples))) - all_loaded_data_DEFAULT.append(loaded_data) - -default_loaded_data = all_loaded_data_DEFAULT[-1] -default_mean_global_W = convenience.get_mean_global_W(net, default_loaded_data, device) - -# data for writer interpolation -writer_words_DEFAULT = ["hello", "world"] -writer_mean_Ws_DEFAULT = [] -writer_all_word_Ws_DEFAULT = [] -writer_all_word_Cs_DEFAULT = [] -writer_weight_DEFAULT = 0.7 -writer_svg_DEFAULT = None - -# data for char interpolation -char_chosen_DEFAULT = ["y", "s"] -char_mean_global_W_DEFAULT = None -char_weight_DEFAULT = 0.7 -char_Ws_DEFAULT = default_mean_global_W.reshape(1, 1, convenience.L) -char_Cs_DEFAULT = None -char_svg_DEFAULT = None - -# # data for MDN -mdn_words_DEFAULT = ["hello", "world"] -all_word_mdn_Ws_DEFAULT = None -all_word_mdn_Cs_DEFAULT = None -clamp_mdn_DEFAULT = 0.5 -scale_sd_DEFAULT = 1 -mdn_svg_DEFAULT = None - -_wrds, writer_all_word_Ws_DEFAULT, writer_all_word_Cs_DEFAULT, _html, _file, _btn, _wt, _svg = update_writer_word(" ".join(writer_words_DEFAULT), writer_mean_Ws_DEFAULT, all_loaded_data_DEFAULT, writer_weight_DEFAULT) -_sldr, _wrtrs, writer_mean_Ws_DEFAULT, _wrds, _waww, _wawc, _html, _file, _btn, _wt, writer_svg_DEFAULT = update_chosen_writers(f"Writer {chosen_writers_DEFAULT[0]}", f"Writer {chosen_writers_DEFAULT[1]}", writer_weight_DEFAULT, writer_words_DEFAULT, all_loaded_data_DEFAULT) - -_wrds, all_word_mdn_Ws_DEFAULT, all_word_mdn_Cs_DEFAULT, _html, _file, _btn, _maxr, _maxs, mdn_svg_DEFAULT = update_mdn_word(" ".join(mdn_words_DEFAULT), scale_sd_DEFAULT, clamp_mdn_DEFAULT) -_sldr, char_Cs_DEFAULT, _chrs, _html, _file, _btn, _wght, char_svg_DEFAULT = update_blend_chars(*char_chosen_DEFAULT, char_weight_DEFAULT, char_Ws_DEFAULT) - -with gr.Blocks() as demo: - all_loaded_data_var = gr.Variable(all_loaded_data_DEFAULT) - chosen_writers_var = gr.Variable(chosen_writers_DEFAULT) - # data for writer interpolation - writer_words_var = gr.Variable(writer_words_DEFAULT) - writer_mean_Ws_var = gr.Variable(writer_mean_Ws_DEFAULT) - writer_all_word_Ws_var = gr.Variable([e.detach() for e in writer_all_word_Ws_DEFAULT]) - writer_all_word_Cs_var = gr.Variable([e.detach() for e in writer_all_word_Cs_DEFAULT]) - writer_weight_var = gr.Variable(writer_weight_DEFAULT) - writer_svg_var = gr.Variable(writer_svg_DEFAULT) - # data for char interpolation - char_chosen_var = gr.Variable(char_chosen_DEFAULT) - char_mean_global_W_var = gr.Variable(char_mean_global_W_DEFAULT) - char_weight_var = gr.Variable(char_weight_DEFAULT) - char_Ws_var = gr.Variable(char_Ws_DEFAULT.detach()) - char_Cs_var = gr.Variable(char_Cs_DEFAULT.detach()) - char_svg_var = gr.Variable(char_svg_DEFAULT) - # # data for MDN - mdn_words_var = gr.Variable(mdn_words_DEFAULT) - all_word_mdn_Ws_var = gr.Variable([e.detach() for e in all_word_mdn_Ws_DEFAULT]) - all_word_mdn_Cs_var = gr.Variable([e.detach() for e in all_word_mdn_Cs_DEFAULT]) - clamp_mdn_var = gr.Variable(clamp_mdn_DEFAULT) - scale_sd_var = gr.Variable(scale_sd_DEFAULT) - mdn_svg_var = gr.Variable(mdn_svg_DEFAULT) - - with gr.Tabs(): - with gr.TabItem("Blend Writers"): - target_word = gr.Textbox(label="Target Word", value=" ".join(writer_words_DEFAULT), max_lines=1) - with gr.Row(): - left_ratio_options = ["Style " + str(id) for i, id in enumerate(writer_options) if i % 2 == 0] - right_ratio_options = ["Style " + str(id) for i, id in 
enumerate(writer_options) if i % 2 == 1] - with gr.Column(): - writer1 = gr.Radio(left_ratio_options, value="Style 120", label="Style for first writer") - with gr.Column(): - writer2 = gr.Radio(right_ratio_options, value="Style 80", label="Style for second writer") - with gr.Row(): - writer_slider = gr.Slider(0, 1, value=writer_weight_DEFAULT, label="Style 120 vs. Style 80") - with gr.Row(): - writer_default_image = update_writer_slider(writer_weight_DEFAULT, writer_words_DEFAULT, writer_all_word_Ws_DEFAULT, writer_all_word_Cs_DEFAULT) - writer_output = gr.HTML(writer_default_image[0]["value"]) - with gr.Row(): - writer_download_btn = gr.Button("Save to SVG file") - writer_download_btn.style(full_width="true") - writer_download = gr.File(interactive=False, show_label=False, visible=False) - - writer_slider.change(fn=update_writer_slider, - inputs=[writer_slider, writer_words_var, writer_all_word_Ws_var, writer_all_word_Cs_var], - outputs=[writer_output, writer_download, writer_download_btn, writer_weight_var, writer_svg_var], show_progress=False) - target_word.submit(fn=update_writer_word, - inputs=[target_word, writer_mean_Ws_var, all_loaded_data_var, writer_weight_var], - outputs=[writer_words_var, writer_all_word_Ws_var, writer_all_word_Cs_var, writer_output, writer_download, writer_download_btn, writer_weight_var, writer_svg_var], show_progress=False) - writer1.change(fn=update_chosen_writers, - inputs=[writer1, writer2, writer_weight_var, writer_words_var, all_loaded_data_var], - outputs=[writer_slider, chosen_writers_var, writer_mean_Ws_var, writer_words_var, writer_all_word_Ws_var, writer_all_word_Cs_var, writer_output, writer_download, writer_download_btn, writer_weight_var, writer_svg_var]) - writer2.change(fn=update_chosen_writers, - inputs=[writer1, writer2, writer_weight_var, writer_words_var, all_loaded_data_var], - outputs=[writer_slider, chosen_writers_var, writer_mean_Ws_var, writer_words_var, writer_all_word_Ws_var, writer_all_word_Cs_var, writer_output, writer_download, writer_download_btn, writer_weight_var, writer_svg_var]) - writer_download_btn.click(fn=update_writer_download, - inputs=[writer_svg_var], - outputs=[writer_download, writer_download_btn]) - - with gr.TabItem("Blend Characters"): - with gr.Row(): - with gr.Column(): - char1 = gr.Dropdown(choices=avail_char_list, value=char_chosen_DEFAULT[0], label="Character 1") - with gr.Column(): - char2 = gr.Dropdown(choices=avail_char_list, value=char_chosen_DEFAULT[1], label="Character 2") - with gr.Row(): - char_slider = gr.Slider(0, 1, value=char_weight_DEFAULT, label=f"'{char_chosen_DEFAULT[0]}' vs. 
'{char_chosen_DEFAULT[1]}'") - with gr.Row(): - char_default_image = update_char_slider(char_weight_DEFAULT, char_Ws_DEFAULT, char_Cs_DEFAULT, char_chosen_DEFAULT) - char_output = gr.HTML(char_default_image[0]["value"]) - with gr.Row(): - char_download_btn = gr.Button("Save to SVG file") - char_download_btn.style(full_width="true") - char_download = gr.File(interactive=False, show_label=False, visible=False) - - char_slider.change(fn=update_char_slider, - inputs=[char_slider, char_Ws_var, char_Cs_var, char_chosen_var], - outputs=[char_output, char_download, char_download_btn, char_weight_var, char_svg_var], show_progress=False) - - char1.change(fn=update_blend_chars, - inputs=[char1, char2, char_weight_var, char_Ws_var], - outputs=[char_slider, char_Cs_var, char_chosen_var, char_output, char_download, char_download_btn, char_weight_var, char_svg_var]) - char2.change(fn=update_blend_chars, - inputs=[char1, char2, char_weight_var, char_Ws_var], - outputs=[char_slider, char_Cs_var, char_chosen_var, char_output, char_download, char_download_btn, char_weight_var, char_svg_var]) - - char_download_btn.click(fn=update_char_download, - inputs=[char_svg_var], - outputs=[char_download, char_download_btn], show_progress=True) - - with gr.TabItem("Add Randomness"): - mdn_word = gr.Textbox(label="Target Word", value=" ".join(mdn_words_DEFAULT), max_lines=1) - with gr.Row(): - with gr.Column(): - max_rand = gr.Slider(0, 1, value=clamp_mdn_DEFAULT, label="Maximum Randomness") - with gr.Column(): - scale_rand = gr.Slider(0, 3, value=scale_sd_DEFAULT, label="Scale of Randomness") - with gr.Row(): - mdn_sample_button = gr.Button(value="Resample") - with gr.Row(): - default_im = sample_mdn(scale_sd_DEFAULT, clamp_mdn_DEFAULT, mdn_words_DEFAULT, all_word_mdn_Ws_DEFAULT, all_word_mdn_Cs_DEFAULT) - mdn_output = gr.HTML(default_im[0]["value"]) - with gr.Row(): - randomness_download_btn = gr.Button("Save to SVG file") - randomness_download = gr.File(interactive=False, show_label=False, visible=False) - - max_rand.change(fn=sample_mdn, - inputs=[scale_rand, max_rand, mdn_words_var, all_word_mdn_Ws_var, all_word_mdn_Cs_var], - outputs=[mdn_output, randomness_download, randomness_download_btn, clamp_mdn_var, scale_sd_var, mdn_svg_var], show_progress=False) - scale_rand.change(fn=sample_mdn, - inputs=[scale_rand, max_rand, mdn_words_var, all_word_mdn_Ws_var, all_word_mdn_Cs_var], - outputs=[mdn_output, randomness_download, randomness_download_btn, clamp_mdn_var, scale_sd_var, mdn_svg_var], show_progress=False) - mdn_sample_button.click(fn=sample_mdn, - inputs=[scale_rand, max_rand, mdn_words_var, all_word_mdn_Ws_var, all_word_mdn_Cs_var], - outputs=[mdn_output, randomness_download, randomness_download_btn, clamp_mdn_var, scale_sd_var, mdn_svg_var], show_progress=False) - - mdn_word.submit(fn=update_mdn_word, - inputs=[mdn_word, scale_sd_var, clamp_mdn_var], - outputs=[mdn_words_var, all_word_mdn_Ws_var, all_word_mdn_Cs_var, mdn_output, randomness_download, randomness_download_btn, clamp_mdn_var, scale_sd_var, mdn_svg_var], show_progress=False) - - randomness_download_btn.click(fn=update_mdn_download, - inputs=[mdn_svg_var], - outputs=[randomness_download, randomness_download_btn]) - randomness_download_btn.style(full_width="true") -demo.launch() diff --git a/spaces/breadlicker45/the-jam-machine-app/constants.py b/spaces/breadlicker45/the-jam-machine-app/constants.py deleted file mode 100644 index 5c3405735f543ba206e6febd959f3baad40e4346..0000000000000000000000000000000000000000 --- 
a/spaces/breadlicker45/the-jam-machine-app/constants.py +++ /dev/null @@ -1,121 +0,0 @@ -# fmt: off -# Instrument mapping and mapping functions -INSTRUMENT_CLASSES = [ - {"name": "Piano", "program_range": range(0, 8), "family_number": 0}, - {"name": "Chromatic Percussion", "program_range": range(8, 16), "family_number": 1}, - {"name": "Organ", "program_range": range(16, 24), "family_number": 2}, - {"name": "Guitar", "program_range": range(24, 32), "family_number": 3}, - {"name": "Bass", "program_range": range(32, 40), "family_number": 4}, - {"name": "Strings", "program_range": range(40, 48), "family_number": 5}, - {"name": "Ensemble", "program_range": range(48, 56), "family_number": 6}, - {"name": "Brass", "program_range": range(56, 64), "family_number": 7}, - {"name": "Reed", "program_range": range(64, 72), "family_number": 8}, - {"name": "Pipe", "program_range": range(72, 80), "family_number": 9}, - {"name": "Synth Lead", "program_range": range(80, 88), "family_number": 10}, - {"name": "Synth Pad", "program_range": range(88, 96), "family_number": 11}, - {"name": "Synth Effects", "program_range": range(96, 104), "family_number": 12}, - {"name": "Ethnic", "program_range": range(104, 112), "family_number": 13}, - {"name": "Percussive", "program_range": range(112, 120), "family_number": 14}, - {"name": "Sound Effects", "program_range": range(120, 128), "family_number": 15}, -] -# fmt: on - -# Instrument mapping for decoding our MIDI sequence into MIDI instruments of our choice -INSTRUMENT_TRANSFER_CLASSES = [ - { - "name": "Piano", - "program_range": [4], - "family_number": 0, - "transfer_to": "Electric Piano 1", - }, - { - "name": "Chromatic Percussion", - "program_range": [11], - "family_number": 1, - "transfer_to": "Vibraphone", - }, - { - "name": "Organ", - "program_range": [17], - "family_number": 2, - "transfer_to": "Percussive Organ", - }, - { - "name": "Guitar", - "program_range": [80], - "family_number": 3, - "transfer_to": "Synth Lead Square", - }, - { - "name": "Bass", - "program_range": [38], - "family_number": 4, - "transfer_to": "Synth Bass 1", - }, - { - "name": "Strings", - "program_range": [50], - "family_number": 5, - "transfer_to": "Synth Strings 1", - }, - { - "name": "Ensemble", - "program_range": [51], - "family_number": 6, - "transfer_to": "Synth Strings 2", - }, - { - "name": "Brass", - "program_range": [63], - "family_number": 7, - "transfer_to": "Synth Brass 1", - }, - { - "name": "Reed", - "program_range": [64], - "family_number": 8, - "transfer_to": "Synth Brass 2", - }, - { - "name": "Pipe", - "program_range": [82], - "family_number": 9, - "transfer_to": "Synth Lead Calliope", - }, - { - "name": "Synth Lead", - "program_range": [81], # Synth Lead Sawtooth - "family_number": 10, - "transfer_to": "Synth Lead Sawtooth", - }, - { - "name": "Synth Pad", - "program_range": range(88, 96), - "family_number": 11, - "transfer_to": "Synth Pad", - }, - { - "name": "Synth Effects", - "program_range": range(96, 104), - "family_number": 12, - "transfer_to": "Synth Effects", - }, - { - "name": "Ethnic", - "program_range": range(104, 112), - "family_number": 13, - "transfer_to": "Ethnic", - }, - { - "name": "Percussive", - "program_range": range(112, 120), - "family_number": 14, - "transfer_to": "Percussive", - }, - { - "name": "Sound Effects", - "program_range": range(120, 128), - "family_number": 15, - "transfer_to": "Sound Effects", - }, -] diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/datasets/__init__.py 
b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/datasets/__init__.py deleted file mode 100644 index 260ccb9c43e5aa2d0f1fd28cfcbdd4f31913d16a..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/datasets/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from . import builtin # ensure the builtin datasets are registered - -__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")] diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/structures/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/structures/__init__.py deleted file mode 100644 index ed32c5e9d6c4c1599ba960681d9e86889e2cdbd8..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/structures/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from .chart import DensePoseChartPredictorOutput -from .chart_confidence import decorate_predictor_output_class_with_confidences -from .cse_confidence import decorate_cse_predictor_output_class_with_confidences -from .chart_result import ( - DensePoseChartResult, - DensePoseChartResultWithConfidences, - quantize_densepose_chart_result, - compress_quantized_densepose_chart_result, - decompress_compressed_densepose_chart_result, -) -from .cse import DensePoseEmbeddingPredictorOutput -from .data_relative import DensePoseDataRelative -from .list import DensePoseList -from .mesh import Mesh, create_mesh -from .transform_data import DensePoseTransformData, normalized_coords_transform diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/loggers/wandb/log_dataset.py b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/loggers/wandb/log_dataset.py deleted file mode 100644 index 06e81fb693072c99703e5c52b169892b7fd9a8cc..0000000000000000000000000000000000000000 --- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/loggers/wandb/log_dataset.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse - -from wandb_utils import WandbLogger - -from utils.general import LOGGER - -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def create_dataset_artifact(opt): - logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused - if not logger.wandb: - LOGGER.info("install wandb using `pip install wandb` to log the dataset") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--entity', default=None, help='W&B entity') - parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') - - opt = parser.parse_args() - opt.resume = False # Explicitly disallow resume check for dataset upload job - - create_dataset_artifact(opt) diff --git a/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/latent_diffusion/attention.py b/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/latent_diffusion/attention.py deleted file mode 100644 index 583dd169e7ec9502ee29faeb12689a46494838c0..0000000000000000000000000000000000000000 --- 
a/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/latent_diffusion/attention.py +++ /dev/null @@ -1,468 +0,0 @@ -from inspect import isfunction -import math -import torch -import torch.nn.functional as F -from torch import nn -from einops import rearrange - -from audioldm.latent_diffusion.util import checkpoint - - -def exists(val): - return val is not None - - -def uniq(arr): - return {el: True for el in arr}.keys() - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def max_neg_value(t): - return -torch.finfo(t.dtype).max - - -def init_(tensor): - dim = tensor.shape[-1] - std = 1 / math.sqrt(dim) - tensor.uniform_(-std, std) - return tensor - - -# feedforward -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = ( - nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU()) - if not glu - else GEGLU(dim, inner_dim) - ) - - self.net = nn.Sequential( - project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def Normalize(in_channels): - return torch.nn.GroupNorm( - num_groups=32, num_channels=in_channels, eps=1e-6, affine=True - ) - - -class LinearAttention(nn.Module): - def __init__(self, dim, heads=4, dim_head=32): - super().__init__() - self.heads = heads - hidden_dim = dim_head * heads - self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False) - self.to_out = nn.Conv2d(hidden_dim, dim, 1) - - def forward(self, x): - b, c, h, w = x.shape - qkv = self.to_qkv(x) - q, k, v = rearrange( - qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3 - ) - k = k.softmax(dim=-1) - context = torch.einsum("bhdn,bhen->bhde", k, v) - out = torch.einsum("bhde,bhdn->bhen", context, q) - out = rearrange( - out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w - ) - return self.to_out(out) - - -class SpatialSelfAttention(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.k = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.v = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.proj_out = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b, c, h, w = q.shape - q = rearrange(q, "b c h w -> b (h w) c") - k = rearrange(k, "b c h w -> b c (h w)") - w_ = torch.einsum("bij,bjk->bik", q, k) - - w_ = w_ * (int(c) ** (-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = rearrange(v, "b c h w -> b c (h w)") - w_ = rearrange(w_, "b i j -> b j i") - h_ = torch.einsum("bij,bjk->bik", v, w_) - h_ = rearrange(h_, "b c (h w) -> b c h w", h=h) - h_ = self.proj_out(h_) - - return 
x + h_ - - -class CrossAttention(nn.Module): - """ - ### Cross Attention Layer - This falls-back to self-attention when conditional embeddings are not specified. - """ - - # use_flash_attention: bool = True - use_flash_attention: bool = False - def __init__( - self, - query_dim, - context_dim=None, - heads=8, - dim_head=64, - dropout=0.0, - is_inplace: bool = True, - ): - # def __init__(self, d_model: int, d_cond: int, n_heads: int, d_head: int, is_inplace: bool = True): - """ - :param d_model: is the input embedding size - :param n_heads: is the number of attention heads - :param d_head: is the size of a attention head - :param d_cond: is the size of the conditional embeddings - :param is_inplace: specifies whether to perform the attention softmax computation inplace to - save memory - """ - super().__init__() - - self.is_inplace = is_inplace - self.n_heads = heads - self.d_head = dim_head - - # Attention scaling factor - self.scale = dim_head**-0.5 - - # The normal self-attention layer - if context_dim is None: - context_dim = query_dim - - # Query, key and value mappings - d_attn = dim_head * heads - self.to_q = nn.Linear(query_dim, d_attn, bias=False) - self.to_k = nn.Linear(context_dim, d_attn, bias=False) - self.to_v = nn.Linear(context_dim, d_attn, bias=False) - - # Final linear layer - self.to_out = nn.Sequential(nn.Linear(d_attn, query_dim), nn.Dropout(dropout)) - - # Setup [flash attention](https://github.com/HazyResearch/flash-attention). - # Flash attention is only used if it's installed - # and `CrossAttention.use_flash_attention` is set to `True`. - try: - # You can install flash attention by cloning their Github repo, - # [https://github.com/HazyResearch/flash-attention](https://github.com/HazyResearch/flash-attention) - # and then running `python setup.py install` - from flash_attn.flash_attention import FlashAttention - - self.flash = FlashAttention() - # Set the scale for scaled dot-product attention. 
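Both the flash path and the fallback below use the standard softmax scale dim_head ** -0.5 set in __init__. A minimal, self-contained sketch of plain scaled dot-product attention with that factor (independent of this class):

import torch

def sdp_attention(q, k, v):
    # q, k, v: [batch, seq, d_head]; scale logits by 1/sqrt(d_head) before softmax.
    scale = q.shape[-1] ** -0.5
    attn = torch.softmax(q @ k.transpose(-2, -1) * scale, dim=-1)
    return attn @ v

q = k = v = torch.randn(2, 16, 64)
out = sdp_attention(q, k, v)  # shape [2, 16, 64]
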
- self.flash.softmax_scale = self.scale - # Set to `None` if it's not installed - except ImportError: - self.flash = None - - def forward(self, x, context=None, mask=None): - """ - :param x: are the input embeddings of shape `[batch_size, height * width, d_model]` - :param cond: is the conditional embeddings of shape `[batch_size, n_cond, d_cond]` - """ - - # If `cond` is `None` we perform self attention - has_cond = context is not None - if not has_cond: - context = x - - # Get query, key and value vectors - q = self.to_q(x) - k = self.to_k(context) - v = self.to_v(context) - - # Use flash attention if it's available and the head size is less than or equal to `128` - if ( - CrossAttention.use_flash_attention - and self.flash is not None - and not has_cond - and self.d_head <= 128 - ): - return self.flash_attention(q, k, v) - # Otherwise, fallback to normal attention - else: - return self.normal_attention(q, k, v) - - def flash_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor): - """ - #### Flash Attention - :param q: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` - :param k: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` - :param v: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` - """ - - # Get batch size and number of elements along sequence axis (`width * height`) - batch_size, seq_len, _ = q.shape - - # Stack `q`, `k`, `v` vectors for flash attention, to get a single tensor of - # shape `[batch_size, seq_len, 3, n_heads * d_head]` - qkv = torch.stack((q, k, v), dim=2) - # Split the heads - qkv = qkv.view(batch_size, seq_len, 3, self.n_heads, self.d_head) - - # Flash attention works for head sizes `32`, `64` and `128`, so we have to pad the heads to - # fit this size. 
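The branch that follows rounds the head size up to the nearest size FlashAttention supports (32, 64, or 128) and zero-pads the stacked qkv tensor to match. The same rounding rule in isolation, as a sketch:

import torch

def flash_padded_head_size(d_head: int) -> int:
    # Round up to the nearest head size FlashAttention can handle.
    for size in (32, 64, 128):
        if d_head <= size:
            return size
    raise ValueError(f"head size {d_head} too large for Flash Attention")

qkv = torch.randn(4, 256, 3, 8, 40)                    # d_head = 40
pad = flash_padded_head_size(40) - 40                  # -> 24
qkv = torch.cat((qkv, qkv.new_zeros(4, 256, 3, 8, pad)), dim=-1)
print(qkv.shape)                                       # torch.Size([4, 256, 3, 8, 64])
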
- if self.d_head <= 32: - pad = 32 - self.d_head - elif self.d_head <= 64: - pad = 64 - self.d_head - elif self.d_head <= 128: - pad = 128 - self.d_head - else: - raise ValueError(f"Head size ${self.d_head} too large for Flash Attention") - - # Pad the heads - if pad: - qkv = torch.cat( - (qkv, qkv.new_zeros(batch_size, seq_len, 3, self.n_heads, pad)), dim=-1 - ) - - # Compute attention - # $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_{key}}}\Bigg)V$$ - # This gives a tensor of shape `[batch_size, seq_len, n_heads, d_padded]` - # TODO here I add the dtype changing - out, _ = self.flash(qkv.type(torch.float16)) - # Truncate the extra head size - out = out[:, :, :, : self.d_head].float() - # Reshape to `[batch_size, seq_len, n_heads * d_head]` - out = out.reshape(batch_size, seq_len, self.n_heads * self.d_head) - - # Map to `[batch_size, height * width, d_model]` with a linear layer - return self.to_out(out) - - def normal_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor): - """ - #### Normal Attention - - :param q: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` - :param k: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` - :param v: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` - """ - - # Split them to heads of shape `[batch_size, seq_len, n_heads, d_head]` - q = q.view(*q.shape[:2], self.n_heads, -1) # [bs, 64, 20, 32] - k = k.view(*k.shape[:2], self.n_heads, -1) # [bs, 1, 20, 32] - v = v.view(*v.shape[:2], self.n_heads, -1) - - # Calculate attention $\frac{Q K^\top}{\sqrt{d_{key}}}$ - attn = torch.einsum("bihd,bjhd->bhij", q, k) * self.scale - - # Compute softmax - # $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_{key}}}\Bigg)$$ - if self.is_inplace: - half = attn.shape[0] // 2 - attn[half:] = attn[half:].softmax(dim=-1) - attn[:half] = attn[:half].softmax(dim=-1) - else: - attn = attn.softmax(dim=-1) - - # Compute attention output - # $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_{key}}}\Bigg)V$$ - # attn: [bs, 20, 64, 1] - # v: [bs, 1, 20, 32] - out = torch.einsum("bhij,bjhd->bihd", attn, v) - # Reshape to `[batch_size, height * width, n_heads * d_head]` - out = out.reshape(*out.shape[:2], -1) - # Map to `[batch_size, height * width, d_model]` with a linear layer - return self.to_out(out) - - -# class CrossAttention(nn.Module): -# def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): -# super().__init__() -# inner_dim = dim_head * heads -# context_dim = default(context_dim, query_dim) - -# self.scale = dim_head ** -0.5 -# self.heads = heads - -# self.to_q = nn.Linear(query_dim, inner_dim, bias=False) -# self.to_k = nn.Linear(context_dim, inner_dim, bias=False) -# self.to_v = nn.Linear(context_dim, inner_dim, bias=False) - -# self.to_out = nn.Sequential( -# nn.Linear(inner_dim, query_dim), -# nn.Dropout(dropout) -# ) - -# def forward(self, x, context=None, mask=None): -# h = self.heads - -# q = self.to_q(x) -# context = default(context, x) -# k = self.to_k(context) -# v = self.to_v(context) - -# q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - -# sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - -# if exists(mask): -# mask = rearrange(mask, 'b ... 
-> b (...)') -# max_neg_value = -torch.finfo(sim.dtype).max -# mask = repeat(mask, 'b j -> (b h) () j', h=h) -# sim.masked_fill_(~mask, max_neg_value) - -# # attention, what we cannot get enough of -# attn = sim.softmax(dim=-1) - -# out = einsum('b i j, b j d -> b i d', attn, v) -# out = rearrange(out, '(b h) n d -> b n (h d)', h=h) -# return self.to_out(out) - - -class BasicTransformerBlock(nn.Module): - def __init__( - self, - dim, - n_heads, - d_head, - dropout=0.0, - context_dim=None, - gated_ff=True, - checkpoint=True, - ): - super().__init__() - self.attn1 = CrossAttention( - query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout - ) # is a self-attention - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = CrossAttention( - query_dim=dim, - context_dim=context_dim, - heads=n_heads, - dim_head=d_head, - dropout=dropout, - ) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.norm3 = nn.LayerNorm(dim) - self.checkpoint = checkpoint - - def forward(self, x, context=None): - if context is None: - return checkpoint(self._forward, (x,), self.parameters(), self.checkpoint) - else: - return checkpoint( - self._forward, (x, context), self.parameters(), self.checkpoint - ) - - def _forward(self, x, context=None): - x = self.attn1(self.norm1(x)) + x - x = self.attn2(self.norm2(x), context=context) + x - x = self.ff(self.norm3(x)) + x - return x - - -class SpatialTransformer(nn.Module): - """ - Transformer block for image-like data. - First, project the input (aka embedding) - and reshape to b, t, d. - Then apply standard transformer action. - Finally, reshape to image - """ - - def __init__( - self, - in_channels, - n_heads, - d_head, - depth=1, - dropout=0.0, - context_dim=None, - no_context=False, - ): - super().__init__() - - if no_context: - context_dim = None - - self.in_channels = in_channels - inner_dim = n_heads * d_head - self.norm = Normalize(in_channels) - - self.proj_in = nn.Conv2d( - in_channels, inner_dim, kernel_size=1, stride=1, padding=0 - ) - - self.transformer_blocks = nn.ModuleList( - [ - BasicTransformerBlock( - inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim - ) - for d in range(depth) - ] - ) - - self.proj_out = zero_module( - nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) - ) - - def forward(self, x, context=None): - # note: if no context is given, cross-attention defaults to self-attention - b, c, h, w = x.shape - x_in = x - x = self.norm(x) - x = self.proj_in(x) - x = rearrange(x, "b c h w -> b (h w) c") - for block in self.transformer_blocks: - x = block(x, context=context) - x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w) - x = self.proj_out(x) - return x + x_in diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/backbone/swin.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/backbone/swin.py deleted file mode 100644 index 80ad0c79c17c8cb6d68e8e1e9ed94bbb8d6b851d..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/backbone/swin.py +++ /dev/null @@ -1,689 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Implementation of Swin models from :paper:`swin`. - -This code is adapted from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py with minimal modifications. 
# noqa --------------------------------------------------------- -Swin Transformer -Copyright (c) 2021 Microsoft -Licensed under The MIT License [see LICENSE for details] -Written by Ze Liu, Yutong Lin, Yixuan Wei --------------------------------------------------------- -LICENSE: https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/461e003166a8083d0b620beacd4662a2df306bd6/LICENSE -""" - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from detectron2.modeling.backbone.backbone import Backbone - - -class Mlp(nn.Module): - """Multilayer perceptron.""" - - def __init__( - self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0 - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - """Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. - Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 - """ - - def __init__( - self, - dim, - window_size, - num_heads, - qkv_bias=True, - qk_scale=None, - attn_drop=0.0, - proj_drop=0.0, - ): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) - ) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=0.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """Forward function. - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = ( - self.qkv(x) - .reshape(B_, N, 3, self.num_heads, C // self.num_heads) - .permute(2, 0, 3, 1, 4) - ) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = q @ k.transpose(-2, -1) - - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index.view(-1) - ].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 - ) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute( - 2, 0, 1 - ).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class SwinTransformerBlock(nn.Module): - """Swin Transformer Block. - Args: - dim (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. 
Default: 0.0 - act_layer (nn.Module, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__( - self, - dim, - num_heads, - window_size=7, - shift_size=0, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - act_layer=nn.GELU, - norm_layer=nn.LayerNorm, - ): - super().__init__() - self.dim = dim - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, - window_size=to_2tuple(self.window_size), - num_heads=num_heads, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=drop, - ) - - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp( - in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop - ) - - self.H = None - self.W = None - - def forward(self, x, mask_matrix): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - mask_matrix: Attention mask for cyclic shift. - """ - B, L, C = x.shape - H, W = self.H, self.W - assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # pad feature maps to multiples of window size - pad_l = pad_t = 0 - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) - _, Hp, Wp, _ = x.shape - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - attn_mask = mask_matrix - else: - shifted_x = x - attn_mask = None - - # partition windows - x_windows = window_partition( - shifted_x, self.window_size - ) # nW*B, window_size, window_size, C - x_windows = x_windows.view( - -1, self.window_size * self.window_size, C - ) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - - if pad_r > 0 or pad_b > 0: - x = x[:, :H, :W, :].contiguous() - - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - -class PatchMerging(nn.Module): - """Patch Merging Layer - Args: - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x, H, W): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. 
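As a standalone shape sketch of the merge implemented in the body below (assuming even H and W): each 2x2 neighborhood is concatenated to 4*C channels before the Linear layer reduces it to 2*C.

import torch

B, H, W, C = 1, 8, 8, 96
x = torch.randn(B, H * W, C).view(B, H, W, C)
# Gather the four interleaved sub-grids and stack them channel-wise.
x = torch.cat(
    [x[:, 0::2, 0::2, :], x[:, 1::2, 0::2, :], x[:, 0::2, 1::2, :], x[:, 1::2, 1::2, :]],
    dim=-1,
)
x = x.view(B, -1, 4 * C)
print(x.shape)  # torch.Size([1, 16, 384]); Linear(4*C, 2*C) then maps 384 -> 192
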
- """ - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - - x = x.view(B, H, W, C) - - # padding - pad_input = (H % 2 == 1) or (W % 2 == 1) - if pad_input: - x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - -class BasicLayer(nn.Module): - """A basic Swin Transformer layer for one stage. - Args: - dim (int): Number of feature channels - depth (int): Depths of this stage. - num_heads (int): Number of attention head. - window_size (int): Local window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. - Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__( - self, - dim, - depth, - num_heads, - window_size=7, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - norm_layer=nn.LayerNorm, - downsample=None, - use_checkpoint=False, - ): - super().__init__() - self.window_size = window_size - self.shift_size = window_size // 2 - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList( - [ - SwinTransformerBlock( - dim=dim, - num_heads=num_heads, - window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop, - attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer, - ) - for i in range(depth) - ] - ) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x, H, W): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. 
- """ - - # calculate attention mask for SW-MSA - Hp = int(np.ceil(H / self.window_size)) * self.window_size - Wp = int(np.ceil(W / self.window_size)) * self.window_size - img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 - h_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - w_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition( - img_mask, self.window_size - ) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( - attn_mask == 0, float(0.0) - ) - - for blk in self.blocks: - blk.H, blk.W = H, W - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x, attn_mask) - else: - x = blk(x, attn_mask) - if self.downsample is not None: - x_down = self.downsample(x, H, W) - Wh, Ww = (H + 1) // 2, (W + 1) // 2 - return x, H, W, x_down, Wh, Ww - else: - return x, H, W, x, H, W - - -class PatchEmbed(nn.Module): - """Image to Patch Embedding - Args: - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. Default: None - """ - - def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - patch_size = to_2tuple(patch_size) - self.patch_size = patch_size - - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - """Forward function.""" - # padding - _, _, H, W = x.size() - if W % self.patch_size[1] != 0: - x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) - if H % self.patch_size[0] != 0: - x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) - - x = self.proj(x) # B C Wh Ww - if self.norm is not None: - Wh, Ww = x.size(2), x.size(3) - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) - - return x - - -class SwinTransformer(Backbone): - """Swin Transformer backbone. - A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted - Windows` - https://arxiv.org/pdf/2103.14030 - Args: - pretrain_img_size (int): Input image size for training the pretrained model, - used in absolute postion embedding. Default 224. - patch_size (int | tuple(int)): Patch size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - depths (tuple[int]): Depths of each Swin Transformer stage. - num_heads (tuple[int]): Number of attention head of each stage. - window_size (int): Window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. - drop_rate (float): Dropout rate. 
- attn_drop_rate (float): Attention dropout rate. Default: 0. - drop_path_rate (float): Stochastic depth rate. Default: 0.2. - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. - patch_norm (bool): If True, add normalization after patch embedding. Default: True. - out_indices (Sequence[int]): Output from which stages. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__( - self, - pretrain_img_size=224, - patch_size=4, - in_chans=3, - embed_dim=96, - depths=(2, 2, 6, 2), - num_heads=(3, 6, 12, 24), - window_size=7, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop_rate=0.0, - attn_drop_rate=0.0, - drop_path_rate=0.2, - norm_layer=nn.LayerNorm, - ape=False, - patch_norm=True, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - use_checkpoint=False, - ): - super().__init__() - - self.pretrain_img_size = pretrain_img_size - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.out_indices = out_indices - self.frozen_stages = frozen_stages - - # split image into non-overlapping patches - self.patch_embed = PatchEmbed( - patch_size=patch_size, - in_chans=in_chans, - embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None, - ) - - # absolute position embedding - if self.ape: - pretrain_img_size = to_2tuple(pretrain_img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [ - pretrain_img_size[0] // patch_size[0], - pretrain_img_size[1] // patch_size[1], - ] - - self.absolute_pos_embed = nn.Parameter( - torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]) - ) - trunc_normal_(self.absolute_pos_embed, std=0.02) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = [ - x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) - ] # stochastic depth decay rule - - # build layers - self.layers = nn.ModuleList() - for i_layer in range(self.num_layers): - layer = BasicLayer( - dim=int(embed_dim * 2**i_layer), - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], - norm_layer=norm_layer, - downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, - use_checkpoint=use_checkpoint, - ) - self.layers.append(layer) - - num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)] - self.num_features = num_features - - # add a norm layer for each output - for i_layer in out_indices: - layer = norm_layer(num_features[i_layer]) - layer_name = f"norm{i_layer}" - self.add_module(layer_name, layer) - - self._freeze_stages() - self._out_features = ["p{}".format(i) for i in self.out_indices] - self._out_feature_channels = { - "p{}".format(i): self.embed_dim * 2**i for i in self.out_indices - } - self._out_feature_strides = {"p{}".format(i): 2 ** (i + 2) for i in self.out_indices} - self._size_devisibility = 32 - - self.apply(self._init_weights) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - self.patch_embed.eval() - for param in self.patch_embed.parameters(): - param.requires_grad = False - - if self.frozen_stages >= 1 and self.ape: - 
self.absolute_pos_embed.requires_grad = False - - if self.frozen_stages >= 2: - self.pos_drop.eval() - for i in range(0, self.frozen_stages - 1): - m = self.layers[i] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=0.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @property - def size_divisibility(self): - return self._size_divisibility - - def forward(self, x): - """Forward function.""" - x = self.patch_embed(x) - - Wh, Ww = x.size(2), x.size(3) - if self.ape: - # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate( - self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" - ) - x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C - else: - x = x.flatten(2).transpose(1, 2) - x = self.pos_drop(x) - - outs = {} - for i in range(self.num_layers): - layer = self.layers[i] - x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) - - if i in self.out_indices: - norm_layer = getattr(self, f"norm{i}") - x_out = norm_layer(x_out) - - out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() - outs["p{}".format(i)] = out - - return outs diff --git a/spaces/cccc-c/web-ui-pub/_next/static/6IdLO6aTsNNii8PXpVk8p/_ssgManifest.js b/spaces/cccc-c/web-ui-pub/_next/static/6IdLO6aTsNNii8PXpVk8p/_ssgManifest.js deleted file mode 100644 index 5b3ff592fd46c8736892a12864fdf3fed8775202..0000000000000000000000000000000000000000 --- a/spaces/cccc-c/web-ui-pub/_next/static/6IdLO6aTsNNii8PXpVk8p/_ssgManifest.js +++ /dev/null @@ -1 +0,0 @@ -self.__SSG_MANIFEST=new Set([]);self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB() \ No newline at end of file diff --git a/spaces/cccccch/VITS-fast-fine-tuning-DingZhen/text/thai.py b/spaces/cccccch/VITS-fast-fine-tuning-DingZhen/text/thai.py deleted file mode 100644 index 998207c01a85c710a46db1ec8b62c39c2d94bc84..0000000000000000000000000000000000000000 --- a/spaces/cccccch/VITS-fast-fine-tuning-DingZhen/text/thai.py +++ /dev/null @@ -1,44 +0,0 @@ -import re -from num_thai.thainumbers import NumThai - - -num = NumThai() - -# List of (Latin alphabet, Thai) pairs: -_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'เอ'), - ('b','บี'), - ('c','ซี'), - ('d','ดี'), - ('e','อี'), - ('f','เอฟ'), - ('g','จี'), - ('h','เอช'), - ('i','ไอ'), - ('j','เจ'), - ('k','เค'), - ('l','แอล'), - ('m','เอ็ม'), - ('n','เอ็น'), - ('o','โอ'), - ('p','พี'), - ('q','คิว'), - ('r','แอร์'), - ('s','เอส'), - ('t','ที'), - ('u','ยู'), - ('v','วี'), - ('w','ดับเบิลยู'), - ('x','เอ็กซ์'), - ('y','วาย'), - ('z','ซี') -]] - - -def num_to_thai(text): - return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text) - -def latin_to_thai(text): - for regex, replacement in _latin_to_thai: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/chenyangqi/FateZero/app_fatezero.py b/spaces/chenyangqi/FateZero/app_fatezero.py deleted file mode 100644 index 1271d25127223c8811e9bf22c6219f023964ad30..0000000000000000000000000000000000000000 --- a/spaces/chenyangqi/FateZero/app_fatezero.py +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import os - -import gradio as gr - -from inference_fatezero import merge_config_then_run 
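As the code below shows, pipe = merge_config_then_run() is constructed once and pipe.run is later passed to Gradio as the click/submit callback. A minimal sketch of that wiring pattern with a placeholder callback (the names here are illustrative, not the app's actual API):

import gradio as gr

def run_edit(video, source_prompt, target_prompt):
    # Stand-in for pipe.run(...); the real method returns the edited clip.
    return video

with gr.Blocks() as demo:
    inp = gr.Video(label='Input Source Video')
    src = gr.Textbox(label='Source Prompt')
    tgt = gr.Textbox(label='Target Prompt')
    out = gr.Video(label='Result')
    btn = gr.Button('Generate')
    btn.click(fn=run_edit, inputs=[inp, src, tgt], outputs=out)

demo.queue().launch()
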
- - -# TITLE = '# [FateZero](http://fate-zero-edit.github.io/)' -HF_TOKEN = os.getenv('HF_TOKEN') -# pipe = InferencePipeline(HF_TOKEN) -pipe = merge_config_then_run() -# app = InferenceUtil(HF_TOKEN) - -with gr.Blocks(css='style.css') as demo: - # gr.Markdown(TITLE) - - gr.HTML( - """ -
FateZero: Fusing Attentions for Zero-shot Text-based Video Editing
- Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, Qifeng Chen
- [arXiv] [Code] [Homepage] [Video]
- TL;DR: FateZero is the first zero-shot framework for text-driven video editing via pretrained diffusion models without training.
- """)
-
- gr.HTML("""
- We provide an Editing Guidance to help users choose hyperparameters when editing in-the-wild video.
- Note that due to the limits of memory and computing resources on Hugging Face, the results here are only toy examples and take longer to edit.
- You may duplicate the Space and upgrade to a GPU in Settings for better performance and faster inference without waiting in the queue.
- [Duplicate Space]
- Alternatively, try our GitHub code on your GPU.
      """) - - with gr.Row(): - with gr.Column(): - with gr.Accordion('Input Video', open=True): - # user_input_video = gr.File(label='Input Source Video') - user_input_video = gr.Video(label='Input Source Video', source='upload', type='numpy', format="mp4", visible=True).style(height="auto") - with gr.Accordion('Temporal Crop offset and Sampling Stride', open=False): - n_sample_frame = gr.Slider(label='Number of Frames', - minimum=0, - maximum=32, - step=1, - value=8) - stride = gr.Slider(label='Temporal stride', - minimum=0, - maximum=20, - step=1, - value=1) - start_sample_frame = gr.Number(label='Start frame in the video', - value=0, - precision=0) - - with gr.Accordion('Spatial Crop offset', open=False): - left_crop = gr.Number(label='Left crop', - value=0, - precision=0) - right_crop = gr.Number(label='Right crop', - value=0, - precision=0) - top_crop = gr.Number(label='Top crop', - value=0, - precision=0) - bottom_crop = gr.Number(label='Bottom crop', - value=0, - precision=0) - offset_list = [ - left_crop, - right_crop, - top_crop, - bottom_crop, - ] - - ImageSequenceDataset_list = [ - start_sample_frame, - n_sample_frame, - stride - ] + offset_list - - model_id = gr.Dropdown( - label='Model ID', - choices=[ - 'CompVis/stable-diffusion-v1-4', - # add shape editing ckpt here - ], - value='CompVis/stable-diffusion-v1-4') - - - with gr.Accordion('Text Prompt', open=True): - - source_prompt = gr.Textbox(label='Source Prompt', - info='A good prompt describes each frame and most objects in video. Especially, it has the object or attribute that we want to edit or preserve.', - max_lines=1, - placeholder='Example: "a silver jeep driving down a curvy road in the countryside"', - value='a silver jeep driving down a curvy road in the countryside') - target_prompt = gr.Textbox(label='Target Prompt', - info='A reasonable composition of video may achieve better results(e.g., "sunflower" video with "Van Gogh" prompt is better than "sunflower" with "Monet")', - max_lines=1, - placeholder='Example: "watercolor painting of a silver jeep driving down a curvy road in the countryside"', - value='watercolor painting of a silver jeep driving down a curvy road in the countryside') - - - - - - run_button = gr.Button('Generate') - - with gr.Column(): - result = gr.Video(label='Result') - # result.style(height=512, width=512) - with gr.Accordion('FateZero Parameters for attention fusing', open=True): - cross_replace_steps = gr.Slider(label='Cross-att replace steps', - info='More steps, replace more cross attention to preserve semantic layout.', - minimum=0.0, - maximum=1.0, - step=0.1, - value=0.7) - - self_replace_steps = gr.Slider(label='Self-att replace steps', - info='More steps, replace more spatial-temporal self-attention to preserve geometry and motion.', - minimum=0.0, - maximum=1.0, - step=0.1, - value=0.7) - - enhance_words = gr.Textbox(label='Enhanced words', - info='Amplify the target-words cross attention', - max_lines=1, - placeholder='Example: "watercolor "', - value='watercolor') - - enhance_words_value = gr.Slider(label='Target cross-att amplification', - info='larger value, more elements of target words', - minimum=0.0, - maximum=20.0, - step=1, - value=10) - with gr.Accordion('DDIM Parameters', open=True): - num_steps = gr.Slider(label='Number of Steps', - info='larger value has better editing capacity, but takes more time and memory. 
(50 steps may produces memory errors)', - minimum=0, - maximum=30, - step=1, - value=15) - guidance_scale = gr.Slider(label='CFG Scale', - minimum=0, - maximum=50, - step=0.1, - value=7.5) - with gr.Row(): - from example import style_example - examples = style_example - - gr.Examples(examples=examples, - inputs=[ - model_id, - user_input_video, - source_prompt, - target_prompt, - cross_replace_steps, - self_replace_steps, - enhance_words, - enhance_words_value, - num_steps, - guidance_scale, - user_input_video, - *ImageSequenceDataset_list - ], - outputs=result, - fn=pipe.run, - cache_examples=True, - # cache_examples=os.getenv('SYSTEM') == 'spaces' - ) - - inputs = [ - model_id, - user_input_video, - source_prompt, - target_prompt, - cross_replace_steps, - self_replace_steps, - enhance_words, - enhance_words_value, - num_steps, - guidance_scale, - user_input_video, - *ImageSequenceDataset_list - ] - target_prompt.submit(fn=pipe.run, inputs=inputs, outputs=result) - run_button.click(fn=pipe.run, inputs=inputs, outputs=result) - -demo.queue().launch() diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/DdsImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/DdsImagePlugin.py deleted file mode 100644 index a946daeaa6b9a5946fc5492443dfddbb10881c99..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/DdsImagePlugin.py +++ /dev/null @@ -1,291 +0,0 @@ -""" -A Pillow loader for .dds files (S3TC-compressed aka DXTC) -Jerome Leclanche - -Documentation: - https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt - -The contents of this file are hereby released in the public domain (CC0) -Full text of the CC0 license: - https://creativecommons.org/publicdomain/zero/1.0/ -""" - -import struct -from io import BytesIO - -from . 
import Image, ImageFile -from ._binary import o32le as o32 - -# Magic ("DDS ") -DDS_MAGIC = 0x20534444 - -# DDS flags -DDSD_CAPS = 0x1 -DDSD_HEIGHT = 0x2 -DDSD_WIDTH = 0x4 -DDSD_PITCH = 0x8 -DDSD_PIXELFORMAT = 0x1000 -DDSD_MIPMAPCOUNT = 0x20000 -DDSD_LINEARSIZE = 0x80000 -DDSD_DEPTH = 0x800000 - -# DDS caps -DDSCAPS_COMPLEX = 0x8 -DDSCAPS_TEXTURE = 0x1000 -DDSCAPS_MIPMAP = 0x400000 - -DDSCAPS2_CUBEMAP = 0x200 -DDSCAPS2_CUBEMAP_POSITIVEX = 0x400 -DDSCAPS2_CUBEMAP_NEGATIVEX = 0x800 -DDSCAPS2_CUBEMAP_POSITIVEY = 0x1000 -DDSCAPS2_CUBEMAP_NEGATIVEY = 0x2000 -DDSCAPS2_CUBEMAP_POSITIVEZ = 0x4000 -DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x8000 -DDSCAPS2_VOLUME = 0x200000 - -# Pixel Format -DDPF_ALPHAPIXELS = 0x1 -DDPF_ALPHA = 0x2 -DDPF_FOURCC = 0x4 -DDPF_PALETTEINDEXED8 = 0x20 -DDPF_RGB = 0x40 -DDPF_LUMINANCE = 0x20000 - - -# dds.h - -DDS_FOURCC = DDPF_FOURCC -DDS_RGB = DDPF_RGB -DDS_RGBA = DDPF_RGB | DDPF_ALPHAPIXELS -DDS_LUMINANCE = DDPF_LUMINANCE -DDS_LUMINANCEA = DDPF_LUMINANCE | DDPF_ALPHAPIXELS -DDS_ALPHA = DDPF_ALPHA -DDS_PAL8 = DDPF_PALETTEINDEXED8 - -DDS_HEADER_FLAGS_TEXTURE = DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH | DDSD_PIXELFORMAT -DDS_HEADER_FLAGS_MIPMAP = DDSD_MIPMAPCOUNT -DDS_HEADER_FLAGS_VOLUME = DDSD_DEPTH -DDS_HEADER_FLAGS_PITCH = DDSD_PITCH -DDS_HEADER_FLAGS_LINEARSIZE = DDSD_LINEARSIZE - -DDS_HEIGHT = DDSD_HEIGHT -DDS_WIDTH = DDSD_WIDTH - -DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS_TEXTURE -DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS_COMPLEX | DDSCAPS_MIPMAP -DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS_COMPLEX - -DDS_CUBEMAP_POSITIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEX -DDS_CUBEMAP_NEGATIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEX -DDS_CUBEMAP_POSITIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEY -DDS_CUBEMAP_NEGATIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEY -DDS_CUBEMAP_POSITIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEZ -DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEZ - - -# DXT1 -DXT1_FOURCC = 0x31545844 - -# DXT3 -DXT3_FOURCC = 0x33545844 - -# DXT5 -DXT5_FOURCC = 0x35545844 - - -# dxgiformat.h - -DXGI_FORMAT_R8G8B8A8_TYPELESS = 27 -DXGI_FORMAT_R8G8B8A8_UNORM = 28 -DXGI_FORMAT_R8G8B8A8_UNORM_SRGB = 29 -DXGI_FORMAT_BC5_TYPELESS = 82 -DXGI_FORMAT_BC5_UNORM = 83 -DXGI_FORMAT_BC5_SNORM = 84 -DXGI_FORMAT_BC6H_UF16 = 95 -DXGI_FORMAT_BC6H_SF16 = 96 -DXGI_FORMAT_BC7_TYPELESS = 97 -DXGI_FORMAT_BC7_UNORM = 98 -DXGI_FORMAT_BC7_UNORM_SRGB = 99 - - -class DdsImageFile(ImageFile.ImageFile): - format = "DDS" - format_description = "DirectDraw Surface" - - def _open(self): - if not _accept(self.fp.read(4)): - msg = "not a DDS file" - raise SyntaxError(msg) - (header_size,) = struct.unpack(" (offset, generation) - self.new_entries = {} # object ID => (offset, generation) - self.deleted_entries = {0: 65536} # object ID => generation - self.reading_finished = False - - def __setitem__(self, key, value): - if self.reading_finished: - self.new_entries[key] = value - else: - self.existing_entries[key] = value - if key in self.deleted_entries: - del self.deleted_entries[key] - - def __getitem__(self, key): - try: - return self.new_entries[key] - except KeyError: - return self.existing_entries[key] - - def __delitem__(self, key): - if key in self.new_entries: - generation = self.new_entries[key][1] + 1 - del self.new_entries[key] - self.deleted_entries[key] = generation - elif key in self.existing_entries: - generation = self.existing_entries[key][1] + 1 - self.deleted_entries[key] = generation - elif key in self.deleted_entries: - generation = self.deleted_entries[key] - 
else: - msg = ( - "object ID " + str(key) + " cannot be deleted because it doesn't exist" - ) - raise IndexError(msg) - - def __contains__(self, key): - return key in self.existing_entries or key in self.new_entries - - def __len__(self): - return len( - set(self.existing_entries.keys()) - | set(self.new_entries.keys()) - | set(self.deleted_entries.keys()) - ) - - def keys(self): - return ( - set(self.existing_entries.keys()) - set(self.deleted_entries.keys()) - ) | set(self.new_entries.keys()) - - def write(self, f): - keys = sorted(set(self.new_entries.keys()) | set(self.deleted_entries.keys())) - deleted_keys = sorted(set(self.deleted_entries.keys())) - startxref = f.tell() - f.write(b"xref\n") - while keys: - # find a contiguous sequence of object IDs - prev = None - for index, key in enumerate(keys): - if prev is None or prev + 1 == key: - prev = key - else: - contiguous_keys = keys[:index] - keys = keys[index:] - break - else: - contiguous_keys = keys - keys = None - f.write(b"%d %d\n" % (contiguous_keys[0], len(contiguous_keys))) - for object_id in contiguous_keys: - if object_id in self.new_entries: - f.write(b"%010d %05d n \n" % self.new_entries[object_id]) - else: - this_deleted_object_id = deleted_keys.pop(0) - check_format_condition( - object_id == this_deleted_object_id, - f"expected the next deleted object ID to be {object_id}, " - f"instead found {this_deleted_object_id}", - ) - try: - next_in_linked_list = deleted_keys[0] - except IndexError: - next_in_linked_list = 0 - f.write( - b"%010d %05d f \n" - % (next_in_linked_list, self.deleted_entries[object_id]) - ) - return startxref - - -class PdfName: - def __init__(self, name): - if isinstance(name, PdfName): - self.name = name.name - elif isinstance(name, bytes): - self.name = name - else: - self.name = name.encode("us-ascii") - - def name_as_str(self): - return self.name.decode("us-ascii") - - def __eq__(self, other): - return ( - isinstance(other, PdfName) and other.name == self.name - ) or other == self.name - - def __hash__(self): - return hash(self.name) - - def __repr__(self): - return f"PdfName({repr(self.name)})" - - @classmethod - def from_pdf_stream(cls, data): - return cls(PdfParser.interpret_name(data)) - - allowed_chars = set(range(33, 127)) - {ord(c) for c in "#%/()<>[]{}"} - - def __bytes__(self): - result = bytearray(b"/") - for b in self.name: - if b in self.allowed_chars: - result.append(b) - else: - result.extend(b"#%02X" % b) - return bytes(result) - - -class PdfArray(list): - def __bytes__(self): - return b"[ " + b" ".join(pdf_repr(x) for x in self) + b" ]" - - -class PdfDict(collections.UserDict): - def __setattr__(self, key, value): - if key == "data": - collections.UserDict.__setattr__(self, key, value) - else: - self[key.encode("us-ascii")] = value - - def __getattr__(self, key): - try: - value = self[key.encode("us-ascii")] - except KeyError as e: - raise AttributeError(key) from e - if isinstance(value, bytes): - value = decode_text(value) - if key.endswith("Date"): - if value.startswith("D:"): - value = value[2:] - - relationship = "Z" - if len(value) > 17: - relationship = value[14] - offset = int(value[15:17]) * 60 - if len(value) > 20: - offset += int(value[18:20]) - - format = "%Y%m%d%H%M%S"[: len(value) - 2] - value = time.strptime(value[: len(format) + 2], format) - if relationship in ["+", "-"]: - offset *= 60 - if relationship == "+": - offset *= -1 - value = time.gmtime(calendar.timegm(value) + offset) - return value - - def __bytes__(self): - out = bytearray(b"<<") - for key, value in 
self.items(): - if value is None: - continue - value = pdf_repr(value) - out.extend(b"\n") - out.extend(bytes(PdfName(key))) - out.extend(b" ") - out.extend(value) - out.extend(b"\n>>") - return bytes(out) - - -class PdfBinary: - def __init__(self, data): - self.data = data - - def __bytes__(self): - return b"<%s>" % b"".join(b"%02X" % b for b in self.data) - - -class PdfStream: - def __init__(self, dictionary, buf): - self.dictionary = dictionary - self.buf = buf - - def decode(self): - try: - filter = self.dictionary.Filter - except AttributeError: - return self.buf - if filter == b"FlateDecode": - try: - expected_length = self.dictionary.DL - except AttributeError: - expected_length = self.dictionary.Length - return zlib.decompress(self.buf, bufsize=int(expected_length)) - else: - msg = f"stream filter {repr(self.dictionary.Filter)} unknown/unsupported" - raise NotImplementedError(msg) - - -def pdf_repr(x): - if x is True: - return b"true" - elif x is False: - return b"false" - elif x is None: - return b"null" - elif isinstance(x, (PdfName, PdfDict, PdfArray, PdfBinary)): - return bytes(x) - elif isinstance(x, (int, float)): - return str(x).encode("us-ascii") - elif isinstance(x, time.struct_time): - return b"(D:" + time.strftime("%Y%m%d%H%M%SZ", x).encode("us-ascii") + b")" - elif isinstance(x, dict): - return bytes(PdfDict(x)) - elif isinstance(x, list): - return bytes(PdfArray(x)) - elif isinstance(x, str): - return pdf_repr(encode_text(x)) - elif isinstance(x, bytes): - # XXX escape more chars? handle binary garbage - x = x.replace(b"\\", b"\\\\") - x = x.replace(b"(", b"\\(") - x = x.replace(b")", b"\\)") - return b"(" + x + b")" - else: - return bytes(x) - - -class PdfParser: - """Based on - https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf - Supports PDF up to 1.4 - """ - - def __init__(self, filename=None, f=None, buf=None, start_offset=0, mode="rb"): - if buf and f: - msg = "specify buf or f or filename, but not both buf and f" - raise RuntimeError(msg) - self.filename = filename - self.buf = buf - self.f = f - self.start_offset = start_offset - self.should_close_buf = False - self.should_close_file = False - if filename is not None and f is None: - self.f = f = open(filename, mode) - self.should_close_file = True - if f is not None: - self.buf = buf = self.get_buf_from_file(f) - self.should_close_buf = True - if not filename and hasattr(f, "name"): - self.filename = f.name - self.cached_objects = {} - if buf: - self.read_pdf_info() - else: - self.file_size_total = self.file_size_this = 0 - self.root = PdfDict() - self.root_ref = None - self.info = PdfDict() - self.info_ref = None - self.page_tree_root = {} - self.pages = [] - self.orig_pages = [] - self.pages_ref = None - self.last_xref_section_offset = None - self.trailer_dict = {} - self.xref_table = XrefTable() - self.xref_table.reading_finished = True - if f: - self.seek_end() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - return False # do not suppress exceptions - - def start_writing(self): - self.close_buf() - self.seek_end() - - def close_buf(self): - try: - self.buf.close() - except AttributeError: - pass - self.buf = None - - def close(self): - if self.should_close_buf: - self.close_buf() - if self.f is not None and self.should_close_file: - self.f.close() - self.f = None - - def seek_end(self): - self.f.seek(0, os.SEEK_END) - - def write_header(self): - self.f.write(b"%PDF-1.4\n") - - def write_comment(self, s): - 
self.f.write(f"% {s}\n".encode()) - - def write_catalog(self): - self.del_root() - self.root_ref = self.next_object_id(self.f.tell()) - self.pages_ref = self.next_object_id(0) - self.rewrite_pages() - self.write_obj(self.root_ref, Type=PdfName(b"Catalog"), Pages=self.pages_ref) - self.write_obj( - self.pages_ref, - Type=PdfName(b"Pages"), - Count=len(self.pages), - Kids=self.pages, - ) - return self.root_ref - - def rewrite_pages(self): - pages_tree_nodes_to_delete = [] - for i, page_ref in enumerate(self.orig_pages): - page_info = self.cached_objects[page_ref] - del self.xref_table[page_ref.object_id] - pages_tree_nodes_to_delete.append(page_info[PdfName(b"Parent")]) - if page_ref not in self.pages: - # the page has been deleted - continue - # make dict keys into strings for passing to write_page - stringified_page_info = {} - for key, value in page_info.items(): - # key should be a PdfName - stringified_page_info[key.name_as_str()] = value - stringified_page_info["Parent"] = self.pages_ref - new_page_ref = self.write_page(None, **stringified_page_info) - for j, cur_page_ref in enumerate(self.pages): - if cur_page_ref == page_ref: - # replace the page reference with the new one - self.pages[j] = new_page_ref - # delete redundant Pages tree nodes from xref table - for pages_tree_node_ref in pages_tree_nodes_to_delete: - while pages_tree_node_ref: - pages_tree_node = self.cached_objects[pages_tree_node_ref] - if pages_tree_node_ref.object_id in self.xref_table: - del self.xref_table[pages_tree_node_ref.object_id] - pages_tree_node_ref = pages_tree_node.get(b"Parent", None) - self.orig_pages = [] - - def write_xref_and_trailer(self, new_root_ref=None): - if new_root_ref: - self.del_root() - self.root_ref = new_root_ref - if self.info: - self.info_ref = self.write_obj(None, self.info) - start_xref = self.xref_table.write(self.f) - num_entries = len(self.xref_table) - trailer_dict = {b"Root": self.root_ref, b"Size": num_entries} - if self.last_xref_section_offset is not None: - trailer_dict[b"Prev"] = self.last_xref_section_offset - if self.info: - trailer_dict[b"Info"] = self.info_ref - self.last_xref_section_offset = start_xref - self.f.write( - b"trailer\n" - + bytes(PdfDict(trailer_dict)) - + b"\nstartxref\n%d\n%%%%EOF" % start_xref - ) - - def write_page(self, ref, *objs, **dict_obj): - if isinstance(ref, int): - ref = self.pages[ref] - if "Type" not in dict_obj: - dict_obj["Type"] = PdfName(b"Page") - if "Parent" not in dict_obj: - dict_obj["Parent"] = self.pages_ref - return self.write_obj(ref, *objs, **dict_obj) - - def write_obj(self, ref, *objs, **dict_obj): - f = self.f - if ref is None: - ref = self.next_object_id(f.tell()) - else: - self.xref_table[ref.object_id] = (f.tell(), ref.generation) - f.write(bytes(IndirectObjectDef(*ref))) - stream = dict_obj.pop("stream", None) - if stream is not None: - dict_obj["Length"] = len(stream) - if dict_obj: - f.write(pdf_repr(dict_obj)) - for obj in objs: - f.write(pdf_repr(obj)) - if stream is not None: - f.write(b"stream\n") - f.write(stream) - f.write(b"\nendstream\n") - f.write(b"endobj\n") - return ref - - def del_root(self): - if self.root_ref is None: - return - del self.xref_table[self.root_ref.object_id] - del self.xref_table[self.root[b"Pages"].object_id] - - @staticmethod - def get_buf_from_file(f): - if hasattr(f, "getbuffer"): - return f.getbuffer() - elif hasattr(f, "getvalue"): - return f.getvalue() - else: - try: - return mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) - except ValueError: # cannot mmap an empty file - 
return b"" - - def read_pdf_info(self): - self.file_size_total = len(self.buf) - self.file_size_this = self.file_size_total - self.start_offset - self.read_trailer() - self.root_ref = self.trailer_dict[b"Root"] - self.info_ref = self.trailer_dict.get(b"Info", None) - self.root = PdfDict(self.read_indirect(self.root_ref)) - if self.info_ref is None: - self.info = PdfDict() - else: - self.info = PdfDict(self.read_indirect(self.info_ref)) - check_format_condition(b"Type" in self.root, "/Type missing in Root") - check_format_condition( - self.root[b"Type"] == b"Catalog", "/Type in Root is not /Catalog" - ) - check_format_condition(b"Pages" in self.root, "/Pages missing in Root") - check_format_condition( - isinstance(self.root[b"Pages"], IndirectReference), - "/Pages in Root is not an indirect reference", - ) - self.pages_ref = self.root[b"Pages"] - self.page_tree_root = self.read_indirect(self.pages_ref) - self.pages = self.linearize_page_tree(self.page_tree_root) - # save the original list of page references - # in case the user modifies, adds or deletes some pages - # and we need to rewrite the pages and their list - self.orig_pages = self.pages[:] - - def next_object_id(self, offset=None): - try: - # TODO: support reuse of deleted objects - reference = IndirectReference(max(self.xref_table.keys()) + 1, 0) - except ValueError: - reference = IndirectReference(1, 0) - if offset is not None: - self.xref_table[reference.object_id] = (offset, 0) - return reference - - delimiter = rb"[][()<>{}/%]" - delimiter_or_ws = rb"[][()<>{}/%\000\011\012\014\015\040]" - whitespace = rb"[\000\011\012\014\015\040]" - whitespace_or_hex = rb"[\000\011\012\014\015\0400-9a-fA-F]" - whitespace_optional = whitespace + b"*" - whitespace_mandatory = whitespace + b"+" - # No "\012" aka "\n" or "\015" aka "\r": - whitespace_optional_no_nl = rb"[\000\011\014\040]*" - newline_only = rb"[\r\n]+" - newline = whitespace_optional_no_nl + newline_only + whitespace_optional_no_nl - re_trailer_end = re.compile( - whitespace_mandatory - + rb"trailer" - + whitespace_optional - + rb"<<(.*>>)" - + newline - + rb"startxref" - + newline - + rb"([0-9]+)" - + newline - + rb"%%EOF" - + whitespace_optional - + rb"$", - re.DOTALL, - ) - re_trailer_prev = re.compile( - whitespace_optional - + rb"trailer" - + whitespace_optional - + rb"<<(.*?>>)" - + newline - + rb"startxref" - + newline - + rb"([0-9]+)" - + newline - + rb"%%EOF" - + whitespace_optional, - re.DOTALL, - ) - - def read_trailer(self): - search_start_offset = len(self.buf) - 16384 - if search_start_offset < self.start_offset: - search_start_offset = self.start_offset - m = self.re_trailer_end.search(self.buf, search_start_offset) - check_format_condition(m, "trailer end not found") - # make sure we found the LAST trailer - last_match = m - while m: - last_match = m - m = self.re_trailer_end.search(self.buf, m.start() + 16) - if not m: - m = last_match - trailer_data = m.group(1) - self.last_xref_section_offset = int(m.group(2)) - self.trailer_dict = self.interpret_trailer(trailer_data) - self.xref_table = XrefTable() - self.read_xref_table(xref_section_offset=self.last_xref_section_offset) - if b"Prev" in self.trailer_dict: - self.read_prev_trailer(self.trailer_dict[b"Prev"]) - - def read_prev_trailer(self, xref_section_offset): - trailer_offset = self.read_xref_table(xref_section_offset=xref_section_offset) - m = self.re_trailer_prev.search( - self.buf[trailer_offset : trailer_offset + 16384] - ) - check_format_condition(m, "previous trailer not found") - trailer_data = 
m.group(1) - check_format_condition( - int(m.group(2)) == xref_section_offset, - "xref section offset in previous trailer doesn't match what was expected", - ) - trailer_dict = self.interpret_trailer(trailer_data) - if b"Prev" in trailer_dict: - self.read_prev_trailer(trailer_dict[b"Prev"]) - - re_whitespace_optional = re.compile(whitespace_optional) - re_name = re.compile( - whitespace_optional - + rb"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?=" - + delimiter_or_ws - + rb")" - ) - re_dict_start = re.compile(whitespace_optional + rb"<<") - re_dict_end = re.compile(whitespace_optional + rb">>" + whitespace_optional) - - @classmethod - def interpret_trailer(cls, trailer_data): - trailer = {} - offset = 0 - while True: - m = cls.re_name.match(trailer_data, offset) - if not m: - m = cls.re_dict_end.match(trailer_data, offset) - check_format_condition( - m and m.end() == len(trailer_data), - "name not found in trailer, remaining data: " - + repr(trailer_data[offset:]), - ) - break - key = cls.interpret_name(m.group(1)) - value, offset = cls.get_value(trailer_data, m.end()) - trailer[key] = value - check_format_condition( - b"Size" in trailer and isinstance(trailer[b"Size"], int), - "/Size not in trailer or not an integer", - ) - check_format_condition( - b"Root" in trailer and isinstance(trailer[b"Root"], IndirectReference), - "/Root not in trailer or not an indirect reference", - ) - return trailer - - re_hashes_in_name = re.compile(rb"([^#]*)(#([0-9a-fA-F]{2}))?") - - @classmethod - def interpret_name(cls, raw, as_text=False): - name = b"" - for m in cls.re_hashes_in_name.finditer(raw): - if m.group(3): - name += m.group(1) + bytearray.fromhex(m.group(3).decode("us-ascii")) - else: - name += m.group(1) - if as_text: - return name.decode("utf-8") - else: - return bytes(name) - - re_null = re.compile(whitespace_optional + rb"null(?=" + delimiter_or_ws + rb")") - re_true = re.compile(whitespace_optional + rb"true(?=" + delimiter_or_ws + rb")") - re_false = re.compile(whitespace_optional + rb"false(?=" + delimiter_or_ws + rb")") - re_int = re.compile( - whitespace_optional + rb"([-+]?[0-9]+)(?=" + delimiter_or_ws + rb")" - ) - re_real = re.compile( - whitespace_optional - + rb"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?=" - + delimiter_or_ws - + rb")" - ) - re_array_start = re.compile(whitespace_optional + rb"\[") - re_array_end = re.compile(whitespace_optional + rb"]") - re_string_hex = re.compile( - whitespace_optional + rb"<(" + whitespace_or_hex + rb"*)>" - ) - re_string_lit = re.compile(whitespace_optional + rb"\(") - re_indirect_reference = re.compile( - whitespace_optional - + rb"([-+]?[0-9]+)" - + whitespace_mandatory - + rb"([-+]?[0-9]+)" - + whitespace_mandatory - + rb"R(?=" - + delimiter_or_ws - + rb")" - ) - re_indirect_def_start = re.compile( - whitespace_optional - + rb"([-+]?[0-9]+)" - + whitespace_mandatory - + rb"([-+]?[0-9]+)" - + whitespace_mandatory - + rb"obj(?=" - + delimiter_or_ws - + rb")" - ) - re_indirect_def_end = re.compile( - whitespace_optional + rb"endobj(?=" + delimiter_or_ws + rb")" - ) - re_comment = re.compile( - rb"(" + whitespace_optional + rb"%[^\r\n]*" + newline + rb")*" - ) - re_stream_start = re.compile(whitespace_optional + rb"stream\r?\n") - re_stream_end = re.compile( - whitespace_optional + rb"endstream(?=" + delimiter_or_ws + rb")" - ) - - @classmethod - def get_value(cls, data, offset, expect_indirect=None, max_nesting=-1): - if max_nesting == 0: - return None, None - m = cls.re_comment.match(data, offset) - if m: - offset = m.end() - m = 
cls.re_indirect_def_start.match(data, offset) - if m: - check_format_condition( - int(m.group(1)) > 0, - "indirect object definition: object ID must be greater than 0", - ) - check_format_condition( - int(m.group(2)) >= 0, - "indirect object definition: generation must be non-negative", - ) - check_format_condition( - expect_indirect is None - or expect_indirect - == IndirectReference(int(m.group(1)), int(m.group(2))), - "indirect object definition different than expected", - ) - object, offset = cls.get_value(data, m.end(), max_nesting=max_nesting - 1) - if offset is None: - return object, None - m = cls.re_indirect_def_end.match(data, offset) - check_format_condition(m, "indirect object definition end not found") - return object, m.end() - check_format_condition( - not expect_indirect, "indirect object definition not found" - ) - m = cls.re_indirect_reference.match(data, offset) - if m: - check_format_condition( - int(m.group(1)) > 0, - "indirect object reference: object ID must be greater than 0", - ) - check_format_condition( - int(m.group(2)) >= 0, - "indirect object reference: generation must be non-negative", - ) - return IndirectReference(int(m.group(1)), int(m.group(2))), m.end() - m = cls.re_dict_start.match(data, offset) - if m: - offset = m.end() - result = {} - m = cls.re_dict_end.match(data, offset) - while not m: - key, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) - if offset is None: - return result, None - value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) - result[key] = value - if offset is None: - return result, None - m = cls.re_dict_end.match(data, offset) - offset = m.end() - m = cls.re_stream_start.match(data, offset) - if m: - try: - stream_len = int(result[b"Length"]) - except (TypeError, KeyError, ValueError) as e: - msg = "bad or missing Length in stream dict (%r)" % result.get( - b"Length", None - ) - raise PdfFormatError(msg) from e - stream_data = data[m.end() : m.end() + stream_len] - m = cls.re_stream_end.match(data, m.end() + stream_len) - check_format_condition(m, "stream end not found") - offset = m.end() - result = PdfStream(PdfDict(result), stream_data) - else: - result = PdfDict(result) - return result, offset - m = cls.re_array_start.match(data, offset) - if m: - offset = m.end() - result = [] - m = cls.re_array_end.match(data, offset) - while not m: - value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) - result.append(value) - if offset is None: - return result, None - m = cls.re_array_end.match(data, offset) - return result, m.end() - m = cls.re_null.match(data, offset) - if m: - return None, m.end() - m = cls.re_true.match(data, offset) - if m: - return True, m.end() - m = cls.re_false.match(data, offset) - if m: - return False, m.end() - m = cls.re_name.match(data, offset) - if m: - return PdfName(cls.interpret_name(m.group(1))), m.end() - m = cls.re_int.match(data, offset) - if m: - return int(m.group(1)), m.end() - m = cls.re_real.match(data, offset) - if m: - # XXX Decimal instead of float??? 
- return float(m.group(1)), m.end() - m = cls.re_string_hex.match(data, offset) - if m: - # filter out whitespace - hex_string = bytearray( - b for b in m.group(1) if b in b"0123456789abcdefABCDEF" - ) - if len(hex_string) % 2 == 1: - # append a 0 if the length is not even - yes, at the end - hex_string.append(ord(b"0")) - return bytearray.fromhex(hex_string.decode("us-ascii")), m.end() - m = cls.re_string_lit.match(data, offset) - if m: - return cls.get_literal_string(data, m.end()) - # return None, offset # fallback (only for debugging) - msg = "unrecognized object: " + repr(data[offset : offset + 32]) - raise PdfFormatError(msg) - - re_lit_str_token = re.compile( - rb"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))" - ) - escaped_chars = { - b"n": b"\n", - b"r": b"\r", - b"t": b"\t", - b"b": b"\b", - b"f": b"\f", - b"(": b"(", - b")": b")", - b"\\": b"\\", - ord(b"n"): b"\n", - ord(b"r"): b"\r", - ord(b"t"): b"\t", - ord(b"b"): b"\b", - ord(b"f"): b"\f", - ord(b"("): b"(", - ord(b")"): b")", - ord(b"\\"): b"\\", - } - - @classmethod - def get_literal_string(cls, data, offset): - nesting_depth = 0 - result = bytearray() - for m in cls.re_lit_str_token.finditer(data, offset): - result.extend(data[offset : m.start()]) - if m.group(1): - result.extend(cls.escaped_chars[m.group(1)[1]]) - elif m.group(2): - result.append(int(m.group(2)[1:], 8)) - elif m.group(3): - pass - elif m.group(5): - result.extend(b"\n") - elif m.group(6): - result.extend(b"(") - nesting_depth += 1 - elif m.group(7): - if nesting_depth == 0: - return bytes(result), m.end() - result.extend(b")") - nesting_depth -= 1 - offset = m.end() - msg = "unfinished literal string" - raise PdfFormatError(msg) - - re_xref_section_start = re.compile(whitespace_optional + rb"xref" + newline) - re_xref_subsection_start = re.compile( - whitespace_optional - + rb"([0-9]+)" - + whitespace_mandatory - + rb"([0-9]+)" - + whitespace_optional - + newline_only - ) - re_xref_entry = re.compile(rb"([0-9]{10}) ([0-9]{5}) ([fn])( \r| \n|\r\n)") - - def read_xref_table(self, xref_section_offset): - subsection_found = False - m = self.re_xref_section_start.match( - self.buf, xref_section_offset + self.start_offset - ) - check_format_condition(m, "xref section start not found") - offset = m.end() - while True: - m = self.re_xref_subsection_start.match(self.buf, offset) - if not m: - check_format_condition( - subsection_found, "xref subsection start not found" - ) - break - subsection_found = True - offset = m.end() - first_object = int(m.group(1)) - num_objects = int(m.group(2)) - for i in range(first_object, first_object + num_objects): - m = self.re_xref_entry.match(self.buf, offset) - check_format_condition(m, "xref entry not found") - offset = m.end() - is_free = m.group(3) == b"f" - if not is_free: - generation = int(m.group(2)) - new_entry = (int(m.group(1)), generation) - if i not in self.xref_table: - self.xref_table[i] = new_entry - return offset - - def read_indirect(self, ref, max_nesting=-1): - offset, generation = self.xref_table[ref[0]] - check_format_condition( - generation == ref[1], - f"expected to find generation {ref[1]} for object ID {ref[0]} in xref " - f"table, instead found generation {generation} at offset {offset}", - ) - value = self.get_value( - self.buf, - offset + self.start_offset, - expect_indirect=IndirectReference(*ref), - max_nesting=max_nesting, - )[0] - self.cached_objects[ref] = value - return value - - def linearize_page_tree(self, node=None): - if node is None: - node = self.page_tree_root 
- check_format_condition( - node[b"Type"] == b"Pages", "/Type of page tree node is not /Pages" - ) - pages = [] - for kid in node[b"Kids"]: - kid_object = self.read_indirect(kid) - if kid_object[b"Type"] == b"Page": - pages.append(kid) - else: - pages.extend(self.linearize_page_tree(node=kid_object)) - return pages diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chardet/universaldetector.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chardet/universaldetector.py deleted file mode 100644 index 30c441dc28ee327076a850b1d3c88a9a2c8f04f0..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chardet/universaldetector.py +++ /dev/null @@ -1,362 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### -""" -Module containing the UniversalDetector detector class, which is the primary -class a user of ``chardet`` should use. - -:author: Mark Pilgrim (initial port to Python) -:author: Shy Shalom (original C code) -:author: Dan Blanchard (major refactoring for 3.0) -:author: Ian Cordasco -""" - - -import codecs -import logging -import re -from typing import List, Optional, Union - -from .charsetgroupprober import CharSetGroupProber -from .charsetprober import CharSetProber -from .enums import InputState, LanguageFilter, ProbingState -from .escprober import EscCharSetProber -from .latin1prober import Latin1Prober -from .macromanprober import MacRomanProber -from .mbcsgroupprober import MBCSGroupProber -from .resultdict import ResultDict -from .sbcsgroupprober import SBCSGroupProber -from .utf1632prober import UTF1632Prober - - -class UniversalDetector: - """ - The ``UniversalDetector`` class underlies the ``chardet.detect`` function - and coordinates all of the different charset probers. - - To get a ``dict`` containing an encoding and its confidence, you can simply - run: - - .. 
code:: - - u = UniversalDetector() - u.feed(some_bytes) - u.close() - detected = u.result - - """ - - MINIMUM_THRESHOLD = 0.20 - HIGH_BYTE_DETECTOR = re.compile(b"[\x80-\xFF]") - ESC_DETECTOR = re.compile(b"(\033|~{)") - WIN_BYTE_DETECTOR = re.compile(b"[\x80-\x9F]") - ISO_WIN_MAP = { - "iso-8859-1": "Windows-1252", - "iso-8859-2": "Windows-1250", - "iso-8859-5": "Windows-1251", - "iso-8859-6": "Windows-1256", - "iso-8859-7": "Windows-1253", - "iso-8859-8": "Windows-1255", - "iso-8859-9": "Windows-1254", - "iso-8859-13": "Windows-1257", - } - # Based on https://encoding.spec.whatwg.org/#names-and-labels - # but altered to match Python names for encodings and remove mappings - # that break tests. - LEGACY_MAP = { - "ascii": "Windows-1252", - "iso-8859-1": "Windows-1252", - "tis-620": "ISO-8859-11", - "iso-8859-9": "Windows-1254", - "gb2312": "GB18030", - "euc-kr": "CP949", - "utf-16le": "UTF-16", - } - - def __init__( - self, - lang_filter: LanguageFilter = LanguageFilter.ALL, - should_rename_legacy: bool = False, - ) -> None: - self._esc_charset_prober: Optional[EscCharSetProber] = None - self._utf1632_prober: Optional[UTF1632Prober] = None - self._charset_probers: List[CharSetProber] = [] - self.result: ResultDict = { - "encoding": None, - "confidence": 0.0, - "language": None, - } - self.done = False - self._got_data = False - self._input_state = InputState.PURE_ASCII - self._last_char = b"" - self.lang_filter = lang_filter - self.logger = logging.getLogger(__name__) - self._has_win_bytes = False - self.should_rename_legacy = should_rename_legacy - self.reset() - - @property - def input_state(self) -> int: - return self._input_state - - @property - def has_win_bytes(self) -> bool: - return self._has_win_bytes - - @property - def charset_probers(self) -> List[CharSetProber]: - return self._charset_probers - - def reset(self) -> None: - """ - Reset the UniversalDetector and all of its probers back to their - initial states. This is called by ``__init__``, so you only need to - call this directly in between analyses of different documents. - """ - self.result = {"encoding": None, "confidence": 0.0, "language": None} - self.done = False - self._got_data = False - self._has_win_bytes = False - self._input_state = InputState.PURE_ASCII - self._last_char = b"" - if self._esc_charset_prober: - self._esc_charset_prober.reset() - if self._utf1632_prober: - self._utf1632_prober.reset() - for prober in self._charset_probers: - prober.reset() - - def feed(self, byte_str: Union[bytes, bytearray]) -> None: - """ - Takes a chunk of a document and feeds it through all of the relevant - charset probers. - - After calling ``feed``, you can check the value of the ``done`` - attribute to see if you need to continue feeding the - ``UniversalDetector`` more data, or if it has made a prediction - (in the ``result`` attribute). - - .. note:: - You should always call ``close`` when you're done feeding in your - document if ``done`` is not already ``True``. 
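
        A minimal sketch of the intended call pattern (the filename and
        chunk size here are illustrative, not part of the original module)::

            u = UniversalDetector()
            with open("unknown.txt", "rb") as fp:
                for chunk in iter(lambda: fp.read(4096), b""):
                    u.feed(chunk)
                    if u.done:          # a confident prediction was made
                        break
            u.close()                   # finalizes ``result`` if not already done
            print(u.result)             # {'encoding': ..., 'confidence': ..., 'language': ...}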
- """ - if self.done: - return - - if not byte_str: - return - - if not isinstance(byte_str, bytearray): - byte_str = bytearray(byte_str) - - # First check for known BOMs, since these are guaranteed to be correct - if not self._got_data: - # If the data starts with BOM, we know it is UTF - if byte_str.startswith(codecs.BOM_UTF8): - # EF BB BF UTF-8 with BOM - self.result = { - "encoding": "UTF-8-SIG", - "confidence": 1.0, - "language": "", - } - elif byte_str.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)): - # FF FE 00 00 UTF-32, little-endian BOM - # 00 00 FE FF UTF-32, big-endian BOM - self.result = {"encoding": "UTF-32", "confidence": 1.0, "language": ""} - elif byte_str.startswith(b"\xFE\xFF\x00\x00"): - # FE FF 00 00 UCS-4, unusual octet order BOM (3412) - self.result = { - # TODO: This encoding is not supported by Python. Should remove? - "encoding": "X-ISO-10646-UCS-4-3412", - "confidence": 1.0, - "language": "", - } - elif byte_str.startswith(b"\x00\x00\xFF\xFE"): - # 00 00 FF FE UCS-4, unusual octet order BOM (2143) - self.result = { - # TODO: This encoding is not supported by Python. Should remove? - "encoding": "X-ISO-10646-UCS-4-2143", - "confidence": 1.0, - "language": "", - } - elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)): - # FF FE UTF-16, little endian BOM - # FE FF UTF-16, big endian BOM - self.result = {"encoding": "UTF-16", "confidence": 1.0, "language": ""} - - self._got_data = True - if self.result["encoding"] is not None: - self.done = True - return - - # If none of those matched and we've only see ASCII so far, check - # for high bytes and escape sequences - if self._input_state == InputState.PURE_ASCII: - if self.HIGH_BYTE_DETECTOR.search(byte_str): - self._input_state = InputState.HIGH_BYTE - elif ( - self._input_state == InputState.PURE_ASCII - and self.ESC_DETECTOR.search(self._last_char + byte_str) - ): - self._input_state = InputState.ESC_ASCII - - self._last_char = byte_str[-1:] - - # next we will look to see if it is appears to be either a UTF-16 or - # UTF-32 encoding - if not self._utf1632_prober: - self._utf1632_prober = UTF1632Prober() - - if self._utf1632_prober.state == ProbingState.DETECTING: - if self._utf1632_prober.feed(byte_str) == ProbingState.FOUND_IT: - self.result = { - "encoding": self._utf1632_prober.charset_name, - "confidence": self._utf1632_prober.get_confidence(), - "language": "", - } - self.done = True - return - - # If we've seen escape sequences, use the EscCharSetProber, which - # uses a simple state machine to check for known escape sequences in - # HZ and ISO-2022 encodings, since those are the only encodings that - # use such sequences. - if self._input_state == InputState.ESC_ASCII: - if not self._esc_charset_prober: - self._esc_charset_prober = EscCharSetProber(self.lang_filter) - if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT: - self.result = { - "encoding": self._esc_charset_prober.charset_name, - "confidence": self._esc_charset_prober.get_confidence(), - "language": self._esc_charset_prober.language, - } - self.done = True - # If we've seen high bytes (i.e., those with values greater than 127), - # we need to do more complicated checks using all our multi-byte and - # single-byte probers that are left. The single-byte probers - # use character bigram distributions to determine the encoding, whereas - # the multi-byte probers use a combination of character unigram and - # bigram distributions. 
- elif self._input_state == InputState.HIGH_BYTE: - if not self._charset_probers: - self._charset_probers = [MBCSGroupProber(self.lang_filter)] - # If we're checking non-CJK encodings, use single-byte prober - if self.lang_filter & LanguageFilter.NON_CJK: - self._charset_probers.append(SBCSGroupProber()) - self._charset_probers.append(Latin1Prober()) - self._charset_probers.append(MacRomanProber()) - for prober in self._charset_probers: - if prober.feed(byte_str) == ProbingState.FOUND_IT: - self.result = { - "encoding": prober.charset_name, - "confidence": prober.get_confidence(), - "language": prober.language, - } - self.done = True - break - if self.WIN_BYTE_DETECTOR.search(byte_str): - self._has_win_bytes = True - - def close(self) -> ResultDict: - """ - Stop analyzing the current document and come up with a final - prediction. - - :returns: The ``result`` attribute, a ``dict`` with the keys - `encoding`, `confidence`, and `language`. - """ - # Don't bother with checks if we're already done - if self.done: - return self.result - self.done = True - - if not self._got_data: - self.logger.debug("no data received!") - - # Default to ASCII if it is all we've seen so far - elif self._input_state == InputState.PURE_ASCII: - self.result = {"encoding": "ascii", "confidence": 1.0, "language": ""} - - # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD - elif self._input_state == InputState.HIGH_BYTE: - prober_confidence = None - max_prober_confidence = 0.0 - max_prober = None - for prober in self._charset_probers: - if not prober: - continue - prober_confidence = prober.get_confidence() - if prober_confidence > max_prober_confidence: - max_prober_confidence = prober_confidence - max_prober = prober - if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD): - charset_name = max_prober.charset_name - assert charset_name is not None - lower_charset_name = charset_name.lower() - confidence = max_prober.get_confidence() - # Use Windows encoding name instead of ISO-8859 if we saw any - # extra Windows-specific bytes - if lower_charset_name.startswith("iso-8859"): - if self._has_win_bytes: - charset_name = self.ISO_WIN_MAP.get( - lower_charset_name, charset_name - ) - # Rename legacy encodings with superset encodings if asked - if self.should_rename_legacy: - charset_name = self.LEGACY_MAP.get( - (charset_name or "").lower(), charset_name - ) - self.result = { - "encoding": charset_name, - "confidence": confidence, - "language": max_prober.language, - } - - # Log all prober confidences if none met MINIMUM_THRESHOLD - if self.logger.getEffectiveLevel() <= logging.DEBUG: - if self.result["encoding"] is None: - self.logger.debug("no probers hit minimum threshold") - for group_prober in self._charset_probers: - if not group_prober: - continue - if isinstance(group_prober, CharSetGroupProber): - for prober in group_prober.probers: - self.logger.debug( - "%s %s confidence = %s", - prober.charset_name, - prober.language, - prober.get_confidence(), - ) - else: - self.logger.debug( - "%s %s confidence = %s", - group_prober.charset_name, - group_prober.language, - group_prober.get_confidence(), - ) - return self.result diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/compat.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/compat.py deleted file mode 100644 index 1e3012d95c68b43929f60db254ee9fa020df80b0..0000000000000000000000000000000000000000 --- 
a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/compat.py +++ /dev/null @@ -1,39 +0,0 @@ -# encoding: utf-8 - -""" -Provides Python 2/3 compatibility objects -""" - -from __future__ import absolute_import, division, print_function, unicode_literals - -import sys - -# =========================================================================== -# Python 3 versions -# =========================================================================== - -if sys.version_info >= (3, 0): - - from collections.abc import Sequence - from io import BytesIO - - def is_string(obj): - """Return True if *obj* is a string, False otherwise.""" - return isinstance(obj, str) - - Unicode = str - -# =========================================================================== -# Python 2 versions -# =========================================================================== - -else: - - from collections import Sequence # noqa - from StringIO import StringIO as BytesIO # noqa - - def is_string(obj): - """Return True if *obj* is a string, False otherwise.""" - return isinstance(obj, basestring) # noqa - - Unicode = unicode # noqa diff --git a/spaces/cihyFjudo/fairness-paper-search/DVDFab 11.0.2.8 Crack FREE.md b/spaces/cihyFjudo/fairness-paper-search/DVDFab 11.0.2.8 Crack FREE.md deleted file mode 100644 index 8faaa158c0ad305f9d081067c9d5e6aa7d15606a..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/DVDFab 11.0.2.8 Crack FREE.md +++ /dev/null @@ -1,13 +0,0 @@ - -

Just tried it. Worked for me. You have to download and install DVDFab 3.9, then download the loader. Extract it. Run it. It will create a file called dvdfabldr on the desktop. Double-click on it and DVDFab will run, fully activated. I haven't tried copying anything with it though.

      -

      DVDFab 11.0.2.8 Crack


      Download >>> https://tinurli.com/2uwivw



      -

I just tried it with that same patch. Remove/delete the old dvdfab ldr file, then install the new DVDFab. Then run the patch installer again and run DVDFab from that file; DVDFab runs and is fully activated. I have not tried using it though.

      -

To use the latest version, simply download this, using Google Chrome.
Then go to DVDFab and download and install the free trial.
Then install the patch.
Then click on the loader to start DVDFab.
The latest version, 11.0.4.2, works perfectly and takes less than a minute to do.

      -

I'm well aware that I can buy the software instead of asking about this loader, but my original question is still valid!
If this is an error-free loader and what the actual crackers are using, then why isn't it listed as a new crack, rather than just the same loader being bundled with the newer version of Fab?!

      -

      -

In addition, you can put it to use for Blu-ray or CD disc burning in your preferred sound, picture, and video formats. Freemake Video Converter full cracked has all of the burning, video conversion, and video editing features, with a quick and easy conversion procedure. It has all of the essential qualities of a professional video converter.

      -

AnyToISO Crack is a fantastic media file conversion tool for the Windows, Mac, and iOS platforms. It removes the hassle when you want to change a file's size, and the multi-platform software works for anyone on a regular basis. AnyToISO Cracked Version is a completely licensed version: there is no need to run any further installer or code, as the crack already bypasses the trial edition. The software only needs activation, and then you can enjoy converting any format of video, audio, bitmap images, and millions of other formats.

      -

AnyToISO Pro runs with an up-to-date, special interface. AnyToISO Torrent keys are powerful enough to deliver the performance you need wherever you want to extend what you can do. By cracking it, users can step up from the basic version to pro-level efficiency. It is free to use. The images you want to burn, create, or convert can be reduced to even a tiny size. It is free from viruses, and the crack speeds up activation with the license key.

      -

AnyToISO converts files, images, and RAR, ZIP, and 7Z archives to CD and DVD images, working out the size and then burning the images for later use. For AnyToISO Full Crack this is a routine function: you can generate another disk, and handling images of files displayed in another folder, or even on the local disk, is not a big project; it converts whatever you like, with no compression problems. With the crack, then, you can augment the power of the software.

      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/ExpanDrive 5 Windows Crack 11 A Simple and Elegant Cloud Storage Solution for Windows.md b/spaces/cihyFjudo/fairness-paper-search/ExpanDrive 5 Windows Crack 11 A Simple and Elegant Cloud Storage Solution for Windows.md deleted file mode 100644 index bfcad5a1187b301f98fb1b3863d92c08ceb1a774..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/ExpanDrive 5 Windows Crack 11 A Simple and Elegant Cloud Storage Solution for Windows.md +++ /dev/null @@ -1,6 +0,0 @@ -

      expandrive 5 windows crack 11


      Download Zip >>> https://tinurli.com/2uwjMP



      -
      - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/cihyFjudo/fairness-paper-search/Free download son of sardar.mkv Watch the hilarious comedy of love and revenge.md b/spaces/cihyFjudo/fairness-paper-search/Free download son of sardar.mkv Watch the hilarious comedy of love and revenge.md deleted file mode 100644 index 2d380856a0aaddcbe3fb674397d2c0295043eb58..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Free download son of sardar.mkv Watch the hilarious comedy of love and revenge.md +++ /dev/null @@ -1,12 +0,0 @@ - -

Mp3 Juice is the most popular free MP3 search engine tool and music downloader. MP3 Juice is a great tool to convert and download YouTube videos and music. The Mp3 Juice website is the best way to quickly and easily download MP3 music. Its simplicity makes Mp3juice easy to use, so anyone can search for and download high-quality audio files.

      -

      Free download son of sardar.mkv


      DOWNLOAD ►►► https://tinurli.com/2uwiIU



      -

You can also copy and paste the YouTube URL and hit the convert button. This will convert the YouTube video into an MP3. After you click the search button, the conversion will begin, and your MP3 music file will be available for download in a matter of minutes.

      -

This website offers unlimited downloading of YouTube music and free Mp3 Juice song downloads in HD quality. You can also click "PLAY" to play the audio file before you download it. Mp3juices takes only 2-5 seconds to convert and download audio files.

      -

      The mp3juices website has no viruses and is completely safe to use. It's also a great alternative to paid mp3 music downloading tools. Mp3juice can be accessed in many languages. You can use it to convert your YouTube videos to mp3 format.

      -

      -

You can access this free MP3 download website via an internet or WiFi connection. Bookmark this website to make it easy to access on a regular basis. Once you have downloaded the audio file, open it in any audio player to listen offline in high quality.

      -

MP3 Juice music is easy to navigate and provides a simple interface for downloading audio. You might be wondering why people prefer Mp3juices for getting free music: the tool provides high-speed audio downloads, and users don't need to give any personal information.

      -

It is easy to download from Mp3 Juice: visit the website and enter the song name into the search box, or paste the URL. Select one search result and convert it to audio by clicking the download button. Finally, hit the Download button to get the audio file at high speed.

      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Neverwinter Nights 2 Item Database Discover the Hidden Potential of Items in NWN2.md b/spaces/cihyFjudo/fairness-paper-search/Neverwinter Nights 2 Item Database Discover the Hidden Potential of Items in NWN2.md deleted file mode 100644 index af5a069f11cd0952b62b5f95af461968a8fb7f20..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Neverwinter Nights 2 Item Database Discover the Hidden Potential of Items in NWN2.md +++ /dev/null @@ -1,6 +0,0 @@ - -

Love the new screens, cannot wait for more... this mod's the only reason I still have Neverwinter Nights installed (I played it to death, and the only hope for it to regain much interest is something like this).

      -

WARNING: The recipe database is a special database. The PGCC server may have a different list of materials for a given item. Remember to check both before building around it. If you find inconsistencies in items on the Wiki, please report them in the Discord Wiki channel so we can fix them.

      -

      neverwinter nights 2 item database


      DOWNLOAD ››››› https://tinurli.com/2uwkbo



      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/cffi/recompiler.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/cffi/recompiler.py deleted file mode 100644 index 5d9d32d7132027562c5a29405d625899611bc977..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/cffi/recompiler.py +++ /dev/null @@ -1,1581 +0,0 @@ -import os, sys, io -from . import ffiplatform, model -from .error import VerificationError -from .cffi_opcode import * - -VERSION_BASE = 0x2601 -VERSION_EMBEDDED = 0x2701 -VERSION_CHAR16CHAR32 = 0x2801 - -USE_LIMITED_API = (sys.platform != 'win32' or sys.version_info < (3, 0) or - sys.version_info >= (3, 5)) - - -class GlobalExpr: - def __init__(self, name, address, type_op, size=0, check_value=0): - self.name = name - self.address = address - self.type_op = type_op - self.size = size - self.check_value = check_value - - def as_c_expr(self): - return ' { "%s", (void *)%s, %s, (void *)%s },' % ( - self.name, self.address, self.type_op.as_c_expr(), self.size) - - def as_python_expr(self): - return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name, - self.check_value) - -class FieldExpr: - def __init__(self, name, field_offset, field_size, fbitsize, field_type_op): - self.name = name - self.field_offset = field_offset - self.field_size = field_size - self.fbitsize = fbitsize - self.field_type_op = field_type_op - - def as_c_expr(self): - spaces = " " * len(self.name) - return (' { "%s", %s,\n' % (self.name, self.field_offset) + - ' %s %s,\n' % (spaces, self.field_size) + - ' %s %s },' % (spaces, self.field_type_op.as_c_expr())) - - def as_python_expr(self): - raise NotImplementedError - - def as_field_python_expr(self): - if self.field_type_op.op == OP_NOOP: - size_expr = '' - elif self.field_type_op.op == OP_BITFIELD: - size_expr = format_four_bytes(self.fbitsize) - else: - raise NotImplementedError - return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(), - size_expr, - self.name) - -class StructUnionExpr: - def __init__(self, name, type_index, flags, size, alignment, comment, - first_field_index, c_fields): - self.name = name - self.type_index = type_index - self.flags = flags - self.size = size - self.alignment = alignment - self.comment = comment - self.first_field_index = first_field_index - self.c_fields = c_fields - - def as_c_expr(self): - return (' { "%s", %d, %s,' % (self.name, self.type_index, self.flags) - + '\n %s, %s, ' % (self.size, self.alignment) - + '%d, %d ' % (self.first_field_index, len(self.c_fields)) - + ('/* %s */ ' % self.comment if self.comment else '') - + '},') - - def as_python_expr(self): - flags = eval(self.flags, G_FLAGS) - fields_expr = [c_field.as_field_python_expr() - for c_field in self.c_fields] - return "(b'%s%s%s',%s)" % ( - format_four_bytes(self.type_index), - format_four_bytes(flags), - self.name, - ','.join(fields_expr)) - -class EnumExpr: - def __init__(self, name, type_index, size, signed, allenums): - self.name = name - self.type_index = type_index - self.size = size - self.signed = signed - self.allenums = allenums - - def as_c_expr(self): - return (' { "%s", %d, _cffi_prim_int(%s, %s),\n' - ' "%s" },' % (self.name, self.type_index, - self.size, self.signed, self.allenums)) - - def as_python_expr(self): - prim_index = { - (1, 0): PRIM_UINT8, (1, 1): PRIM_INT8, - (2, 0): PRIM_UINT16, (2, 1): PRIM_INT16, - (4, 0): PRIM_UINT32, (4, 1): PRIM_INT32, - (8, 0): PRIM_UINT64, (8, 1): 
PRIM_INT64, - }[self.size, self.signed] - return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index), - format_four_bytes(prim_index), - self.name, self.allenums) - -class TypenameExpr: - def __init__(self, name, type_index): - self.name = name - self.type_index = type_index - - def as_c_expr(self): - return ' { "%s", %d },' % (self.name, self.type_index) - - def as_python_expr(self): - return "b'%s%s'" % (format_four_bytes(self.type_index), self.name) - - -# ____________________________________________________________ - - -class Recompiler: - _num_externpy = 0 - - def __init__(self, ffi, module_name, target_is_python=False): - self.ffi = ffi - self.module_name = module_name - self.target_is_python = target_is_python - self._version = VERSION_BASE - - def needs_version(self, ver): - self._version = max(self._version, ver) - - def collect_type_table(self): - self._typesdict = {} - self._generate("collecttype") - # - all_decls = sorted(self._typesdict, key=str) - # - # prepare all FUNCTION bytecode sequences first - self.cffi_types = [] - for tp in all_decls: - if tp.is_raw_function: - assert self._typesdict[tp] is None - self._typesdict[tp] = len(self.cffi_types) - self.cffi_types.append(tp) # placeholder - for tp1 in tp.args: - assert isinstance(tp1, (model.VoidType, - model.BasePrimitiveType, - model.PointerType, - model.StructOrUnionOrEnum, - model.FunctionPtrType)) - if self._typesdict[tp1] is None: - self._typesdict[tp1] = len(self.cffi_types) - self.cffi_types.append(tp1) # placeholder - self.cffi_types.append('END') # placeholder - # - # prepare all OTHER bytecode sequences - for tp in all_decls: - if not tp.is_raw_function and self._typesdict[tp] is None: - self._typesdict[tp] = len(self.cffi_types) - self.cffi_types.append(tp) # placeholder - if tp.is_array_type and tp.length is not None: - self.cffi_types.append('LEN') # placeholder - assert None not in self._typesdict.values() - # - # collect all structs and unions and enums - self._struct_unions = {} - self._enums = {} - for tp in all_decls: - if isinstance(tp, model.StructOrUnion): - self._struct_unions[tp] = None - elif isinstance(tp, model.EnumType): - self._enums[tp] = None - for i, tp in enumerate(sorted(self._struct_unions, - key=lambda tp: tp.name)): - self._struct_unions[tp] = i - for i, tp in enumerate(sorted(self._enums, - key=lambda tp: tp.name)): - self._enums[tp] = i - # - # emit all bytecode sequences now - for tp in all_decls: - method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__) - method(tp, self._typesdict[tp]) - # - # consistency check - for op in self.cffi_types: - assert isinstance(op, CffiOp) - self.cffi_types = tuple(self.cffi_types) # don't change any more - - def _enum_fields(self, tp): - # When producing C, expand all anonymous struct/union fields. - # That's necessary to have C code checking the offsets of the - # individual fields contained in them. When producing Python, - # don't do it and instead write it like it is, with the - # corresponding fields having an empty name. Empty names are - # recognized at runtime when we import the generated Python - # file. 
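        # (Illustrative note, not part of the original source, using a
        #  hypothetical declaration: given
        #      struct s { struct { int a; }; int b; };
        #  C mode expands the anonymous member and emits offset checks for
        #  "a" and "b", while Python/ABI mode keeps the anonymous member as
        #  a field whose empty name is resolved on import.)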
- expand_anonymous_struct_union = not self.target_is_python - return tp.enumfields(expand_anonymous_struct_union) - - def _do_collect_type(self, tp): - if not isinstance(tp, model.BaseTypeByIdentity): - if isinstance(tp, tuple): - for x in tp: - self._do_collect_type(x) - return - if tp not in self._typesdict: - self._typesdict[tp] = None - if isinstance(tp, model.FunctionPtrType): - self._do_collect_type(tp.as_raw_function()) - elif isinstance(tp, model.StructOrUnion): - if tp.fldtypes is not None and ( - tp not in self.ffi._parser._included_declarations): - for name1, tp1, _, _ in self._enum_fields(tp): - self._do_collect_type(self._field_type(tp, name1, tp1)) - else: - for _, x in tp._get_items(): - self._do_collect_type(x) - - def _generate(self, step_name): - lst = self.ffi._parser._declarations.items() - for name, (tp, quals) in sorted(lst): - kind, realname = name.split(' ', 1) - try: - method = getattr(self, '_generate_cpy_%s_%s' % (kind, - step_name)) - except AttributeError: - raise VerificationError( - "not implemented in recompile(): %r" % name) - try: - self._current_quals = quals - method(tp, realname) - except Exception as e: - model.attach_exception_info(e, name) - raise - - # ---------- - - ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"] - - def collect_step_tables(self): - # collect the declarations for '_cffi_globals', '_cffi_typenames', etc. - self._lsts = {} - for step_name in self.ALL_STEPS: - self._lsts[step_name] = [] - self._seen_struct_unions = set() - self._generate("ctx") - self._add_missing_struct_unions() - # - for step_name in self.ALL_STEPS: - lst = self._lsts[step_name] - if step_name != "field": - lst.sort(key=lambda entry: entry.name) - self._lsts[step_name] = tuple(lst) # don't change any more - # - # check for a possible internal inconsistency: _cffi_struct_unions - # should have been generated with exactly self._struct_unions - lst = self._lsts["struct_union"] - for tp, i in self._struct_unions.items(): - assert i < len(lst) - assert lst[i].name == tp.name - assert len(lst) == len(self._struct_unions) - # same with enums - lst = self._lsts["enum"] - for tp, i in self._enums.items(): - assert i < len(lst) - assert lst[i].name == tp.name - assert len(lst) == len(self._enums) - - # ---------- - - def _prnt(self, what=''): - self._f.write(what + '\n') - - def write_source_to_f(self, f, preamble): - if self.target_is_python: - assert preamble is None - self.write_py_source_to_f(f) - else: - assert preamble is not None - self.write_c_source_to_f(f, preamble) - - def _rel_readlines(self, filename): - g = open(os.path.join(os.path.dirname(__file__), filename), 'r') - lines = g.readlines() - g.close() - return lines - - def write_c_source_to_f(self, f, preamble): - self._f = f - prnt = self._prnt - if self.ffi._embedding is not None: - prnt('#define _CFFI_USE_EMBEDDING') - if not USE_LIMITED_API: - prnt('#define _CFFI_NO_LIMITED_API') - # - # first the '#include' (actually done by inlining the file's content) - lines = self._rel_readlines('_cffi_include.h') - i = lines.index('#include "parse_c_type.h"\n') - lines[i:i+1] = self._rel_readlines('parse_c_type.h') - prnt(''.join(lines)) - # - # if we have ffi._embedding != None, we give it here as a macro - # and include an extra file - base_module_name = self.module_name.split('.')[-1] - if self.ffi._embedding is not None: - prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) - prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {') - 
self._print_string_literal_in_array(self.ffi._embedding) - prnt('0 };') - prnt('#ifdef PYPY_VERSION') - prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( - base_module_name,)) - prnt('#elif PY_MAJOR_VERSION >= 3') - prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( - base_module_name,)) - prnt('#else') - prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( - base_module_name,)) - prnt('#endif') - lines = self._rel_readlines('_embedding.h') - i = lines.index('#include "_cffi_errors.h"\n') - lines[i:i+1] = self._rel_readlines('_cffi_errors.h') - prnt(''.join(lines)) - self.needs_version(VERSION_EMBEDDED) - # - # then paste the C source given by the user, verbatim. - prnt('/************************************************************/') - prnt() - prnt(preamble) - prnt() - prnt('/************************************************************/') - prnt() - # - # the declaration of '_cffi_types' - prnt('static void *_cffi_types[] = {') - typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) - for i, op in enumerate(self.cffi_types): - comment = '' - if i in typeindex2type: - comment = ' // ' + typeindex2type[i]._get_c_name() - prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment)) - if not self.cffi_types: - prnt(' 0') - prnt('};') - prnt() - # - # call generate_cpy_xxx_decl(), for every xxx found from - # ffi._parser._declarations. This generates all the functions. - self._seen_constants = set() - self._generate("decl") - # - # the declaration of '_cffi_globals' and '_cffi_typenames' - nums = {} - for step_name in self.ALL_STEPS: - lst = self._lsts[step_name] - nums[step_name] = len(lst) - if nums[step_name] > 0: - prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % ( - step_name, step_name)) - for entry in lst: - prnt(entry.as_c_expr()) - prnt('};') - prnt() - # - # the declaration of '_cffi_includes' - if self.ffi._included_ffis: - prnt('static const char * const _cffi_includes[] = {') - for ffi_to_include in self.ffi._included_ffis: - try: - included_module_name, included_source = ( - ffi_to_include._assigned_source[:2]) - except AttributeError: - raise VerificationError( - "ffi object %r includes %r, but the latter has not " - "been prepared with set_source()" % ( - self.ffi, ffi_to_include,)) - if included_source is None: - raise VerificationError( - "not implemented yet: ffi.include() of a Python-based " - "ffi inside a C-based ffi") - prnt(' "%s",' % (included_module_name,)) - prnt(' NULL') - prnt('};') - prnt() - # - # the declaration of '_cffi_type_context' - prnt('static const struct _cffi_type_context_s _cffi_type_context = {') - prnt(' _cffi_types,') - for step_name in self.ALL_STEPS: - if nums[step_name] > 0: - prnt(' _cffi_%ss,' % step_name) - else: - prnt(' NULL, /* no %ss */' % step_name) - for step_name in self.ALL_STEPS: - if step_name != "field": - prnt(' %d, /* num_%ss */' % (nums[step_name], step_name)) - if self.ffi._included_ffis: - prnt(' _cffi_includes,') - else: - prnt(' NULL, /* no includes */') - prnt(' %d, /* num_types */' % (len(self.cffi_types),)) - flags = 0 - if self._num_externpy > 0 or self.ffi._embedding is not None: - flags |= 1 # set to mean that we use extern "Python" - prnt(' %d, /* flags */' % flags) - prnt('};') - prnt() - # - # the init function - prnt('#ifdef __GNUC__') - prnt('# pragma GCC visibility push(default) /* for -fvisibility= */') - prnt('#endif') - prnt() - prnt('#ifdef PYPY_VERSION') - prnt('PyMODINIT_FUNC') - prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) - prnt('{') - if flags & 
1: - prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') - prnt(' _cffi_call_python_org = ' - '(void(*)(struct _cffi_externpy_s *, char *))p[1];') - prnt(' }') - prnt(' p[0] = (const void *)0x%x;' % self._version) - prnt(' p[1] = &_cffi_type_context;') - prnt('#if PY_MAJOR_VERSION >= 3') - prnt(' return NULL;') - prnt('#endif') - prnt('}') - # on Windows, distutils insists on putting init_cffi_xyz in - # 'export_symbols', so instead of fighting it, just give up and - # give it one - prnt('# ifdef _MSC_VER') - prnt(' PyMODINIT_FUNC') - prnt('# if PY_MAJOR_VERSION >= 3') - prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,)) - prnt('# else') - prnt(' init%s(void) { }' % (base_module_name,)) - prnt('# endif') - prnt('# endif') - prnt('#elif PY_MAJOR_VERSION >= 3') - prnt('PyMODINIT_FUNC') - prnt('PyInit_%s(void)' % (base_module_name,)) - prnt('{') - prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( - self.module_name, self._version)) - prnt('}') - prnt('#else') - prnt('PyMODINIT_FUNC') - prnt('init%s(void)' % (base_module_name,)) - prnt('{') - prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( - self.module_name, self._version)) - prnt('}') - prnt('#endif') - prnt() - prnt('#ifdef __GNUC__') - prnt('# pragma GCC visibility pop') - prnt('#endif') - self._version = None - - def _to_py(self, x): - if isinstance(x, str): - return "b'%s'" % (x,) - if isinstance(x, (list, tuple)): - rep = [self._to_py(item) for item in x] - if len(rep) == 1: - rep.append('') - return "(%s)" % (','.join(rep),) - return x.as_python_expr() # Py2: unicode unexpected; Py3: bytes unexp. - - def write_py_source_to_f(self, f): - self._f = f - prnt = self._prnt - # - # header - prnt("# auto-generated file") - prnt("import _cffi_backend") - # - # the 'import' of the included ffis - num_includes = len(self.ffi._included_ffis or ()) - for i in range(num_includes): - ffi_to_include = self.ffi._included_ffis[i] - try: - included_module_name, included_source = ( - ffi_to_include._assigned_source[:2]) - except AttributeError: - raise VerificationError( - "ffi object %r includes %r, but the latter has not " - "been prepared with set_source()" % ( - self.ffi, ffi_to_include,)) - if included_source is not None: - raise VerificationError( - "not implemented yet: ffi.include() of a C-based " - "ffi inside a Python-based ffi") - prnt('from %s import ffi as _ffi%d' % (included_module_name, i)) - prnt() - prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,)) - prnt(" _version = 0x%x," % (self._version,)) - self._version = None - # - # the '_types' keyword argument - self.cffi_types = tuple(self.cffi_types) # don't change any more - types_lst = [op.as_python_bytes() for op in self.cffi_types] - prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),)) - typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) - # - # the keyword arguments from ALL_STEPS - for step_name in self.ALL_STEPS: - lst = self._lsts[step_name] - if len(lst) > 0 and step_name != "field": - prnt(' _%ss = %s,' % (step_name, self._to_py(lst))) - # - # the '_includes' keyword argument - if num_includes > 0: - prnt(' _includes = (%s,),' % ( - ', '.join(['_ffi%d' % i for i in range(num_includes)]),)) - # - # the footer - prnt(')') - - # ---------- - - def _gettypenum(self, type): - # a KeyError here is a bug. please report it! 
:-) - return self._typesdict[type] - - def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode): - extraarg = '' - if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type(): - if tp.is_integer_type() and tp.name != '_Bool': - converter = '_cffi_to_c_int' - extraarg = ', %s' % tp.name - elif isinstance(tp, model.UnknownFloatType): - # don't check with is_float_type(): it may be a 'long - # double' here, and _cffi_to_c_double would loose precision - converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),) - else: - cname = tp.get_c_name('') - converter = '(%s)_cffi_to_c_%s' % (cname, - tp.name.replace(' ', '_')) - if cname in ('char16_t', 'char32_t'): - self.needs_version(VERSION_CHAR16CHAR32) - errvalue = '-1' - # - elif isinstance(tp, model.PointerType): - self._convert_funcarg_to_c_ptr_or_array(tp, fromvar, - tovar, errcode) - return - # - elif (isinstance(tp, model.StructOrUnionOrEnum) or - isinstance(tp, model.BasePrimitiveType)): - # a struct (not a struct pointer) as a function argument; - # or, a complex (the same code works) - self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' - % (tovar, self._gettypenum(tp), fromvar)) - self._prnt(' %s;' % errcode) - return - # - elif isinstance(tp, model.FunctionPtrType): - converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('') - extraarg = ', _cffi_type(%d)' % self._gettypenum(tp) - errvalue = 'NULL' - # - else: - raise NotImplementedError(tp) - # - self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg)) - self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % ( - tovar, tp.get_c_name(''), errvalue)) - self._prnt(' %s;' % errcode) - - def _extra_local_variables(self, tp, localvars, freelines): - if isinstance(tp, model.PointerType): - localvars.add('Py_ssize_t datasize') - localvars.add('struct _cffi_freeme_s *large_args_free = NULL') - freelines.add('if (large_args_free != NULL)' - ' _cffi_free_array_arguments(large_args_free);') - - def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode): - self._prnt(' datasize = _cffi_prepare_pointer_call_argument(') - self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % ( - self._gettypenum(tp), fromvar, tovar)) - self._prnt(' if (datasize != 0) {') - self._prnt(' %s = ((size_t)datasize) <= 640 ? 
' - '(%s)alloca((size_t)datasize) : NULL;' % ( - tovar, tp.get_c_name(''))) - self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, ' - '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar)) - self._prnt(' datasize, &large_args_free) < 0)') - self._prnt(' %s;' % errcode) - self._prnt(' }') - - def _convert_expr_from_c(self, tp, var, context): - if isinstance(tp, model.BasePrimitiveType): - if tp.is_integer_type() and tp.name != '_Bool': - return '_cffi_from_c_int(%s, %s)' % (var, tp.name) - elif isinstance(tp, model.UnknownFloatType): - return '_cffi_from_c_double(%s)' % (var,) - elif tp.name != 'long double' and not tp.is_complex_type(): - cname = tp.name.replace(' ', '_') - if cname in ('char16_t', 'char32_t'): - self.needs_version(VERSION_CHAR16CHAR32) - return '_cffi_from_c_%s(%s)' % (cname, var) - else: - return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) - elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): - return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) - elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(model.PointerType(tp.item))) - elif isinstance(tp, model.StructOrUnion): - if tp.fldnames is None: - raise TypeError("'%s' is used as %s, but is opaque" % ( - tp._get_c_name(), context)) - return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) - elif isinstance(tp, model.EnumType): - return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) - else: - raise NotImplementedError(tp) - - # ---------- - # typedefs - - def _typedef_type(self, tp, name): - return self._global_type(tp, "(*(%s *)0)" % (name,)) - - def _generate_cpy_typedef_collecttype(self, tp, name): - self._do_collect_type(self._typedef_type(tp, name)) - - def _generate_cpy_typedef_decl(self, tp, name): - pass - - def _typedef_ctx(self, tp, name): - type_index = self._typesdict[tp] - self._lsts["typename"].append(TypenameExpr(name, type_index)) - - def _generate_cpy_typedef_ctx(self, tp, name): - tp = self._typedef_type(tp, name) - self._typedef_ctx(tp, name) - if getattr(tp, "origin", None) == "unknown_type": - self._struct_ctx(tp, tp.name, approxname=None) - elif isinstance(tp, model.NamedPointerType): - self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name, - named_ptr=tp) - - # ---------- - # function declarations - - def _generate_cpy_function_collecttype(self, tp, name): - self._do_collect_type(tp.as_raw_function()) - if tp.ellipsis and not self.target_is_python: - self._do_collect_type(tp) - - def _generate_cpy_function_decl(self, tp, name): - assert not self.target_is_python - assert isinstance(tp, model.FunctionPtrType) - if tp.ellipsis: - # cannot support vararg functions better than this: check for its - # exact type (including the fixed arguments), and build it as a - # constant function pointer (no CPython wrapper) - self._generate_cpy_constant_decl(tp, name) - return - prnt = self._prnt - numargs = len(tp.args) - if numargs == 0: - argname = 'noarg' - elif numargs == 1: - argname = 'arg0' - else: - argname = 'args' - # - # ------------------------------ - # the 'd' version of the function, only for addressof(lib, 'func') - arguments = [] - call_arguments = [] - context = 'argument of %s' % name - for i, type in enumerate(tp.args): - arguments.append(type.get_c_name(' x%d' % i, context)) - call_arguments.append('x%d' % i) - repr_arguments = ', 
'.join(arguments) - repr_arguments = repr_arguments or 'void' - if tp.abi: - abi = tp.abi + ' ' - else: - abi = '' - name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) - prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) - prnt('{') - call_arguments = ', '.join(call_arguments) - result_code = 'return ' - if isinstance(tp.result, model.VoidType): - result_code = '' - prnt(' %s%s(%s);' % (result_code, name, call_arguments)) - prnt('}') - # - prnt('#ifndef PYPY_VERSION') # ------------------------------ - # - prnt('static PyObject *') - prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) - prnt('{') - # - context = 'argument of %s' % name - for i, type in enumerate(tp.args): - arg = type.get_c_name(' x%d' % i, context) - prnt(' %s;' % arg) - # - localvars = set() - freelines = set() - for type in tp.args: - self._extra_local_variables(type, localvars, freelines) - for decl in sorted(localvars): - prnt(' %s;' % (decl,)) - # - if not isinstance(tp.result, model.VoidType): - result_code = 'result = ' - context = 'result of %s' % name - result_decl = ' %s;' % tp.result.get_c_name(' result', context) - prnt(result_decl) - prnt(' PyObject *pyresult;') - else: - result_decl = None - result_code = '' - # - if len(tp.args) > 1: - rng = range(len(tp.args)) - for i in rng: - prnt(' PyObject *arg%d;' % i) - prnt() - prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % ( - name, len(rng), len(rng), - ', '.join(['&arg%d' % i for i in rng]))) - prnt(' return NULL;') - prnt() - # - for i, type in enumerate(tp.args): - self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, - 'return NULL') - prnt() - # - prnt(' Py_BEGIN_ALLOW_THREADS') - prnt(' _cffi_restore_errno();') - call_arguments = ['x%d' % i for i in range(len(tp.args))] - call_arguments = ', '.join(call_arguments) - prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) - prnt(' _cffi_save_errno();') - prnt(' Py_END_ALLOW_THREADS') - prnt() - # - prnt(' (void)self; /* unused */') - if numargs == 0: - prnt(' (void)noarg; /* unused */') - if result_code: - prnt(' pyresult = %s;' % - self._convert_expr_from_c(tp.result, 'result', 'result type')) - for freeline in freelines: - prnt(' ' + freeline) - prnt(' return pyresult;') - else: - for freeline in freelines: - prnt(' ' + freeline) - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') - prnt('}') - # - prnt('#else') # ------------------------------ - # - # the PyPy version: need to replace struct/union arguments with - # pointers, and if the result is a struct/union, insert a first - # arg that is a pointer to the result. We also do that for - # complex args and return type. 
- def need_indirection(type): - return (isinstance(type, model.StructOrUnion) or - (isinstance(type, model.PrimitiveType) and - type.is_complex_type())) - difference = False - arguments = [] - call_arguments = [] - context = 'argument of %s' % name - for i, type in enumerate(tp.args): - indirection = '' - if need_indirection(type): - indirection = '*' - difference = True - arg = type.get_c_name(' %sx%d' % (indirection, i), context) - arguments.append(arg) - call_arguments.append('%sx%d' % (indirection, i)) - tp_result = tp.result - if need_indirection(tp_result): - context = 'result of %s' % name - arg = tp_result.get_c_name(' *result', context) - arguments.insert(0, arg) - tp_result = model.void_type - result_decl = None - result_code = '*result = ' - difference = True - if difference: - repr_arguments = ', '.join(arguments) - repr_arguments = repr_arguments or 'void' - name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, - repr_arguments) - prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) - prnt('{') - if result_decl: - prnt(result_decl) - call_arguments = ', '.join(call_arguments) - prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) - if result_decl: - prnt(' return result;') - prnt('}') - else: - prnt('# define _cffi_f_%s _cffi_d_%s' % (name, name)) - # - prnt('#endif') # ------------------------------ - prnt() - - def _generate_cpy_function_ctx(self, tp, name): - if tp.ellipsis and not self.target_is_python: - self._generate_cpy_constant_ctx(tp, name) - return - type_index = self._typesdict[tp.as_raw_function()] - numargs = len(tp.args) - if self.target_is_python: - meth_kind = OP_DLOPEN_FUNC - elif numargs == 0: - meth_kind = OP_CPYTHON_BLTN_N # 'METH_NOARGS' - elif numargs == 1: - meth_kind = OP_CPYTHON_BLTN_O # 'METH_O' - else: - meth_kind = OP_CPYTHON_BLTN_V # 'METH_VARARGS' - self._lsts["global"].append( - GlobalExpr(name, '_cffi_f_%s' % name, - CffiOp(meth_kind, type_index), - size='_cffi_d_%s' % name)) - - # ---------- - # named structs or unions - - def _field_type(self, tp_struct, field_name, tp_field): - if isinstance(tp_field, model.ArrayType): - actual_length = tp_field.length - if actual_length == '...': - ptr_struct_name = tp_struct.get_c_name('*') - actual_length = '_cffi_array_len(((%s)0)->%s)' % ( - ptr_struct_name, field_name) - tp_item = self._field_type(tp_struct, '%s[0]' % field_name, - tp_field.item) - tp_field = model.ArrayType(tp_item, actual_length) - return tp_field - - def _struct_collecttype(self, tp): - self._do_collect_type(tp) - if self.target_is_python: - # also requires nested anon struct/unions in ABI mode, recursively - for fldtype in tp.anonymous_struct_fields(): - self._struct_collecttype(fldtype) - - def _struct_decl(self, tp, cname, approxname): - if tp.fldtypes is None: - return - prnt = self._prnt - checkfuncname = '_cffi_checkfld_%s' % (approxname,) - prnt('_CFFI_UNUSED_FN') - prnt('static void %s(%s *p)' % (checkfuncname, cname)) - prnt('{') - prnt(' /* only to generate compile-time warnings or errors */') - prnt(' (void)p;') - for fname, ftype, fbitsize, fqual in self._enum_fields(tp): - try: - if ftype.is_integer_type() or fbitsize >= 0: - # accept all integers, but complain on float or double - if fname != '': - prnt(" (void)((p->%s) | 0); /* check that '%s.%s' is " - "an integer */" % (fname, cname, fname)) - continue - # only accept exactly the type declared, except that '[]' - # is interpreted as a '*' and so will match any array length. - # (It would also match '*', but that's harder to detect...) 
- while (isinstance(ftype, model.ArrayType) - and (ftype.length is None or ftype.length == '...')): - ftype = ftype.item - fname = fname + '[0]' - prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), - fname)) - except VerificationError as e: - prnt(' /* %s */' % str(e)) # cannot verify it, ignore - prnt('}') - prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname)) - prnt() - - def _struct_ctx(self, tp, cname, approxname, named_ptr=None): - type_index = self._typesdict[tp] - reason_for_not_expanding = None - flags = [] - if isinstance(tp, model.UnionType): - flags.append("_CFFI_F_UNION") - if tp.fldtypes is None: - flags.append("_CFFI_F_OPAQUE") - reason_for_not_expanding = "opaque" - if (tp not in self.ffi._parser._included_declarations and - (named_ptr is None or - named_ptr not in self.ffi._parser._included_declarations)): - if tp.fldtypes is None: - pass # opaque - elif tp.partial or any(tp.anonymous_struct_fields()): - pass # field layout obtained silently from the C compiler - else: - flags.append("_CFFI_F_CHECK_FIELDS") - if tp.packed: - if tp.packed > 1: - raise NotImplementedError( - "%r is declared with 'pack=%r'; only 0 or 1 are " - "supported in API mode (try to use \"...;\", which " - "does not require a 'pack' declaration)" % - (tp, tp.packed)) - flags.append("_CFFI_F_PACKED") - else: - flags.append("_CFFI_F_EXTERNAL") - reason_for_not_expanding = "external" - flags = '|'.join(flags) or '0' - c_fields = [] - if reason_for_not_expanding is None: - enumfields = list(self._enum_fields(tp)) - for fldname, fldtype, fbitsize, fqual in enumfields: - fldtype = self._field_type(tp, fldname, fldtype) - self._check_not_opaque(fldtype, - "field '%s.%s'" % (tp.name, fldname)) - # cname is None for _add_missing_struct_unions() only - op = OP_NOOP - if fbitsize >= 0: - op = OP_BITFIELD - size = '%d /* bits */' % fbitsize - elif cname is None or ( - isinstance(fldtype, model.ArrayType) and - fldtype.length is None): - size = '(size_t)-1' - else: - size = 'sizeof(((%s)0)->%s)' % ( - tp.get_c_name('*') if named_ptr is None - else named_ptr.name, - fldname) - if cname is None or fbitsize >= 0: - offset = '(size_t)-1' - elif named_ptr is not None: - offset = '((char *)&((%s)0)->%s) - (char *)0' % ( - named_ptr.name, fldname) - else: - offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname) - c_fields.append( - FieldExpr(fldname, offset, size, fbitsize, - CffiOp(op, self._typesdict[fldtype]))) - first_field_index = len(self._lsts["field"]) - self._lsts["field"].extend(c_fields) - # - if cname is None: # unknown name, for _add_missing_struct_unions - size = '(size_t)-2' - align = -2 - comment = "unnamed" - else: - if named_ptr is not None: - size = 'sizeof(*(%s)0)' % (named_ptr.name,) - align = '-1 /* unknown alignment */' - else: - size = 'sizeof(%s)' % (cname,) - align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,) - comment = None - else: - size = '(size_t)-1' - align = -1 - first_field_index = -1 - comment = reason_for_not_expanding - self._lsts["struct_union"].append( - StructUnionExpr(tp.name, type_index, flags, size, align, comment, - first_field_index, c_fields)) - self._seen_struct_unions.add(tp) - - def _check_not_opaque(self, tp, location): - while isinstance(tp, model.ArrayType): - tp = tp.item - if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None: - raise TypeError( - "%s is of an opaque type (not declared in cdef())" % location) - - def _add_missing_struct_unions(self): - # not very nice, but some 
struct declarations might be missing - # because they don't have any known C name. Check that they are - # not partial (we can't complete or verify them!) and emit them - # anonymously. - lst = list(self._struct_unions.items()) - lst.sort(key=lambda tp_order: tp_order[1]) - for tp, order in lst: - if tp not in self._seen_struct_unions: - if tp.partial: - raise NotImplementedError("internal inconsistency: %r is " - "partial but was not seen at " - "this point" % (tp,)) - if tp.name.startswith('$') and tp.name[1:].isdigit(): - approxname = tp.name[1:] - elif tp.name == '_IO_FILE' and tp.forcename == 'FILE': - approxname = 'FILE' - self._typedef_ctx(tp, 'FILE') - else: - raise NotImplementedError("internal inconsistency: %r" % - (tp,)) - self._struct_ctx(tp, None, approxname) - - def _generate_cpy_struct_collecttype(self, tp, name): - self._struct_collecttype(tp) - _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype - - def _struct_names(self, tp): - cname = tp.get_c_name('') - if ' ' in cname: - return cname, cname.replace(' ', '_') - else: - return cname, '_' + cname - - def _generate_cpy_struct_decl(self, tp, name): - self._struct_decl(tp, *self._struct_names(tp)) - _generate_cpy_union_decl = _generate_cpy_struct_decl - - def _generate_cpy_struct_ctx(self, tp, name): - self._struct_ctx(tp, *self._struct_names(tp)) - _generate_cpy_union_ctx = _generate_cpy_struct_ctx - - # ---------- - # 'anonymous' declarations. These are produced for anonymous structs - # or unions; the 'name' is obtained by a typedef. - - def _generate_cpy_anonymous_collecttype(self, tp, name): - if isinstance(tp, model.EnumType): - self._generate_cpy_enum_collecttype(tp, name) - else: - self._struct_collecttype(tp) - - def _generate_cpy_anonymous_decl(self, tp, name): - if isinstance(tp, model.EnumType): - self._generate_cpy_enum_decl(tp) - else: - self._struct_decl(tp, name, 'typedef_' + name) - - def _generate_cpy_anonymous_ctx(self, tp, name): - if isinstance(tp, model.EnumType): - self._enum_ctx(tp, name) - else: - self._struct_ctx(tp, name, 'typedef_' + name) - - # ---------- - # constants, declared with "static const ..." 
- - def _generate_cpy_const(self, is_int, name, tp=None, category='const', - check_value=None): - if (category, name) in self._seen_constants: - raise VerificationError( - "duplicate declaration of %s '%s'" % (category, name)) - self._seen_constants.add((category, name)) - # - prnt = self._prnt - funcname = '_cffi_%s_%s' % (category, name) - if is_int: - prnt('static int %s(unsigned long long *o)' % funcname) - prnt('{') - prnt(' int n = (%s) <= 0;' % (name,)) - prnt(' *o = (unsigned long long)((%s) | 0);' - ' /* check that %s is an integer */' % (name, name)) - if check_value is not None: - if check_value > 0: - check_value = '%dU' % (check_value,) - prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,)) - prnt(' n |= 2;') - prnt(' return n;') - prnt('}') - else: - assert check_value is None - prnt('static void %s(char *o)' % funcname) - prnt('{') - prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name)) - prnt('}') - prnt() - - def _generate_cpy_constant_collecttype(self, tp, name): - is_int = tp.is_integer_type() - if not is_int or self.target_is_python: - self._do_collect_type(tp) - - def _generate_cpy_constant_decl(self, tp, name): - is_int = tp.is_integer_type() - self._generate_cpy_const(is_int, name, tp) - - def _generate_cpy_constant_ctx(self, tp, name): - if not self.target_is_python and tp.is_integer_type(): - type_op = CffiOp(OP_CONSTANT_INT, -1) - else: - if self.target_is_python: - const_kind = OP_DLOPEN_CONST - else: - const_kind = OP_CONSTANT - type_index = self._typesdict[tp] - type_op = CffiOp(const_kind, type_index) - self._lsts["global"].append( - GlobalExpr(name, '_cffi_const_%s' % name, type_op)) - - # ---------- - # enums - - def _generate_cpy_enum_collecttype(self, tp, name): - self._do_collect_type(tp) - - def _generate_cpy_enum_decl(self, tp, name=None): - for enumerator in tp.enumerators: - self._generate_cpy_const(True, enumerator) - - def _enum_ctx(self, tp, cname): - type_index = self._typesdict[tp] - type_op = CffiOp(OP_ENUM, -1) - if self.target_is_python: - tp.check_not_partial() - for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - self._lsts["global"].append( - GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, - check_value=enumvalue)) - # - if cname is not None and '$' not in cname and not self.target_is_python: - size = "sizeof(%s)" % cname - signed = "((%s)-1) <= 0" % cname - else: - basetp = tp.build_baseinttype(self.ffi, []) - size = self.ffi.sizeof(basetp) - signed = int(int(self.ffi.cast(basetp, -1)) < 0) - allenums = ",".join(tp.enumerators) - self._lsts["enum"].append( - EnumExpr(tp.name, type_index, size, signed, allenums)) - - def _generate_cpy_enum_ctx(self, tp, name): - self._enum_ctx(tp, tp._get_c_name()) - - # ---------- - # macros: for now only for integers - - def _generate_cpy_macro_collecttype(self, tp, name): - pass - - def _generate_cpy_macro_decl(self, tp, name): - if tp == '...': - check_value = None - else: - check_value = tp # an integer - self._generate_cpy_const(True, name, check_value=check_value) - - def _generate_cpy_macro_ctx(self, tp, name): - if tp == '...': - if self.target_is_python: - raise VerificationError( - "cannot use the syntax '...' in '#define %s ...' 
when " - "using the ABI mode" % (name,)) - check_value = None - else: - check_value = tp # an integer - type_op = CffiOp(OP_CONSTANT_INT, -1) - self._lsts["global"].append( - GlobalExpr(name, '_cffi_const_%s' % name, type_op, - check_value=check_value)) - - # ---------- - # global variables - - def _global_type(self, tp, global_name): - if isinstance(tp, model.ArrayType): - actual_length = tp.length - if actual_length == '...': - actual_length = '_cffi_array_len(%s)' % (global_name,) - tp_item = self._global_type(tp.item, '%s[0]' % global_name) - tp = model.ArrayType(tp_item, actual_length) - return tp - - def _generate_cpy_variable_collecttype(self, tp, name): - self._do_collect_type(self._global_type(tp, name)) - - def _generate_cpy_variable_decl(self, tp, name): - prnt = self._prnt - tp = self._global_type(tp, name) - if isinstance(tp, model.ArrayType) and tp.length is None: - tp = tp.item - ampersand = '' - else: - ampersand = '&' - # This code assumes that casts from "tp *" to "void *" is a - # no-op, i.e. a function that returns a "tp *" can be called - # as if it returned a "void *". This should be generally true - # on any modern machine. The only exception to that rule (on - # uncommon architectures, and as far as I can tell) might be - # if 'tp' were a function type, but that is not possible here. - # (If 'tp' is a function _pointer_ type, then casts from "fn_t - # **" to "void *" are again no-ops, as far as I can tell.) - decl = '*_cffi_var_%s(void)' % (name,) - prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) - prnt('{') - prnt(' return %s(%s);' % (ampersand, name)) - prnt('}') - prnt() - - def _generate_cpy_variable_ctx(self, tp, name): - tp = self._global_type(tp, name) - type_index = self._typesdict[tp] - if self.target_is_python: - op = OP_GLOBAL_VAR - else: - op = OP_GLOBAL_VAR_F - self._lsts["global"].append( - GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index))) - - # ---------- - # extern "Python" - - def _generate_cpy_extern_python_collecttype(self, tp, name): - assert isinstance(tp, model.FunctionPtrType) - self._do_collect_type(tp) - _generate_cpy_dllexport_python_collecttype = \ - _generate_cpy_extern_python_plus_c_collecttype = \ - _generate_cpy_extern_python_collecttype - - def _extern_python_decl(self, tp, name, tag_and_space): - prnt = self._prnt - if isinstance(tp.result, model.VoidType): - size_of_result = '0' - else: - context = 'result of %s' % name - size_of_result = '(int)sizeof(%s)' % ( - tp.result.get_c_name('', context),) - prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) - prnt(' { "%s.%s", %s, 0, 0 };' % ( - self.module_name, name, size_of_result)) - prnt() - # - arguments = [] - context = 'argument of %s' % name - for i, type in enumerate(tp.args): - arg = type.get_c_name(' a%d' % i, context) - arguments.append(arg) - # - repr_arguments = ', '.join(arguments) - repr_arguments = repr_arguments or 'void' - name_and_arguments = '%s(%s)' % (name, repr_arguments) - if tp.abi == "__stdcall": - name_and_arguments = '_cffi_stdcall ' + name_and_arguments - # - def may_need_128_bits(tp): - return (isinstance(tp, model.PrimitiveType) and - tp.name == 'long double') - # - size_of_a = max(len(tp.args)*8, 8) - if may_need_128_bits(tp.result): - size_of_a = max(size_of_a, 16) - if isinstance(tp.result, model.StructOrUnion): - size_of_a = 'sizeof(%s) > %d ? 
sizeof(%s) : %d' % ( - tp.result.get_c_name(''), size_of_a, - tp.result.get_c_name(''), size_of_a) - prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) - prnt('{') - prnt(' char a[%s];' % size_of_a) - prnt(' char *p = a;') - for i, type in enumerate(tp.args): - arg = 'a%d' % i - if (isinstance(type, model.StructOrUnion) or - may_need_128_bits(type)): - arg = '&' + arg - type = model.PointerType(type) - prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) - prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) - if not isinstance(tp.result, model.VoidType): - prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) - prnt('}') - prnt() - self._num_externpy += 1 - - def _generate_cpy_extern_python_decl(self, tp, name): - self._extern_python_decl(tp, name, 'static ') - - def _generate_cpy_dllexport_python_decl(self, tp, name): - self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') - - def _generate_cpy_extern_python_plus_c_decl(self, tp, name): - self._extern_python_decl(tp, name, '') - - def _generate_cpy_extern_python_ctx(self, tp, name): - if self.target_is_python: - raise VerificationError( - "cannot use 'extern \"Python\"' in the ABI mode") - if tp.ellipsis: - raise NotImplementedError("a vararg function is extern \"Python\"") - type_index = self._typesdict[tp] - type_op = CffiOp(OP_EXTERN_PYTHON, type_index) - self._lsts["global"].append( - GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) - - _generate_cpy_dllexport_python_ctx = \ - _generate_cpy_extern_python_plus_c_ctx = \ - _generate_cpy_extern_python_ctx - - def _print_string_literal_in_array(self, s): - prnt = self._prnt - prnt('// # NB. this is not a string because of a size limit in MSVC') - if not isinstance(s, bytes): # unicode - s = s.encode('utf-8') # -> bytes - else: - s.decode('utf-8') # got bytes, check for valid utf-8 - try: - s.decode('ascii') - except UnicodeDecodeError: - s = b'# -*- encoding: utf8 -*-\n' + s - for line in s.splitlines(True): - comment = line - if type('//') is bytes: # python2 - line = map(ord, line) # make a list of integers - else: # python3 - # type(line) is bytes, which enumerates like a list of integers - comment = ascii(comment)[1:-1] - prnt(('// ' + comment).rstrip()) - printed_line = '' - for c in line: - if len(printed_line) >= 76: - prnt(printed_line) - printed_line = '' - printed_line += '%d,' % (c,) - prnt(printed_line) - - # ---------- - # emitting the opcodes for individual types - - def _emit_bytecode_VoidType(self, tp, index): - self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID) - - def _emit_bytecode_PrimitiveType(self, tp, index): - prim_index = PRIMITIVE_TO_INDEX[tp.name] - self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index) - - def _emit_bytecode_UnknownIntegerType(self, tp, index): - s = ('_cffi_prim_int(sizeof(%s), (\n' - ' ((%s)-1) | 0 /* check that %s is an integer type */\n' - ' ) <= 0)' % (tp.name, tp.name, tp.name)) - self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) - - def _emit_bytecode_UnknownFloatType(self, tp, index): - s = ('_cffi_prim_float(sizeof(%s) *\n' - ' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n' - ' )' % (tp.name, tp.name)) - self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) - - def _emit_bytecode_RawFunctionType(self, tp, index): - self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result]) - index += 1 - for tp1 in tp.args: - realindex = self._typesdict[tp1] - if index != realindex: - if isinstance(tp1, model.PrimitiveType): - self._emit_bytecode_PrimitiveType(tp1, index) - 
else: - self.cffi_types[index] = CffiOp(OP_NOOP, realindex) - index += 1 - flags = int(tp.ellipsis) - if tp.abi is not None: - if tp.abi == '__stdcall': - flags |= 2 - else: - raise NotImplementedError("abi=%r" % (tp.abi,)) - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) - - def _emit_bytecode_PointerType(self, tp, index): - self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) - - _emit_bytecode_ConstPointerType = _emit_bytecode_PointerType - _emit_bytecode_NamedPointerType = _emit_bytecode_PointerType - - def _emit_bytecode_FunctionPtrType(self, tp, index): - raw = tp.as_raw_function() - self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw]) - - def _emit_bytecode_ArrayType(self, tp, index): - item_index = self._typesdict[tp.item] - if tp.length is None: - self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index) - elif tp.length == '...': - raise VerificationError( - "type %s badly placed: the '...' array length can only be " - "used on global arrays or on fields of structures" % ( - str(tp).replace('/*...*/', '...'),)) - else: - assert self.cffi_types[index + 1] == 'LEN' - self.cffi_types[index] = CffiOp(OP_ARRAY, item_index) - self.cffi_types[index + 1] = CffiOp(None, str(tp.length)) - - def _emit_bytecode_StructType(self, tp, index): - struct_index = self._struct_unions[tp] - self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index) - _emit_bytecode_UnionType = _emit_bytecode_StructType - - def _emit_bytecode_EnumType(self, tp, index): - enum_index = self._enums[tp] - self.cffi_types[index] = CffiOp(OP_ENUM, enum_index) - - -if sys.version_info >= (3,): - NativeIO = io.StringIO -else: - class NativeIO(io.BytesIO): - def write(self, s): - if isinstance(s, unicode): - s = s.encode('ascii') - super(NativeIO, self).write(s) - -def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): - if verbose: - print("generating %s" % (target_file,)) - recompiler = Recompiler(ffi, module_name, - target_is_python=(preamble is None)) - recompiler.collect_type_table() - recompiler.collect_step_tables() - f = NativeIO() - recompiler.write_source_to_f(f, preamble) - output = f.getvalue() - try: - with open(target_file, 'r') as f1: - if f1.read(len(output) + 1) != output: - raise IOError - if verbose: - print("(already up-to-date)") - return False # already up-to-date - except IOError: - tmp_file = '%s.~%d' % (target_file, os.getpid()) - with open(tmp_file, 'w') as f1: - f1.write(output) - try: - os.rename(tmp_file, target_file) - except OSError: - os.unlink(target_file) - os.rename(tmp_file, target_file) - return True - -def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): - assert preamble is not None - return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, - verbose) - -def make_py_source(ffi, module_name, target_py_file, verbose=False): - return _make_c_or_py_source(ffi, module_name, None, target_py_file, - verbose) - -def _modname_to_file(outputdir, modname, extension): - parts = modname.split('.') - try: - os.makedirs(os.path.join(outputdir, *parts[:-1])) - except OSError: - pass - parts[-1] += extension - return os.path.join(outputdir, *parts), parts - - -# Aaargh. Distutils is not tested at all for the purpose of compiling -# DLLs that are not extension modules. Here are some hacks to work -# around that, in the _patch_for_*() functions... 
- -def _patch_meth(patchlist, cls, name, new_meth): - old = getattr(cls, name) - patchlist.append((cls, name, old)) - setattr(cls, name, new_meth) - return old - -def _unpatch_meths(patchlist): - for cls, name, old_meth in reversed(patchlist): - setattr(cls, name, old_meth) - -def _patch_for_embedding(patchlist): - if sys.platform == 'win32': - # we must not remove the manifest when building for embedding! - from distutils.msvc9compiler import MSVCCompiler - _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', - lambda self, manifest_file: manifest_file) - - if sys.platform == 'darwin': - # we must not make a '-bundle', but a '-dynamiclib' instead - from distutils.ccompiler import CCompiler - def my_link_shared_object(self, *args, **kwds): - if '-bundle' in self.linker_so: - self.linker_so = list(self.linker_so) - i = self.linker_so.index('-bundle') - self.linker_so[i] = '-dynamiclib' - return old_link_shared_object(self, *args, **kwds) - old_link_shared_object = _patch_meth(patchlist, CCompiler, - 'link_shared_object', - my_link_shared_object) - -def _patch_for_target(patchlist, target): - from distutils.command.build_ext import build_ext - # if 'target' is different from '*', we need to patch some internal - # method to just return this 'target' value, instead of having it - # built from module_name - if target.endswith('.*'): - target = target[:-2] - if sys.platform == 'win32': - target += '.dll' - elif sys.platform == 'darwin': - target += '.dylib' - else: - target += '.so' - _patch_meth(patchlist, build_ext, 'get_ext_filename', - lambda self, ext_name: target) - - -def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, - c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, target=None, debug=None, **kwds): - if not isinstance(module_name, str): - module_name = module_name.encode('ascii') - if ffi._windows_unicode: - ffi._apply_windows_unicode(kwds) - if preamble is not None: - embedding = (ffi._embedding is not None) - if embedding: - ffi._apply_embedding_fix(kwds) - if c_file is None: - c_file, parts = _modname_to_file(tmpdir, module_name, - source_extension) - if extradir: - parts = [extradir] + parts - ext_c_file = os.path.join(*parts) - else: - ext_c_file = c_file - # - if target is None: - if embedding: - target = '%s.*' % module_name - else: - target = '*' - # - ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) - updated = make_c_source(ffi, module_name, preamble, c_file, - verbose=compiler_verbose) - if call_c_compiler: - patchlist = [] - cwd = os.getcwd() - try: - if embedding: - _patch_for_embedding(patchlist) - if target != '*': - _patch_for_target(patchlist, target) - if compiler_verbose: - if tmpdir == '.': - msg = 'the current directory is' - else: - msg = 'setting the current directory to' - print('%s %r' % (msg, os.path.abspath(tmpdir))) - os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, - compiler_verbose, debug) - finally: - os.chdir(cwd) - _unpatch_meths(patchlist) - return outputfilename - else: - return ext, updated - else: - if c_file is None: - c_file, _ = _modname_to_file(tmpdir, module_name, '.py') - updated = make_py_source(ffi, module_name, c_file, - verbose=compiler_verbose) - if call_c_compiler: - return c_file - else: - return None, updated - diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/merge/cmap.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/merge/cmap.py deleted file mode 100644 index 
3209a5d7b82c7ff0776dcae55e92c3cf816553a7..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/merge/cmap.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Behdad Esfahbod, Roozbeh Pournader - -from fontTools.merge.unicode import is_Default_Ignorable -from fontTools.pens.recordingPen import DecomposingRecordingPen -import logging - - -log = logging.getLogger("fontTools.merge") - - -def computeMegaGlyphOrder(merger, glyphOrders): - """Modifies passed-in glyphOrders to reflect new glyph names. - Stores merger.glyphOrder.""" - megaOrder = {} - for glyphOrder in glyphOrders: - for i, glyphName in enumerate(glyphOrder): - if glyphName in megaOrder: - n = megaOrder[glyphName] - while (glyphName + "." + repr(n)) in megaOrder: - n += 1 - megaOrder[glyphName] = n - glyphName += "." + repr(n) - glyphOrder[i] = glyphName - megaOrder[glyphName] = 1 - merger.glyphOrder = megaOrder = list(megaOrder.keys()) - - -def _glyphsAreSame( - glyphSet1, - glyphSet2, - glyph1, - glyph2, - advanceTolerance=0.05, - advanceToleranceEmpty=0.20, -): - pen1 = DecomposingRecordingPen(glyphSet1) - pen2 = DecomposingRecordingPen(glyphSet2) - g1 = glyphSet1[glyph1] - g2 = glyphSet2[glyph2] - g1.draw(pen1) - g2.draw(pen2) - if pen1.value != pen2.value: - return False - # Allow more width tolerance for glyphs with no ink - tolerance = advanceTolerance if pen1.value else advanceToleranceEmpty - # TODO Warn if advances not the same but within tolerance. - if abs(g1.width - g2.width) > g1.width * tolerance: - return False - if hasattr(g1, "height") and g1.height is not None: - if abs(g1.height - g2.height) > g1.height * tolerance: - return False - return True - - -# Valid (format, platformID, platEncID) triplets for cmap subtables containing -# Unicode BMP-only and Unicode Full Repertoire semantics. -# Cf. OpenType spec for "Platform specific encodings": -# https://docs.microsoft.com/en-us/typography/opentype/spec/name -class _CmapUnicodePlatEncodings: - BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)} - FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)} - - -def computeMegaCmap(merger, cmapTables): - """Sets merger.cmap and merger.glyphOrder.""" - - # TODO Handle format=14. 
- # Only merge format 4 and 12 Unicode subtables, ignores all other subtables - # If there is a format 12 table for a font, ignore the format 4 table of it - chosenCmapTables = [] - for fontIdx, table in enumerate(cmapTables): - format4 = None - format12 = None - for subtable in table.tables: - properties = (subtable.format, subtable.platformID, subtable.platEncID) - if properties in _CmapUnicodePlatEncodings.BMP: - format4 = subtable - elif properties in _CmapUnicodePlatEncodings.FullRepertoire: - format12 = subtable - else: - log.warning( - "Dropped cmap subtable from font '%s':\t" - "format %2s, platformID %2s, platEncID %2s", - fontIdx, - subtable.format, - subtable.platformID, - subtable.platEncID, - ) - if format12 is not None: - chosenCmapTables.append((format12, fontIdx)) - elif format4 is not None: - chosenCmapTables.append((format4, fontIdx)) - - # Build the unicode mapping - merger.cmap = cmap = {} - fontIndexForGlyph = {} - glyphSets = [None for f in merger.fonts] if hasattr(merger, "fonts") else None - - for table, fontIdx in chosenCmapTables: - # handle duplicates - for uni, gid in table.cmap.items(): - oldgid = cmap.get(uni, None) - if oldgid is None: - cmap[uni] = gid - fontIndexForGlyph[gid] = fontIdx - elif is_Default_Ignorable(uni) or uni in (0x25CC,): # U+25CC DOTTED CIRCLE - continue - elif oldgid != gid: - # Char previously mapped to oldgid, now to gid. - # Record, to fix up in GSUB 'locl' later. - if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None: - if glyphSets is not None: - oldFontIdx = fontIndexForGlyph[oldgid] - for idx in (fontIdx, oldFontIdx): - if glyphSets[idx] is None: - glyphSets[idx] = merger.fonts[idx].getGlyphSet() - # if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid): - # continue - merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid - elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid: - # Char previously mapped to oldgid but oldgid is already remapped to a different - # gid, because of another Unicode character. - # TODO: Try harder to do something about these. 
- log.warning( - "Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid - ) - - -def renameCFFCharStrings(merger, glyphOrder, cffTable): - """Rename topDictIndex charStrings based on glyphOrder.""" - td = cffTable.cff.topDictIndex[0] - - charStrings = {} - for i, v in enumerate(td.CharStrings.charStrings.values()): - glyphName = glyphOrder[i] - charStrings[glyphName] = v - td.CharStrings.charStrings = charStrings - - td.charset = list(glyphOrder) diff --git a/spaces/cloudwp/sd/app.py b/spaces/cloudwp/sd/app.py deleted file mode 100644 index 60607d9532dd37a1d845f82425ddc48f9dd49b16..0000000000000000000000000000000000000000 --- a/spaces/cloudwp/sd/app.py +++ /dev/null @@ -1,45 +0,0 @@ -import gradio as gr -import subprocess -import runpodctl - -def start_pod(pod_name): - result = subprocess.run(["runpodctl", "start", pod_name], capture_output=True, text=True) - return result.stdout - -def stop_pod(pod_name): - result = subprocess.run(["runpodctl", "stop", pod_name], capture_output=True, text=True) - return result.stdout - -def delete_pod(pod_name): - result = subprocess.run(["runpodctl", "delete", pod_name], capture_output=True, text=True) - return result.stdout - -def create_pod(pod_name): - result = subprocess.run(["runpodctl", "create", pod_name], capture_output=True, text=True) - return result.stdout - -def run_app(action, pod_name): - if action == "start": - output = start_pod(pod_name) - elif action == "stop": - output = stop_pod(pod_name) - elif action == "delete": - output = delete_pod(pod_name) - elif action == "create": - output = create_pod(pod_name) - else: - output = "Unknown action. Please select a valid action." - return output - -action_options = gr.inputs.Radio(["start", "stop", "delete", "create"], label="Choose action") -pod_name_input = gr.inputs.Textbox(label="Enter pod name") - -iface = gr.Interface( - fn=run_app, - inputs=[action_options, pod_name_input], - outputs="text", - title="Pod Management", - description="Enter the pod name and choose an action to perform on the pod.", - theme="default" -) -iface.launch() \ No newline at end of file diff --git a/spaces/codebox/diffuse-flood/build/_app/immutable/chunks/1-11d02443.js b/spaces/codebox/diffuse-flood/build/_app/immutable/chunks/1-11d02443.js deleted file mode 100644 index 012eb84f230dc0768edabcdf82b39697e90d8988..0000000000000000000000000000000000000000 --- a/spaces/codebox/diffuse-flood/build/_app/immutable/chunks/1-11d02443.js +++ /dev/null @@ -1 +0,0 @@ -import{default as r}from"../components/error.svelte-526e6a5c.js";import"./index-a207c28c.js";import"./singletons-46497942.js";export{r as component}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dv_internal.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dv_internal.h deleted file mode 100644 index 4b4151c88d4488020e8cb352866db3de0794f314..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dv_internal.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * DV encoder/decoder shared code - * Copyright (c) 2002 Fabrice Bellard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_DV_INTERNAL_H -#define AVCODEC_DV_INTERNAL_H - -#include - -#include "dv.h" -#include "dv_profile.h" - -typedef struct DVwork_chunk { - uint16_t buf_offset; - uint16_t mb_coordinates[5]; -} DVwork_chunk; - -int ff_dv_init_dynamic_tables(DVwork_chunk *work_chunks, const AVDVProfile *d); - -static inline int dv_work_pool_size(const AVDVProfile *d) -{ - int size = d->n_difchan * d->difseg_size * 27; - if (DV_PROFILE_IS_1080i50(d)) - size -= 3 * 27; - if (DV_PROFILE_IS_720p50(d)) - size -= 4 * 27; - return size; -} - -static inline void dv_calculate_mb_xy(const AVDVProfile *sys, - const uint8_t *buf, - const DVwork_chunk *work_chunk, - int m, int *mb_x, int *mb_y) -{ - *mb_x = work_chunk->mb_coordinates[m] & 0xff; - *mb_y = work_chunk->mb_coordinates[m] >> 8; - - /* We work with 720p frames split in half. - * The odd half-frame (chan == 2,3) is displaced :-( */ - if (sys->height == 720 && !(buf[1] & 0x0C)) - /* shifting the Y coordinate down by 72/2 macro blocks */ - *mb_y -= (*mb_y > 17) ? 18 : -72; -} - -#endif // AVCODEC_DV_INTERNAL_H diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/blockdsp_mmi.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/blockdsp_mmi.c deleted file mode 100644 index 8b5c7e955c86e225be06f3db1b468f6b954d4b08..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/blockdsp_mmi.c +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Loongson SIMD optimized blockdsp - * - * Copyright (c) 2015 Loongson Technology Corporation Limited - * Copyright (c) 2015 Zhou Xiaoyong - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "blockdsp_mips.h" -#include "libavutil/mips/mmiutils.h" - -void ff_fill_block16_mmi(uint8_t *block, uint8_t value, ptrdiff_t line_size, int h) -{ - double ftmp[1]; - DECLARE_VAR_ALL64; - - __asm__ volatile ( - "mtc1 %[value], %[ftmp0] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "1: \n\t" - MMI_SDC1(%[ftmp0], %[block], 0x00) - PTR_ADDI "%[h], %[h], -0x01 \n\t" - MMI_SDC1(%[ftmp0], %[block], 0x08) - PTR_ADDU "%[block], %[block], %[line_size] \n\t" - "bnez %[h], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), - RESTRICT_ASM_ALL64 - [block]"+&r"(block), [h]"+&r"(h) - : [value]"r"(value), [line_size]"r"((mips_reg)line_size) - : "memory" - ); -} - -void ff_fill_block8_mmi(uint8_t *block, uint8_t value, ptrdiff_t line_size, int h) -{ - double ftmp0; - DECLARE_VAR_ALL64; - - __asm__ volatile ( - "mtc1 %[value], %[ftmp0] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "1: \n\t" - MMI_SDC1(%[ftmp0], %[block], 0x00) - PTR_ADDI "%[h], %[h], -0x01 \n\t" - PTR_ADDU "%[block], %[block], %[line_size] \n\t" - "bnez %[h], 1b \n\t" - : [ftmp0]"=&f"(ftmp0), - RESTRICT_ASM_ALL64 - [block]"+&r"(block), [h]"+&r"(h) - : [value]"r"(value), [line_size]"r"((mips_reg)line_size) - : "memory" - ); -} - -void ff_clear_block_mmi(int16_t *block) -{ - double ftmp[2]; - - __asm__ volatile ( - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "pxor %[ftmp1], %[ftmp1], %[ftmp1] \n\t" - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x00) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x10) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x20) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x30) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x40) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x50) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x60) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x70) - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]) - : [block]"r"(block) - : "memory" - ); -} - -void ff_clear_blocks_mmi(int16_t *block) -{ - double ftmp[2]; - - __asm__ volatile ( - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "pxor %[ftmp1], %[ftmp1], %[ftmp1] \n\t" - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x00) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x10) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x20) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x30) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x40) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x50) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x60) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x70) - - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x80) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x90) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0xa0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0xb0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0xc0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0xd0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0xe0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0xf0) - - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x100) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x110) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x120) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x130) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x140) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x150) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x160) - MMI_SQC1(%[ftmp0], 
%[ftmp1], %[block], 0x170) - - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x180) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x190) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x1a0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x1b0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x1c0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x1d0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x1e0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x1f0) - - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x200) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x210) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x220) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x230) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x240) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x250) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x260) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x270) - - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x280) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x290) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x2a0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x2b0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x2c0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x2d0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x2e0) - MMI_SQC1(%[ftmp0], %[ftmp1], %[block], 0x2f0) - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]) - : [block]"r"((uint64_t *)block) - : "memory" - ); -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download FR Legends 0.3.2 Mod Apk with Unlimited Currency and Customization Options.md b/spaces/congsaPfin/Manga-OCR/logs/Download FR Legends 0.3.2 Mod Apk with Unlimited Currency and Customization Options.md deleted file mode 100644 index 10b3e2d96663c535bbd28fed904a8bd4f780c355..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download FR Legends 0.3.2 Mod Apk with Unlimited Currency and Customization Options.md +++ /dev/null @@ -1,85 +0,0 @@ -
      -

      FR Legends 0.3.2 Mod Apk Unlimited Money: A Drifting Game for Android

      -

      Do you love drifting games? Do you want to experience the thrill of driving your own customized car on different tracks? If yes, then you should try FR Legends, a drifting game for Android devices that will make you feel like a professional racer. In this article, we will tell you everything you need to know about FR Legends, and how to download and install FR Legends 0.3.2 Mod Apk Unlimited Money, a modified version of the game that gives you unlimited money and access to all the features.

      -

      -

      What is FR Legends?

      -

FR Legends is a drifting game that lets you customize and drift your own car on various tracks and modes. You can choose from different types of cars, such as the L300, Supra, and Jazz, and modify them with different parts, such as engines, tires, spoilers, and exhausts. You can also change the color and design of your car to suit your style.

      -

      FR Legends also features realistic physics and graphics that make the game more immersive and fun. You can see the smoke from your tires, the sparks from your bumper, and the damage from your collisions. You can also hear the sound of your engine, the screech of your brakes, and the roar of your opponents.

      -

      FR Legends has various modes and tracks to choose from, such as solo mode, tandem mode, online mode, free mode, and more. You can drift on different terrains, such as asphalt, dirt, snow, and more. You can also challenge other players online or offline, and show off your skills and style.

      -


      -

      What is FR Legends 0.3.2 Mod Apk Unlimited Money?

      -

      FR Legends 0.3.2 Mod Apk Unlimited Money is a modified version of the original game that gives you unlimited money and access to all the features. With this mod apk, you can unlock all the cars and parts in the game, and customize your car however you want. You can also play the game without any ads or interruptions.

      -

      FR Legends 0.3.2 Mod Apk Unlimited Money also does not require root access on your device, so you can install it easily and safely. You can enjoy the game without worrying about any malware or viruses.

      -

      How to download and install FR Legends 0.3.2 Mod Apk Unlimited Money?

      -

      To download and install FR Legends 0.3.2 Mod Apk Unlimited Money, you need to follow these simple steps:

      -
        -
1. Download the mod apk file from a trusted source, such as [Segitekno], which provides a safe and fast download link.
2. Enable unknown sources in your device settings by going to Settings > Security > Unknown Sources and toggling it on.
3. Install the mod apk file by tapping on it and following the instructions.
4. Enjoy the game with unlimited money and access to all the features.
      -

      What are the benefits of playing FR Legends 0.3.2 Mod Apk Unlimited Money?

      -

      Playing FR Legends 0.3.2 Mod Apk Unlimited Money has many benefits, such as:

      -
        -
• You can enjoy the game without spending real money. You can buy any car or part you want, and upgrade your car to the maximum level.
• You can customize your car with any parts and colors you want. You can create your own unique and stylish car, and impress your friends and opponents.
• You can compete with other players online and offline. You can join online races and tournaments, and show off your drifting skills and style. You can also play offline with AI or local multiplayer, and practice your techniques and strategies.
      -

      Conclusion

      -

      FR Legends is a drifting game for Android devices that will make you feel like a professional racer. You can customize and drift your own car on various tracks and modes, and enjoy realistic physics and graphics. You can also download and install FR Legends 0.3.2 Mod Apk Unlimited Money, a modified version of the game that gives you unlimited money and access to all the features. With this mod apk, you can unlock all the cars and parts in the game, and play the game without any ads or root access. You can also enjoy the game without spending real money, customize your car however you want, and compete with other players online and offline.

      -

      If you are looking for a fun and exciting drifting game for Android, you should try FR Legends 0.3.2 Mod Apk Unlimited Money today!

      -

      FAQs

      -

      Here are some frequently asked questions about FR Legends 0.3.2 Mod Apk Unlimited Money:

      -
        -
1. Is FR Legends 0.3.2 Mod Apk Unlimited Money safe to download and install?

   Yes, FR Legends 0.3.2 Mod Apk Unlimited Money is safe to download and install, as long as you download it from a trusted source, such as [Segitekno], which provides a safe and fast download link. You should also scan the mod apk file with an antivirus app before installing it.

2. Is FR Legends 0.3.2 Mod Apk Unlimited Money compatible with my device?

   FR Legends 0.3.2 Mod Apk Unlimited Money is compatible with most Android devices running Android 4.1 or higher. However, some devices may not support the game or the mod apk due to different specifications or settings.

3. How do I update FR Legends 0.3.2 Mod Apk Unlimited Money?

   To update FR Legends 0.3.2 Mod Apk Unlimited Money, download the latest version of the mod apk file from a trusted source, such as [Segitekno], and install it over the existing one. You should also back up your game data before updating, in case something goes wrong.

4. How do I uninstall FR Legends 0.3.2 Mod Apk Unlimited Money?

   To uninstall FR Legends 0.3.2 Mod Apk Unlimited Money, go to your device settings, find the app manager, select FR Legends, and tap on uninstall. You should also delete the mod apk file from your device storage.

5. Where can I get more information about FR Legends 0.3.2 Mod Apk Unlimited Money?

   You can get more information about FR Legends 0.3.2 Mod Apk Unlimited Money from the official website of [Segitekno], which provides a detailed description of the mod apk features, screenshots, videos, reviews, ratings, and comments.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Enjoy the Latest Telugu Songs of 2017 - Free Download.md b/spaces/congsaPfin/Manga-OCR/logs/Enjoy the Latest Telugu Songs of 2017 - Free Download.md deleted file mode 100644 index b040c141e22d8e92fdc921e2de8b499f4969e399..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Enjoy the Latest Telugu Songs of 2017 - Free Download.md +++ /dev/null @@ -1,171 +0,0 @@ -
      -

      Download 2017 Telugu Songs: A Guide for Music Lovers

      -

      Telugu songs are a form of Carnatic music that originated in the southern part of India. They are associated with the Telugu language and culture, which have a rich musical heritage. Telugu songs are known for their catchy tunes, expressive lyrics, and diverse styles. They range from folk songs to film songs, from devotional songs to pop songs, from classical songs to rock songs. Telugu songs have become a global sensation, attracting listeners from different backgrounds and countries. In 2017, many Telugu songs were released that captivated audiences with their quality and variety. Some of them even won prestigious awards and recognition. If you are a fan of Telugu music, you might want to download 2017 Telugu songs and enjoy them offline. But how can you do that? In this article, we will show you how to download 2017 Telugu songs from different websites and how to use TunesKit Audio Capture to record them from any source.

      -

      -

      How to Download 2017 Telugu Songs from Different Websites?

      -

      There are many websites that offer Telugu songs for online streaming or downloading. However, not all of them are reliable or safe. Some of them may have low-quality audio files, broken links, or malware. To help you avoid these problems, we have selected some of the best websites that offer high-quality 2017 Telugu songs for free or at a reasonable price. Here they are:

      -

      Gaana

      -

      Gaana is a popular music streaming website that offers millions of songs in various languages, including Telugu. It has a section dedicated to Telugu music, where you can find the latest and trending 2017 Telugu songs. You can also browse by genres, artists, albums, playlists, or moods. You can listen to Telugu songs online for free with ads or subscribe to Gaana Plus for ad-free listening and offline downloading. To download 2017 Telugu songs from Gaana, you need to:

      -
        -
1. Create an account on Gaana or log in with your existing account.
2. Go to the Telugu section and search for the song you want to download.
3. Click on the download icon next to the song title.
4. Select the quality you prefer and confirm your download.
5. Find your downloaded song in your Gaana app or folder.
      -

      TeluguWap.Net

      -

TeluguWap.Net is a website that provides old and new Telugu songs, ringtones, and videos. It has a large collection of 2017 Telugu songs in various categories, such as devotional songs, FLAC files, and BGMs. You can also search for songs alphabetically or by keywords. You can download 2017 Telugu songs from TeluguWap.Net for free without any registration or subscription. To download 2017 Telugu songs from TeluguWap.Net, you need to:

      -
        -
1. Go to the website and click on the category you want to explore.
2. Find the song you want to download and click on it.
3. Choose the format and quality you want and click on the download link.
4. Wait for the download to complete and enjoy your song.
      -

      JioSaavn


JioSaavn is another music streaming website that offers Telugu songs, podcasts, and radio stations. Its dedicated Telugu section lists the top 2017 Telugu songs alongside new releases, featured artists, and curated playlists. You can also create your own playlists, follow your favorite artists, and share your music with others. You can listen to Telugu songs online for free with ads, or upgrade to JioSaavn Pro for ad-free listening, unlimited downloads, and high-quality audio. To download 2017 Telugu songs from JioSaavn:

1. Create an account on JioSaavn or log in with your existing account.
2. Go to the Telugu section and search for the song you want to download.
3. Click on the download icon next to the song title.
4. Select the quality you prefer and confirm your download.
5. Find your downloaded song in your JioSaavn app or folder.

      SenSongsMP3


SenSongsMP3 is a website that offers Telugu and Tamil songs in various formats, such as MP3, ZIP, and RAR. It has a section for 2017 Telugu songs covering different movies, albums, and singers, and you can search for songs by name or year. Downloads are free, with no registration or subscription required. To download 2017 Telugu songs from SenSongsMP3:

1. Go to the website and click on the 2017 Telugu Songs section.
2. Find the song you want to download and click on it.
3. Choose the format and quality you want and click on the download link.
4. Wait for the download to complete and enjoy your song.

      Mio.to


Mio.to is a website that offers Telugu and other regional songs across different genres and themes. Its 2017 Telugu section groups songs into categories such as love, dance, and party, and you can also browse by artist, album, or playlist. You can listen to Telugu songs online for free with ads, or register for a free account to download them. To download 2017 Telugu songs from Mio.to:

1. Create an account on Mio.to or log in with your existing account.
2. Go to the 2017 Telugu Songs section and search for the song you want to download.
3. Click on the download icon next to the song title.
4. Select the quality you prefer and confirm your download.
5. Find your downloaded song in your Mio.to app or folder.

      Raaga


Raaga is a website that offers Telugu and other Indian music, radio, and podcasts. Its 2017 Telugu section lets you browse songs by genre, mood, artist, album, or playlist, and you can create your own playlists, follow your favorite artists, and share your music with others. You can listen to Telugu songs online for free with ads, or subscribe to Raaga Premium for ad-free listening, unlimited downloads, and high-quality audio. To download 2017 Telugu songs from Raaga:

1. Create an account on Raaga or log in with your existing account.
2. Go to the 2017 Telugu Songs section and search for the song you want to download.
3. Click on the download icon next to the song title.
4. Select the quality you prefer and confirm your download.
5. Find your downloaded song in your Raaga app or folder.

      Saregama


Saregama is a website that offers classic Telugu songs, albums, and artists. Its 2017 Telugu section covers songs from different movies, singers, composers, and lyricists, and you can also browse by genre or mood. You can listen to Telugu songs online for free with ads, or buy songs individually or in bulk. To download 2017 Telugu songs from Saregama:

1. Go to the website and click on the Telugu section.
2. Find the song you want to download and click on it.
3. Click on the buy icon and choose the option you prefer.
4. Make the payment and confirm your download.
5. Find your downloaded song in your Saregama app or folder.

      How to Use TunesKit Audio Capture to Record Telugu Songs from Any Website?


If a website does not offer a download option, you can use TunesKit Audio Capture, a powerful and easy-to-use audio recorder. It captures any sound that plays on your computer, such as music, podcasts, or radio, saves the recordings in formats like MP3, WAV, and FLAC, and can edit them by trimming, cutting, merging, or adding effects. To record 2017 Telugu songs from any website with TunesKit Audio Capture (a conceptual scripted alternative follows these steps):

1. Download and install TunesKit Audio Capture on your computer.
2. Launch the program and select the web browser you want to use.
3. Go to the website that has the Telugu song you want to record.
4. Play the song and click the "REC" button in TunesKit Audio Capture.
5. Wait for the song to finish playing and then stop the recording.
6. Edit the audio file if needed and save it in your preferred format and location.
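
TunesKit Audio Capture is driven through its graphical interface, so the sketch below is not TunesKit code. It is a conceptual Python alternative built on the `sounddevice` and `soundfile` libraries (an assumption: both are installed), recording a fixed number of seconds from the default input device and saving them as a WAV file. Capturing system playback rather than a microphone may additionally require a loopback or virtual audio device, depending on your operating system.

```python
# Conceptual recording sketch, not TunesKit's API. Assumes the audio you want
# to capture is available on the default input device.
import sounddevice as sd
import soundfile as sf

SAMPLE_RATE = 44100   # CD-quality sampling rate
DURATION = 30         # seconds to record
CHANNELS = 2          # stereo

# Record synchronously: rec() starts the capture, wait() blocks until done.
audio = sd.rec(int(DURATION * SAMPLE_RATE), samplerate=SAMPLE_RATE, channels=CHANNELS)
sd.wait()

sf.write("captured_song.wav", audio, SAMPLE_RATE)  # WAV, PCM-16 by default
```
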

      Conclusion


Telugu songs are a great way to enjoy the music and culture of South India: they are diverse, expressive, and catchy, and 2017 produced many releases that impressed audiences with their quality and variety. To download 2017 Telugu songs and listen to them offline, you can use the websites above, which offer them for free or at a reasonable price, or record them from any source with TunesKit Audio Capture. Here are some tips for enjoying Telugu music:

- Listen to different genres and styles of Telugu songs to discover new favorites.
- Create your own playlists of Telugu songs based on your mood or preference.
- Share your Telugu songs with your friends and family and enjoy them together.
- Learn some Telugu words or phrases from the songs and sing along with them.
- Explore the history and culture behind the songs and appreciate their meaning and context.

      FAQs


      What are some of the best 2017 Telugu songs?


Some of the best 2017 Telugu songs are:

- "Bombhaat" from Lie
- "Vachinde" from Fidaa
- "Mellaga Tellarindoi" from Shatamanam Bhavati
- "Saahore Baahubali" from Baahubali 2: The Conclusion
- "Gudilo Badilo Madilo" from Duvvada Jagannadham

      What are some of the best websites to download old Telugu songs?


Some of the best websites to download old Telugu songs are:

- Naa Songs
- AtoZmp3
- TeluguOne
- Telugulyrics.co.in
- Telugump3z.net

      What are some of the best apps to listen to Telugu songs?


Some of the best apps to listen to Telugu songs are:

- Hungama Music
- Wynk Music
- Resso
- Spotify
- YouTube Music

      How can I download Telugu songs for free legally?


You can download Telugu songs for free legally by using websites or apps that have permission from the artists or labels to offer them, or that provide royalty-free or public-domain Telugu songs. You can also use audio recording software like TunesKit Audio Capture to record Telugu songs from any source, as long as you do not distribute or sell the recordings without authorization.


      How can I improve my Telugu language skills by listening to Telugu songs?


You can improve your Telugu language skills by listening to Telugu songs. Follow these steps:

1. Choose Telugu songs that match your level of proficiency and interest.
2. Listen to the songs carefully and try to catch the words and phrases.
3. Look up the lyrics and their meanings online or in a dictionary.
4. Repeat the songs and try to sing along with them.
5. Write down the lyrics and try to translate them into your native language or English.
6. Compare your translation with the official or online translation and check for errors or differences.
7. Learn new vocabulary, grammar, and expressions from the songs and use them in your own sentences.
8. Practice speaking or writing in Telugu using the songs as a topic or a prompt.

      I hope you enjoyed this article and learned something new about downloading 2017 Telugu songs. If you have any questions or feedback, please leave a comment below. Thank you for reading and happy listening!

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Word-d srdrlm sra ila sra sra ila sra sra ila sra.md b/spaces/congsaPfin/Manga-OCR/logs/Word-d srdrlm sra ila sra sra ila sra sra ila sra.md deleted file mode 100644 index b0892e0246a06e87db1fb15eeec913002216d278..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Word-d srdrlm sra ila sra sra ila sra sra ila sra.md +++ /dev/null @@ -1,149 +0,0 @@ -

Numbering Pages in Word: Why and How

Numbering pages in Word is an essential feature for laying out multi-page documents and for building citations and cross-references. This article explains why page numbering is useful, how to set it up, which numbering schemes are available, and which problems you may run into.

Why number pages in Word?

Numbering the pages of a Word document has two main benefits:


It makes documents easier to navigate

Numbered pages show readers and editors the structure of a document, help them find the material they need quickly, and tell them how long the document is. Page numbers are usually used together with a table of contents or an index to lead readers to the topics that interest them.

It provides a basis for citations and cross-references

Numbered pages are essential for citations and cross-references in academic and scientific documents. A citation shows readers the source of a piece of information, while a cross-reference points to where that information sits inside the document. Page numbers are what link the two together.


How do you number pages in Word?

There are two main places to put page numbers in a Word document: at the bottom of the page (in the footer) or at the top (in the header). Each position has its own advantages and drawbacks. Both are explained in detail below.

Numbering at the bottom of the page

Putting the numbers in the footer is the most common approach: the number appears beneath the body text at the foot of each page. To number pages this way, follow these steps.


Choosing the numbering format

1. Open a blank page in your document or open an existing document.
2. On the Insert tab, click "Page Number" and choose "Bottom of Page".
3. Pick one of the preset positions, then open "Page Number" > "Format Page Numbers" to adjust the details.


4. In the dialog that opens, choose the number style and, if needed, where the numbering should begin using "Start at". The font, size, and color of the numbers can be changed by selecting them in the footer and formatting them like ordinary text. A scripted way to achieve the same result is sketched below.
5. Once you are happy with your choices, click "OK".
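
If you build Word documents programmatically, the same footer numbering can be scripted. The sketch below uses the python-docx library (an assumption: it is installed); since python-docx offers no high-level page-number helper, it assembles a { PAGE } field in the footer from the underlying OOXML elements.

```python
# Insert a { PAGE } field into the footer with python-docx. python-docx has no
# built-in page-number helper, so the field is assembled from raw OOXML parts.
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.oxml import OxmlElement
from docx.oxml.ns import qn

def add_page_field(run):
    """Append the field characters that Word renders as the page number."""
    begin = OxmlElement("w:fldChar")
    begin.set(qn("w:fldCharType"), "begin")
    instr = OxmlElement("w:instrText")
    instr.set(qn("xml:space"), "preserve")
    instr.text = "PAGE"                       # the field code itself
    end = OxmlElement("w:fldChar")
    end.set(qn("w:fldCharType"), "end")
    for element in (begin, instr, end):
        run._r.append(element)

doc = Document()
doc.add_paragraph("Body text goes here.")
footer_paragraph = doc.sections[0].footer.paragraphs[0]
footer_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
add_page_field(footer_paragraph.add_run())
doc.save("numbered.docx")                     # hypothetical output name
```
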

Setting where the numbering starts and stops

In some cases you will not want every page numbered; for example, you may want no number on the first page, or you may want the numbering to stop before the end of the document. To control this:

1. To start at a number other than 1, open "Page Number" > "Format Page Numbers" and set the value under "Start at". For example, entering 2 makes the numbering begin at 2.
2. To leave the first page unnumbered, double-click the footer and tick "Different First Page" on the Header & Footer tab.
3. To stop the numbering partway through the document, insert a section break (Layout > Breaks > Next Page) at the point where the numbering should end.
4. In the footer of the new section, turn off "Link to Previous" and delete the page number there.
5. Close the header and footer area when you are done.


Numbering at the top of the page

Numbering at the top of the page puts the number in the header, above the body text. To number pages this way:

Choosing the numbering format

1. Open a blank page in your document or open an existing document.
2. On the Insert tab, click "Page Number" and choose "Top of Page".
3. Pick the preset position and format you want.
4. To change the style of the numbers, open "Page Number" > "Format Page Numbers"; to change their font, size, or color, select them in the header and format them like ordinary text.
5. Once you are happy with your choices, click "OK" and close the header area.


Setting where the numbering starts and stops

The steps are the same as for numbering at the bottom of the page, described above. The only difference is that you work in the header instead of the footer: choose "Top of Page" when inserting the numbers, and edit the header when changing or removing them.


Word page numbering schemes

Word offers several page numbering schemes. Which one to choose depends on the content and requirements of your document. The most commonly used schemes are described below.

Sequential numbering

Sequential numbering is the simplest and most widely used scheme: the pages are numbered consecutively as 1, 2, 3, and so on. The steps explained above are all you need. Sequential numbering is a good choice when you want to present the document as a single, continuous whole.

Section-based numbering

Section-based numbering is used to tell the different parts of a document apart. Each section carries its own numbers, prefixed with the section number, for example 1.1, 1.2, 2.1, 2.2, and so on. To number pages this way:

1. Open a blank page in your document or open an existing document.
2. Place the cursor where a new section should begin and insert a section break (Layout > Breaks > Next Page).
3. Number the section headings with a multilevel list linked to the built-in heading styles (Home > Multilevel List).
4. Open Insert > "Page Number" > "Format Page Numbers".
5. Tick "Include chapter number", choose the heading style and the separator, and click "OK".
6. If the page count should restart with each section, open "Format Page Numbers" inside the section and set "Start at" to 1 (a scripted equivalent is sketched after these steps).
7. Repeat these steps for the remaining sections of the document.
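
The per-section restart in step 6 can also be scripted. The sketch below again assumes python-docx and drops down to the raw section properties to set `<w:pgNumType w:start="1"/>`, the OOXML attribute behind the dialog's "Start at" box. Note that appending the element at the end of `sectPr` is tolerated by Word in practice but is not strictly schema-ordered.

```python
# Restart page numbering at 1 in the last section by editing the section XML.
from docx import Document
from docx.oxml import OxmlElement
from docx.oxml.ns import qn

doc = Document("report.docx")            # hypothetical input document

sect_pr = doc.sections[-1]._sectPr       # raw <w:sectPr> of the last section
pg_num_type = sect_pr.find(qn("w:pgNumType"))
if pg_num_type is None:
    pg_num_type = OxmlElement("w:pgNumType")
    sect_pr.append(pg_num_type)          # Word accepts the trailing position
pg_num_type.set(qn("w:start"), "1")      # equivalent to "Start at: 1"

doc.save("report_restarted.docx")
```
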

Custom format numbering

Custom format numbering is used when you want something other than plain Arabic numerals, for example Roman numerals (i, ii, iii) or letters (a, b, c). A common pattern is Roman numerals for the front matter and Arabic numerals for the body. To number pages this way:

1. Open a blank page in your document or open an existing document.
2. On the Insert tab, click "Page Number" and insert the numbers where you want them.
3. Open "Page Number" > "Format Page Numbers".
4. Under "Number format", choose the format you want: Roman numerals, letters, or another of the available options.
5. If needed, set "Start at", then click "OK".
6. To mix formats within one document, split it with section breaks and repeat these steps in each section with a different format.

Word page numbering problems

Page numbering can occasionally cause problems: the numbers may not display correctly, they may disappear, or the numbering may get mixed up between sections. The tips below help you resolve each of these.

The numbering does not display correctly

Sometimes the format, position, or color of the numbers is not what you want. In that case, change the numbering format:

1. Open Insert > "Page Number" > "Format Page Numbers" from a page that contains the numbering.
2. In the dialog, choose or change the number style; to change the font, size, or color, select the number in the header or footer and format it like ordinary text.
3. Click "OK".


Removing the numbering

Sometimes you want to remove the numbering, for example from the first page or a title page. To remove it:

1. Double-click the header or footer of the page whose number you want to remove.
2. To clear the first page only, tick "Different First Page" on the Header & Footer tab; to clear the numbers everywhere, choose Insert > "Page Number" > "Remove Page Numbers".
3. Close the header and footer area.


The numbering gets mixed up between sections

If your document contains several sections, the numbering can get mixed up between them. For example, the first section is numbered 1 to 5 and the second section continues from 6 when you wanted it to restart at 1, or the other way around. In such cases, separate the numbering by section:

1. Go to the beginning of the section whose numbering you want to separate and make sure it starts with a section break (Layout > Breaks > Next Page).
2. Double-click that section's footer and, on the Header & Footer tab, turn off "Link to Previous" so the footer no longer inherits from the preceding section (a scripted equivalent is sketched after these steps).
3. Open Insert > "Page Number" > "Format Page Numbers".
4. Choose "Start at" and enter the first number for this section, or choose "Continue from previous section" to carry the count across.
5. Click "OK".
6. Repeat these steps for every section in the document that needs its own numbering.
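
The "Link to Previous" toggle from step 2 is exposed directly by python-docx (assuming, as above, that the library is available), so unlinking footers across sections is a short loop:

```python
# Unlink every footer after the first section so each section can carry its
# own page numbering. Mirrors turning off "Link to Previous" in Word's UI.
from docx import Document

doc = Document("report.docx")            # hypothetical input document
for section in doc.sections[1:]:
    section.footer.is_linked_to_previous = False
doc.save("report_unlinked.docx")
```
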

Summary

In this article you learned why pages are numbered in Word, how the numbering is set up, which schemes are available, and which problems you may encounter. Page numbering makes documents easier to navigate, provides the basis for citations and cross-references, and supports different formats and per-section schemes. To fix numbering problems, the key skills are adjusting the number format and controlling where the numbering starts and stops.


FAQ

This section answers the questions most frequently asked about page numbering in Word:

Can the position of the page numbers be changed?

Yes. Open Insert > "Page Number" and choose a different placement, such as top of page, bottom of page, or page margins; an existing number can also be re-aligned directly in the header or footer.

Can the color of the page numbers be changed?

Yes. Double-click the header or footer, select the number, choose a font color on the Home tab, and close the header and footer area.

Can the style of the page numbers be changed?

Yes. Open Insert > "Page Number" > "Format Page Numbers" and choose the style you want, for example Arabic numerals, Roman numerals, or letters.

Can the size of the page numbers be changed?

Yes. Double-click the header or footer, select the number, and set any font size you like on the Home tab, whether smaller, larger, or a custom value.

Can the alignment of the page numbers be changed?

Yes. When inserting the numbers, pick a left, center, or right position, or a margin position; the alignment of an existing number can be changed like any paragraph in the header or footer.

      \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Adobe Audition 1.5 [Working][Crack] - Khaos Industries The Best Way to Edit Audio for Free.md b/spaces/contluForse/HuggingGPT/assets/Adobe Audition 1.5 [Working][Crack] - Khaos Industries The Best Way to Edit Audio for Free.md deleted file mode 100644 index b53b38dcecf112e35a616a474847b824b22f0380..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Adobe Audition 1.5 [Working][Crack] - Khaos Industries The Best Way to Edit Audio for Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Adobe Audition 1.5 [Working][Crack] - Khaos Industries Setup Free


      Download File ->->->-> https://ssurll.com/2uzxLe



      -
      - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/decode_heads/sep_aspp_head.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/decode_heads/sep_aspp_head.py deleted file mode 100644 index a23970699df7afd86f483316be3c8d1a34d43c18..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/decode_heads/sep_aspp_head.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.nn as nn -from annotator.mmpkg.mmcv.cnn import ConvModule, DepthwiseSeparableConvModule - -from annotator.mmpkg.mmseg.ops import resize -from ..builder import HEADS -from .aspp_head import ASPPHead, ASPPModule - - -class DepthwiseSeparableASPPModule(ASPPModule): - """Atrous Spatial Pyramid Pooling (ASPP) Module with depthwise separable - conv.""" - - def __init__(self, **kwargs): - super(DepthwiseSeparableASPPModule, self).__init__(**kwargs) - for i, dilation in enumerate(self.dilations): - if dilation > 1: - self[i] = DepthwiseSeparableConvModule( - self.in_channels, - self.channels, - 3, - dilation=dilation, - padding=dilation, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - -@HEADS.register_module() -class DepthwiseSeparableASPPHead(ASPPHead): - """Encoder-Decoder with Atrous Separable Convolution for Semantic Image - Segmentation. - - This head is the implementation of `DeepLabV3+ - `_. - - Args: - c1_in_channels (int): The input channels of c1 decoder. If is 0, - the no decoder will be used. - c1_channels (int): The intermediate channels of c1 decoder. - """ - - def __init__(self, c1_in_channels, c1_channels, **kwargs): - super(DepthwiseSeparableASPPHead, self).__init__(**kwargs) - assert c1_in_channels >= 0 - self.aspp_modules = DepthwiseSeparableASPPModule( - dilations=self.dilations, - in_channels=self.in_channels, - channels=self.channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - if c1_in_channels > 0: - self.c1_bottleneck = ConvModule( - c1_in_channels, - c1_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - else: - self.c1_bottleneck = None - self.sep_bottleneck = nn.Sequential( - DepthwiseSeparableConvModule( - self.channels + c1_channels, - self.channels, - 3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg), - DepthwiseSeparableConvModule( - self.channels, - self.channels, - 3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - aspp_outs = [ - resize( - self.image_pool(x), - size=x.size()[2:], - mode='bilinear', - align_corners=self.align_corners) - ] - aspp_outs.extend(self.aspp_modules(x)) - aspp_outs = torch.cat(aspp_outs, dim=1) - output = self.bottleneck(aspp_outs) - if self.c1_bottleneck is not None: - c1_output = self.c1_bottleneck(inputs[0]) - output = resize( - input=output, - size=c1_output.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - output = torch.cat([output, c1_output], dim=1) - output = self.sep_bottleneck(output) - output = self.cls_seg(output) - return output diff --git a/spaces/crashedice/signify/signify/gan/data/image_folder.py b/spaces/crashedice/signify/signify/gan/data/image_folder.py deleted file mode 100644 index f7cb91574a0487c51e5dd8210aebb38edb0b16ef..0000000000000000000000000000000000000000 --- a/spaces/crashedice/signify/signify/gan/data/image_folder.py +++ /dev/null @@ -1,65 +0,0 @@ 
-"""A modified image folder class - -We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) -so that this class can load images from both current directory and its subdirectories. -""" - -import torch.utils.data as data - -from PIL import Image -import os - -IMG_EXTENSIONS = [ - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', - '.tif', '.TIF', '.tiff', '.TIFF', -] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - - -def make_dataset(dir, max_dataset_size=float("inf")): - images = [] - assert os.path.isdir(dir), '%s is not a valid directory' % dir - - for root, _, fnames in sorted(os.walk(dir)): - for fname in fnames: - if is_image_file(fname): - path = os.path.join(root, fname) - images.append(path) - return images[:min(max_dataset_size, len(images))] - - -def default_loader(path): - return Image.open(path).convert('RGB') - - -class ImageFolder(data.Dataset): - - def __init__(self, root, transform=None, return_paths=False, - loader=default_loader): - imgs = make_dataset(root) - if len(imgs) == 0: - raise(RuntimeError("Found 0 images in: " + root + "\n" - "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) - - self.root = root - self.imgs = imgs - self.transform = transform - self.return_paths = return_paths - self.loader = loader - - def __getitem__(self, index): - path = self.imgs[index] - img = self.loader(path) - if self.transform is not None: - img = self.transform(img) - if self.return_paths: - return img, path - else: - return img - - def __len__(self): - return len(self.imgs) diff --git a/spaces/crylake/img2poem/query2labels/lib/models/transformer.py b/spaces/crylake/img2poem/query2labels/lib/models/transformer.py deleted file mode 100644 index d2393e2c2a16b96e470df45050cc5d39597498a5..0000000000000000000000000000000000000000 --- a/spaces/crylake/img2poem/query2labels/lib/models/transformer.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Q2L Transformer class. - -Most borrow from DETR except: - * remove self-attention by default. 
- -Copy-paste from torch.nn.Transformer with modifications: - * positional encodings are passed in MHattention - * extra LN at the end of encoder is removed - * decoder returns a stack of activations from all decoding layers - * using modified multihead attention from nn_multiheadattention.py -""" -import copy -from typing import Optional, List - -import torch -import torch.nn.functional as F -from torch import nn, Tensor -from torch.nn import MultiheadAttention - - - -class Transformer(nn.Module): - - def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, - num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False, - return_intermediate_dec=False, - rm_self_attn_dec=True, rm_first_self_attn=True, - ): - super().__init__() - - self.num_encoder_layers = num_encoder_layers - if num_decoder_layers > 0: - encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - encoder_norm = nn.LayerNorm(d_model) if normalize_before else None - self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) - - decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - decoder_norm = nn.LayerNorm(d_model) - self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, - return_intermediate=return_intermediate_dec) - - self._reset_parameters() - - self.d_model = d_model - self.nhead = nhead - self.rm_self_attn_dec = rm_self_attn_dec - self.rm_first_self_attn = rm_first_self_attn - - if self.rm_self_attn_dec or self.rm_first_self_attn: - self.rm_self_attn_dec_func() - - # self.debug_mode = False - # self.set_debug_mode(self.debug_mode) - - def rm_self_attn_dec_func(self): - total_modifie_layer_num = 0 - rm_list = [] - for idx, layer in enumerate(self.decoder.layers): - if idx == 0 and not self.rm_first_self_attn: - continue - if idx != 0 and not self.rm_self_attn_dec: - continue - - layer.omit_selfattn = True - del layer.self_attn - del layer.dropout1 - del layer.norm1 - - total_modifie_layer_num += 1 - rm_list.append(idx) - # remove some self-attention layer - # print("rm {} layer: {}".format(total_modifie_layer_num, rm_list)) - - def set_debug_mode(self, status): - print("set debug mode to {}!!!".format(status)) - self.debug_mode = status - if hasattr(self, 'encoder'): - for idx, layer in enumerate(self.encoder.layers): - layer.debug_mode = status - layer.debug_name = str(idx) - if hasattr(self, 'decoder'): - for idx, layer in enumerate(self.decoder.layers): - layer.debug_mode = status - layer.debug_name = str(idx) - - - def _reset_parameters(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, src, query_embed, pos_embed, mask=None): - # flatten NxCxHxW to HWxNxC - bs, c, h, w = src.shape - src = src.flatten(2).permute(2, 0, 1) - pos_embed = pos_embed.flatten(2).permute(2, 0, 1) - query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) - if mask is not None: - mask = mask.flatten(1) - - - if self.num_encoder_layers > 0: - memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) - else: - memory = src - - tgt = torch.zeros_like(query_embed) - hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, - pos=pos_embed, query_pos=query_embed) - - return hs.transpose(1, 2), memory[:h*w].permute(1, 2, 0).view(bs, c, h, w) - - -class TransformerEncoder(nn.Module): - - def __init__(self, encoder_layer, num_layers, norm=None): - super().__init__() 
- self.layers = _get_clones(encoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - - def forward(self, src, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - output = src - - for layer in self.layers: - output = layer(output, src_mask=mask, - src_key_padding_mask=src_key_padding_mask, pos=pos) - - if self.norm is not None: - output = self.norm(output) - - return output - - -class TransformerDecoder(nn.Module): - - def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): - super().__init__() - self.layers = _get_clones(decoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - self.return_intermediate = return_intermediate - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - output = tgt - - intermediate = [] - - for layer in self.layers: - output = layer(output, memory, tgt_mask=tgt_mask, - memory_mask=memory_mask, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=memory_key_padding_mask, - pos=pos, query_pos=query_pos) - if self.return_intermediate: - intermediate.append(self.norm(output)) - - if self.norm is not None: - output = self.norm(output) - if self.return_intermediate: - intermediate.pop() - intermediate.append(output) - - if self.return_intermediate: - return torch.stack(intermediate) - - return output.unsqueeze(0) - - -class TransformerEncoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - self.debug_mode = False - self.debug_name = None - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, - src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - q = k = self.with_pos_embed(src, pos) - src2, corr = self.self_attn(q, k, value=src, attn_mask=src_mask, - key_padding_mask=src_key_padding_mask) - - - src = src + self.dropout1(src2) - src = self.norm1(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) - src = src + self.dropout2(src2) - src = self.norm2(src) - return src - - def forward_pre(self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - src2 = self.norm1(src) - q = k = self.with_pos_embed(src2, pos) - src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, - key_padding_mask=src_key_padding_mask)[0] - - src = src + self.dropout1(src2) - src2 = self.norm2(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) - src = src + self.dropout2(src2) - return src - - def forward(self, src, - src_mask: 
Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(src, src_mask, src_key_padding_mask, pos) - return self.forward_post(src, src_mask, src_key_padding_mask, pos) - - -class TransformerDecoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout) - self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.norm3 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - self.dropout3 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - self.debug_mode = False - self.debug_name = None - self.omit_selfattn = False - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - q = k = self.with_pos_embed(tgt, query_pos) - - if not self.omit_selfattn: - tgt2, sim_mat_1 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask) - - tgt = tgt + self.dropout1(tgt2) - tgt = self.norm1(tgt) - - tgt2, sim_mat_2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask) - - tgt = tgt + self.dropout2(tgt2) - tgt = self.norm2(tgt) - - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) - tgt = tgt + self.dropout3(tgt2) - tgt = self.norm3(tgt) - return tgt - - def forward_pre(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - tgt2 = self.norm1(tgt) - q = k = self.with_pos_embed(tgt2, query_pos) - tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask)[0] - - tgt = tgt + self.dropout1(tgt2) - tgt2 = self.norm2(tgt) - tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask)[0] - - tgt = tgt + self.dropout2(tgt2) - tgt2 = self.norm3(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) - tgt = tgt + self.dropout3(tgt2) - return tgt - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(tgt, memory, tgt_mask, memory_mask, - 
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) - return self.forward_post(tgt, memory, tgt_mask, memory_mask, - tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) - - -def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -def build_transformer(args): - return Transformer( - d_model=args.hidden_dim, - dropout=args.dropout, - nhead=args.nheads, - dim_feedforward=args.dim_feedforward, - num_encoder_layers=args.enc_layers, - num_decoder_layers=args.dec_layers, - normalize_before=args.pre_norm, - return_intermediate_dec=False, - rm_self_attn_dec=not args.keep_other_self_attn_dec, - rm_first_self_attn=not args.keep_first_self_attn_dec, - ) - - -def _get_activation_fn(activation): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - raise RuntimeError(F"activation should be relu/gelu, not {activation}.") diff --git a/spaces/cyberoleg/b2719240e190e2a649150d94db50be82838efeb0/diffusion_webui/utils/preprocces_utils.py b/spaces/cyberoleg/b2719240e190e2a649150d94db50be82838efeb0/diffusion_webui/utils/preprocces_utils.py deleted file mode 100644 index f1824721e4804eecd48b453a37c1ce0377468773..0000000000000000000000000000000000000000 --- a/spaces/cyberoleg/b2719240e190e2a649150d94db50be82838efeb0/diffusion_webui/utils/preprocces_utils.py +++ /dev/null @@ -1,96 +0,0 @@ -from controlnet_aux import ( - CannyDetector, - ContentShuffleDetector, - HEDdetector, - LineartAnimeDetector, - LineartDetector, - MediapipeFaceDetector, - MidasDetector, - MLSDdetector, - NormalBaeDetector, - OpenposeDetector, - PidiNetDetector, - SamDetector, - ZoeDetector, -) - -import numpy as np -import cv2 - -def pad64(x): - return int(np.ceil(float(x) / 64.0) * 64 - x) - -def HWC3(x): - assert x.dtype == np.uint8 - if x.ndim == 2: - x = x[:, :, None] - assert x.ndim == 3 - H, W, C = x.shape - assert C == 1 or C == 3 or C == 4 - if C == 3: - return x - if C == 1: - return np.concatenate([x, x, x], axis=2) - if C == 4: - color = x[:, :, 0:3].astype(np.float32) - alpha = x[:, :, 3:4].astype(np.float32) / 255.0 - y = color * alpha + 255.0 * (1.0 - alpha) - y = y.clip(0, 255).astype(np.uint8) - return y - -def safer_memory(x): - return np.ascontiguousarray(x.copy()).copy() - - -def resize_image_with_pad(input_image, resolution, skip_hwc3=False): - if skip_hwc3: - img = input_image - else: - img = HWC3(input_image) - - H_raw, W_raw, _ = img.shape - k = float(resolution) / float(min(H_raw, W_raw)) - interpolation = cv2.INTER_CUBIC if k > 1 else cv2.INTER_AREA - H_target = int(np.round(float(H_raw) * k)) - W_target = int(np.round(float(W_raw) * k)) - img = cv2.resize(img, (W_target, H_target), interpolation=interpolation) - H_pad, W_pad = pad64(H_target), pad64(W_target) - img_padded = np.pad(img, [[0, H_pad], [0, W_pad], [0, 0]], mode='edge') - - def remove_pad(x): - return safer_memory(x[:H_target, :W_target]) - - return safer_memory(img_padded), remove_pad - - -def scribble_xdog(img, res=512, thr_a=32, **kwargs): - img, remove_pad = resize_image_with_pad(img, res) - g1 = cv2.GaussianBlur(img.astype(np.float32), (0, 0), 0.5) - g2 = cv2.GaussianBlur(img.astype(np.float32), (0, 0), 5.0) - dog = (255 - np.min(g2 - g1, axis=2)).clip(0, 255).astype(np.uint8) - result = np.zeros_like(img, dtype=np.uint8) - result[2 * (255 - dog) > thr_a] = 255 - return remove_pad(result), True - -def none_preprocces(image_path:str): - return 
Image.open(image_path) - -PREPROCCES_DICT = { - "Hed": HEDdetector.from_pretrained("lllyasviel/Annotators"), - "Midas": MidasDetector.from_pretrained("lllyasviel/Annotators"), - "MLSD": MLSDdetector.from_pretrained("lllyasviel/Annotators"), - "Openpose": OpenposeDetector.from_pretrained("lllyasviel/Annotators"), - "PidiNet": PidiNetDetector.from_pretrained("lllyasviel/Annotators"), - "NormalBae": NormalBaeDetector.from_pretrained("lllyasviel/Annotators"), - "Lineart": LineartDetector.from_pretrained("lllyasviel/Annotators"), - "LineartAnime": LineartAnimeDetector.from_pretrained( - "lllyasviel/Annotators" - ), - "Zoe": ZoeDetector.from_pretrained("lllyasviel/Annotators"), - "Canny": CannyDetector(), - "ContentShuffle": ContentShuffleDetector(), - "MediapipeFace": MediapipeFaceDetector(), - "ScribbleXDOG": scribble_xdog, - "None": none_preprocces -} - \ No newline at end of file diff --git a/spaces/daarumadx/bot/src/processing/worker.py b/spaces/daarumadx/bot/src/processing/worker.py deleted file mode 100644 index e3033dde4483b4cfd60a867b136d876ed54225c1..0000000000000000000000000000000000000000 --- a/spaces/daarumadx/bot/src/processing/worker.py +++ /dev/null @@ -1,58 +0,0 @@ -""" Wokers definition """ -# TODO Implement this with a queue and mutliprocessingt -import threading - -from config import Config as Conf -from transform.gan.mask import CorrectToMask, MaskrefToMaskdet, MaskfinToNude -from transform.opencv.correct import DressToCorrect, ColorTransfer -from transform.opencv.mask import MaskToMaskref, MaskdetToMaskfin -from transform.opencv.resize import ImageToResized, ImageToCrop, ImageToOverlay, ImageToResizedCrop, ImageToRescale, ImageToNearest, ImageCompress - -workers = { - "gan": { - CorrectToMask: [], - MaskrefToMaskdet: [], - MaskfinToNude: [], - "sem": threading.Semaphore(1) - }, - "opencv": { - DressToCorrect: [], - MaskToMaskref: [], - ImageToResized: [], - ImageToCrop: [], - ImageToOverlay: [], - ImageToResizedCrop: [], - ImageToRescale: [], - ImageToNearest: [], - ImageCompress: [], - ColorTransfer: [], - MaskdetToMaskfin: [], - "sem": threading.Semaphore(Conf.cores()) - } -} -select_sem = threading.Semaphore(1) - - -def run_worker(klass, image_step, config=None): - r = None - for k in ("gan", "opencv"): - if workers.get(k).get(klass) is not None: - #Conf.log.debug("wk {}".format(workers.get(k).get(klass))) - - workers.get(k).get("sem").acquire() - - select_sem.acquire() - if len(workers.get(k).get(klass)) == 0: - w = klass() - else: - w = workers.get(k).get(klass).pop(0) - select_sem.release() - - r = w.run(*[image_step[i] for i in w.input_index], config=config) - - select_sem.acquire() - workers.get(k).get(klass).append(w) - select_sem.release() - - workers.get(k).get("sem").release() - return r diff --git a/spaces/dawood/chatbot-guide/app.py b/spaces/dawood/chatbot-guide/app.py deleted file mode 100644 index d53b83592c87e0a9ee8e79486c82cc7daef8acbb..0000000000000000000000000000000000000000 --- a/spaces/dawood/chatbot-guide/app.py +++ /dev/null @@ -1,35 +0,0 @@ -import gradio as gr -from transformers import AutoModelForCausalLM, AutoTokenizer -import torch - -tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium") -model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium") - - -def predict(input, history=[]): - # tokenize the new input sentence - print(input) - print(tokenizer.eos_token) - new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt') - - # append the new user input tokens to the chat history - 
bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1) - - # generate a response - history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist() - - # convert the tokens to text, and then split the responses into lines - response = tokenizer.decode(history[0]).split("<|endoftext|>") - response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list - return response, history - -with gr.Blocks() as demo: - chatbot = gr.Chatbot() - state = gr.State([]) - - with gr.Row(): - txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False) - - txt.submit(predict, [txt, state], [chatbot, state]) - -demo.launch() \ No newline at end of file diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/checkboxgroup.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/checkboxgroup.py deleted file mode 100644 index 0cc5448ea45d42d1b5149bbb98f907676411ea9a..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/checkboxgroup.py +++ /dev/null @@ -1,215 +0,0 @@ -"""gr.CheckboxGroup() component""" - -from __future__ import annotations - -from typing import Any, Callable, Literal - -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import ListStringSerializable - -from gradio.components.base import FormComponent, IOComponent, _Keywords -from gradio.deprecation import warn_deprecation, warn_style_method_deprecation -from gradio.events import Changeable, EventListenerMethod, Inputable, Selectable -from gradio.interpretation import NeighborInterpretable - -set_documentation_group("component") - - -@document() -class CheckboxGroup( - FormComponent, - Changeable, - Inputable, - Selectable, - IOComponent, - ListStringSerializable, - NeighborInterpretable, -): - """ - Creates a set of checkboxes of which a subset can be checked. - Preprocessing: passes the list of checked checkboxes as a {List[str | int | float]} or their indices as a {List[int]} into the function, depending on `type`. - Postprocessing: expects a {List[str | int | float]}, each element of which becomes a checked checkbox. - Examples-format: a {List[str | int | float]} representing the values to be checked. - Demos: sentence_builder, titanic_survival - """ - - def __init__( - self, - choices: list[str | float | int] | None = None, - *, - value: list[str | float | int] | str | float | int | Callable | None = None, - type: Literal["value", "index"] = "value", - label: str | None = None, - info: str | None = None, - every: float | None = None, - show_label: bool | None = None, - container: bool = True, - scale: int | None = None, - min_width: int = 160, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - choices: list of (string or numeric) options to select from. - value: default selected list of options. If a single choice is selected, it can be passed in as a string or numeric type. If callable, the function will be called whenever the app loads to set the initial value of the component. - type: Type of value to be returned by component. 
"value" returns the list of strings of the choices selected, "index" returns the list of indices of the choices selected. - label: component name in interface. - info: additional component description. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - container: If True, will place the component in a container - providing some extra padding around the border. - scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - interactive: if True, choices in this checkbox group will be checkable; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - self.choices = choices or [] - valid_types = ["value", "index"] - if type not in valid_types: - raise ValueError( - f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" - ) - self.type = type - self.select: EventListenerMethod - """ - Event listener for when the user selects or deselects within CheckboxGroup. - Uses event data gradio.SelectData to carry `value` referring to label of selected checkbox, `index` to refer to index, and `selected` to refer to state of checkbox. - See EventData documentation on how to use this event data. 
- """ - IOComponent.__init__( - self, - label=label, - info=info, - every=every, - show_label=show_label, - container=container, - scale=scale, - min_width=min_width, - interactive=interactive, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - value=value, - **kwargs, - ) - NeighborInterpretable.__init__(self) - - def get_config(self): - return { - "choices": self.choices, - "value": self.value, - **IOComponent.get_config(self), - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": self.choices[0] if self.choices else None, - "serialized": self.choices[0] if self.choices else None, - } - - @staticmethod - def update( - value: list[str | int | float] - | str - | Literal[_Keywords.NO_VALUE] - | None = _Keywords.NO_VALUE, - choices: list[str] | None = None, - label: str | None = None, - info: str | None = None, - show_label: bool | None = None, - container: bool | None = None, - scale: int | None = None, - min_width: int | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - return { - "choices": choices, - "label": label, - "info": info, - "show_label": show_label, - "container": container, - "scale": scale, - "min_width": min_width, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - - def preprocess(self, x: list[str | int | float]) -> list[str | int | float]: - """ - Parameters: - x: list of selected choices - Returns: - list of selected choices as strings or indices within choice list - """ - if self.type == "value": - return x - elif self.type == "index": - return [self.choices.index(choice) for choice in x] - else: - raise ValueError( - f"Unknown type: {self.type}. Please choose from: 'value', 'index'." - ) - - def postprocess( - self, y: list[str | int | float] | str | int | float | None - ) -> list[str | int | float]: - """ - Any postprocessing needed to be performed on function output. - Parameters: - y: List of selected choices. If a single choice is selected, it can be passed in as a string - Returns: - List of selected choices - """ - if y is None: - return [] - if not isinstance(y, list): - y = [y] - return y - - def get_interpretation_neighbors(self, x): - leave_one_out_sets = [] - for choice in self.choices: - leave_one_out_set = list(x) - if choice in leave_one_out_set: - leave_one_out_set.remove(choice) - else: - leave_one_out_set.append(choice) - leave_one_out_sets.append(leave_one_out_set) - return leave_one_out_sets, {} - - def get_interpretation_scores(self, x, neighbors, scores, **kwargs): - """ - Returns: - For each tuple in the list, the first value represents the interpretation score if the input is False, and the second if the input is True. - """ - final_scores = [] - for choice, score in zip(self.choices, scores): - score_set = [score, None] if choice in x else [None, score] - final_scores.append(score_set) - return final_scores - - def style( - self, - *, - item_container: bool | None = None, - container: bool | None = None, - **kwargs, - ): - """ - This method is deprecated. Please set these arguments in the constructor instead. 
- """ - warn_style_method_deprecation() - if item_container is not None: - warn_deprecation("The `item_container` parameter is deprecated.") - if container is not None: - self.container = container - return self diff --git a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/clap/training/imagenet_zeroshot_data.py b/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/clap/training/imagenet_zeroshot_data.py deleted file mode 100644 index d32e55328d6799ccb8d61625f43abb80a33d6c17..0000000000000000000000000000000000000000 --- a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/clap/training/imagenet_zeroshot_data.py +++ /dev/null @@ -1,1088 +0,0 @@ -# NOTE: This script is currently not supported for CLAP. - -imagenet_classnames = [ - "tench", - "goldfish", - "great white shark", - "tiger shark", - "hammerhead shark", - "electric ray", - "stingray", - "rooster", - "hen", - "ostrich", - "brambling", - "goldfinch", - "house finch", - "junco", - "indigo bunting", - "American robin", - "bulbul", - "jay", - "magpie", - "chickadee", - "American dipper", - "kite (bird of prey)", - "bald eagle", - "vulture", - "great grey owl", - "fire salamander", - "smooth newt", - "newt", - "spotted salamander", - "axolotl", - "American bullfrog", - "tree frog", - "tailed frog", - "loggerhead sea turtle", - "leatherback sea turtle", - "mud turtle", - "terrapin", - "box turtle", - "banded gecko", - "green iguana", - "Carolina anole", - "desert grassland whiptail lizard", - "agama", - "frilled-necked lizard", - "alligator lizard", - "Gila monster", - "European green lizard", - "chameleon", - "Komodo dragon", - "Nile crocodile", - "American alligator", - "triceratops", - "worm snake", - "ring-necked snake", - "eastern hog-nosed snake", - "smooth green snake", - "kingsnake", - "garter snake", - "water snake", - "vine snake", - "night snake", - "boa constrictor", - "African rock python", - "Indian cobra", - "green mamba", - "sea snake", - "Saharan horned viper", - "eastern diamondback rattlesnake", - "sidewinder rattlesnake", - "trilobite", - "harvestman", - "scorpion", - "yellow garden spider", - "barn spider", - "European garden spider", - "southern black widow", - "tarantula", - "wolf spider", - "tick", - "centipede", - "black grouse", - "ptarmigan", - "ruffed grouse", - "prairie grouse", - "peafowl", - "quail", - "partridge", - "african grey parrot", - "macaw", - "sulphur-crested cockatoo", - "lorikeet", - "coucal", - "bee eater", - "hornbill", - "hummingbird", - "jacamar", - "toucan", - "duck", - "red-breasted merganser", - "goose", - "black swan", - "tusker", - "echidna", - "platypus", - "wallaby", - "koala", - "wombat", - "jellyfish", - "sea anemone", - "brain coral", - "flatworm", - "nematode", - "conch", - "snail", - "slug", - "sea slug", - "chiton", - "chambered nautilus", - "Dungeness crab", - "rock crab", - "fiddler crab", - "red king crab", - "American lobster", - "spiny lobster", - "crayfish", - "hermit crab", - "isopod", - "white stork", - "black stork", - "spoonbill", - "flamingo", - "little blue heron", - "great egret", - "bittern bird", - "crane bird", - "limpkin", - "common gallinule", - "American coot", - "bustard", - "ruddy turnstone", - "dunlin", - "common redshank", - "dowitcher", - "oystercatcher", - "pelican", - "king penguin", - "albatross", - "grey whale", - "killer whale", - "dugong", - "sea lion", - "Chihuahua", - "Japanese Chin", - "Maltese", - "Pekingese", - "Shih Tzu", - "King Charles Spaniel", - "Papillon", - "toy terrier", - "Rhodesian Ridgeback", - 
"Afghan Hound", - "Basset Hound", - "Beagle", - "Bloodhound", - "Bluetick Coonhound", - "Black and Tan Coonhound", - "Treeing Walker Coonhound", - "English foxhound", - "Redbone Coonhound", - "borzoi", - "Irish Wolfhound", - "Italian Greyhound", - "Whippet", - "Ibizan Hound", - "Norwegian Elkhound", - "Otterhound", - "Saluki", - "Scottish Deerhound", - "Weimaraner", - "Staffordshire Bull Terrier", - "American Staffordshire Terrier", - "Bedlington Terrier", - "Border Terrier", - "Kerry Blue Terrier", - "Irish Terrier", - "Norfolk Terrier", - "Norwich Terrier", - "Yorkshire Terrier", - "Wire Fox Terrier", - "Lakeland Terrier", - "Sealyham Terrier", - "Airedale Terrier", - "Cairn Terrier", - "Australian Terrier", - "Dandie Dinmont Terrier", - "Boston Terrier", - "Miniature Schnauzer", - "Giant Schnauzer", - "Standard Schnauzer", - "Scottish Terrier", - "Tibetan Terrier", - "Australian Silky Terrier", - "Soft-coated Wheaten Terrier", - "West Highland White Terrier", - "Lhasa Apso", - "Flat-Coated Retriever", - "Curly-coated Retriever", - "Golden Retriever", - "Labrador Retriever", - "Chesapeake Bay Retriever", - "German Shorthaired Pointer", - "Vizsla", - "English Setter", - "Irish Setter", - "Gordon Setter", - "Brittany dog", - "Clumber Spaniel", - "English Springer Spaniel", - "Welsh Springer Spaniel", - "Cocker Spaniel", - "Sussex Spaniel", - "Irish Water Spaniel", - "Kuvasz", - "Schipperke", - "Groenendael dog", - "Malinois", - "Briard", - "Australian Kelpie", - "Komondor", - "Old English Sheepdog", - "Shetland Sheepdog", - "collie", - "Border Collie", - "Bouvier des Flandres dog", - "Rottweiler", - "German Shepherd Dog", - "Dobermann", - "Miniature Pinscher", - "Greater Swiss Mountain Dog", - "Bernese Mountain Dog", - "Appenzeller Sennenhund", - "Entlebucher Sennenhund", - "Boxer", - "Bullmastiff", - "Tibetan Mastiff", - "French Bulldog", - "Great Dane", - "St. 
Bernard", - "husky", - "Alaskan Malamute", - "Siberian Husky", - "Dalmatian", - "Affenpinscher", - "Basenji", - "pug", - "Leonberger", - "Newfoundland dog", - "Great Pyrenees dog", - "Samoyed", - "Pomeranian", - "Chow Chow", - "Keeshond", - "brussels griffon", - "Pembroke Welsh Corgi", - "Cardigan Welsh Corgi", - "Toy Poodle", - "Miniature Poodle", - "Standard Poodle", - "Mexican hairless dog (xoloitzcuintli)", - "grey wolf", - "Alaskan tundra wolf", - "red wolf or maned wolf", - "coyote", - "dingo", - "dhole", - "African wild dog", - "hyena", - "red fox", - "kit fox", - "Arctic fox", - "grey fox", - "tabby cat", - "tiger cat", - "Persian cat", - "Siamese cat", - "Egyptian Mau", - "cougar", - "lynx", - "leopard", - "snow leopard", - "jaguar", - "lion", - "tiger", - "cheetah", - "brown bear", - "American black bear", - "polar bear", - "sloth bear", - "mongoose", - "meerkat", - "tiger beetle", - "ladybug", - "ground beetle", - "longhorn beetle", - "leaf beetle", - "dung beetle", - "rhinoceros beetle", - "weevil", - "fly", - "bee", - "ant", - "grasshopper", - "cricket insect", - "stick insect", - "cockroach", - "praying mantis", - "cicada", - "leafhopper", - "lacewing", - "dragonfly", - "damselfly", - "red admiral butterfly", - "ringlet butterfly", - "monarch butterfly", - "small white butterfly", - "sulphur butterfly", - "gossamer-winged butterfly", - "starfish", - "sea urchin", - "sea cucumber", - "cottontail rabbit", - "hare", - "Angora rabbit", - "hamster", - "porcupine", - "fox squirrel", - "marmot", - "beaver", - "guinea pig", - "common sorrel horse", - "zebra", - "pig", - "wild boar", - "warthog", - "hippopotamus", - "ox", - "water buffalo", - "bison", - "ram (adult male sheep)", - "bighorn sheep", - "Alpine ibex", - "hartebeest", - "impala (antelope)", - "gazelle", - "arabian camel", - "llama", - "weasel", - "mink", - "European polecat", - "black-footed ferret", - "otter", - "skunk", - "badger", - "armadillo", - "three-toed sloth", - "orangutan", - "gorilla", - "chimpanzee", - "gibbon", - "siamang", - "guenon", - "patas monkey", - "baboon", - "macaque", - "langur", - "black-and-white colobus", - "proboscis monkey", - "marmoset", - "white-headed capuchin", - "howler monkey", - "titi monkey", - "Geoffroy's spider monkey", - "common squirrel monkey", - "ring-tailed lemur", - "indri", - "Asian elephant", - "African bush elephant", - "red panda", - "giant panda", - "snoek fish", - "eel", - "silver salmon", - "rock beauty fish", - "clownfish", - "sturgeon", - "gar fish", - "lionfish", - "pufferfish", - "abacus", - "abaya", - "academic gown", - "accordion", - "acoustic guitar", - "aircraft carrier", - "airliner", - "airship", - "altar", - "ambulance", - "amphibious vehicle", - "analog clock", - "apiary", - "apron", - "trash can", - "assault rifle", - "backpack", - "bakery", - "balance beam", - "balloon", - "ballpoint pen", - "Band-Aid", - "banjo", - "baluster / handrail", - "barbell", - "barber chair", - "barbershop", - "barn", - "barometer", - "barrel", - "wheelbarrow", - "baseball", - "basketball", - "bassinet", - "bassoon", - "swimming cap", - "bath towel", - "bathtub", - "station wagon", - "lighthouse", - "beaker", - "military hat (bearskin or shako)", - "beer bottle", - "beer glass", - "bell tower", - "baby bib", - "tandem bicycle", - "bikini", - "ring binder", - "binoculars", - "birdhouse", - "boathouse", - "bobsleigh", - "bolo tie", - "poke bonnet", - "bookcase", - "bookstore", - "bottle cap", - "hunting bow", - "bow tie", - "brass memorial plaque", - "bra", - "breakwater", - 
"breastplate", - "broom", - "bucket", - "buckle", - "bulletproof vest", - "high-speed train", - "butcher shop", - "taxicab", - "cauldron", - "candle", - "cannon", - "canoe", - "can opener", - "cardigan", - "car mirror", - "carousel", - "tool kit", - "cardboard box / carton", - "car wheel", - "automated teller machine", - "cassette", - "cassette player", - "castle", - "catamaran", - "CD player", - "cello", - "mobile phone", - "chain", - "chain-link fence", - "chain mail", - "chainsaw", - "storage chest", - "chiffonier", - "bell or wind chime", - "china cabinet", - "Christmas stocking", - "church", - "movie theater", - "cleaver", - "cliff dwelling", - "cloak", - "clogs", - "cocktail shaker", - "coffee mug", - "coffeemaker", - "spiral or coil", - "combination lock", - "computer keyboard", - "candy store", - "container ship", - "convertible", - "corkscrew", - "cornet", - "cowboy boot", - "cowboy hat", - "cradle", - "construction crane", - "crash helmet", - "crate", - "infant bed", - "Crock Pot", - "croquet ball", - "crutch", - "cuirass", - "dam", - "desk", - "desktop computer", - "rotary dial telephone", - "diaper", - "digital clock", - "digital watch", - "dining table", - "dishcloth", - "dishwasher", - "disc brake", - "dock", - "dog sled", - "dome", - "doormat", - "drilling rig", - "drum", - "drumstick", - "dumbbell", - "Dutch oven", - "electric fan", - "electric guitar", - "electric locomotive", - "entertainment center", - "envelope", - "espresso machine", - "face powder", - "feather boa", - "filing cabinet", - "fireboat", - "fire truck", - "fire screen", - "flagpole", - "flute", - "folding chair", - "football helmet", - "forklift", - "fountain", - "fountain pen", - "four-poster bed", - "freight car", - "French horn", - "frying pan", - "fur coat", - "garbage truck", - "gas mask or respirator", - "gas pump", - "goblet", - "go-kart", - "golf ball", - "golf cart", - "gondola", - "gong", - "gown", - "grand piano", - "greenhouse", - "radiator grille", - "grocery store", - "guillotine", - "hair clip", - "hair spray", - "half-track", - "hammer", - "hamper", - "hair dryer", - "hand-held computer", - "handkerchief", - "hard disk drive", - "harmonica", - "harp", - "combine harvester", - "hatchet", - "holster", - "home theater", - "honeycomb", - "hook", - "hoop skirt", - "gymnastic horizontal bar", - "horse-drawn vehicle", - "hourglass", - "iPod", - "clothes iron", - "carved pumpkin", - "jeans", - "jeep", - "T-shirt", - "jigsaw puzzle", - "rickshaw", - "joystick", - "kimono", - "knee pad", - "knot", - "lab coat", - "ladle", - "lampshade", - "laptop computer", - "lawn mower", - "lens cap", - "letter opener", - "library", - "lifeboat", - "lighter", - "limousine", - "ocean liner", - "lipstick", - "slip-on shoe", - "lotion", - "music speaker", - "loupe magnifying glass", - "sawmill", - "magnetic compass", - "messenger bag", - "mailbox", - "tights", - "one-piece bathing suit", - "manhole cover", - "maraca", - "marimba", - "mask", - "matchstick", - "maypole", - "maze", - "measuring cup", - "medicine cabinet", - "megalith", - "microphone", - "microwave oven", - "military uniform", - "milk can", - "minibus", - "miniskirt", - "minivan", - "missile", - "mitten", - "mixing bowl", - "mobile home", - "ford model t", - "modem", - "monastery", - "monitor", - "moped", - "mortar and pestle", - "graduation cap", - "mosque", - "mosquito net", - "vespa", - "mountain bike", - "tent", - "computer mouse", - "mousetrap", - "moving van", - "muzzle", - "metal nail", - "neck brace", - "necklace", - "baby pacifier", - "notebook 
computer", - "obelisk", - "oboe", - "ocarina", - "odometer", - "oil filter", - "pipe organ", - "oscilloscope", - "overskirt", - "bullock cart", - "oxygen mask", - "product packet / packaging", - "paddle", - "paddle wheel", - "padlock", - "paintbrush", - "pajamas", - "palace", - "pan flute", - "paper towel", - "parachute", - "parallel bars", - "park bench", - "parking meter", - "railroad car", - "patio", - "payphone", - "pedestal", - "pencil case", - "pencil sharpener", - "perfume", - "Petri dish", - "photocopier", - "plectrum", - "Pickelhaube", - "picket fence", - "pickup truck", - "pier", - "piggy bank", - "pill bottle", - "pillow", - "ping-pong ball", - "pinwheel", - "pirate ship", - "drink pitcher", - "block plane", - "planetarium", - "plastic bag", - "plate rack", - "farm plow", - "plunger", - "Polaroid camera", - "pole", - "police van", - "poncho", - "pool table", - "soda bottle", - "plant pot", - "potter's wheel", - "power drill", - "prayer rug", - "printer", - "prison", - "missile", - "projector", - "hockey puck", - "punching bag", - "purse", - "quill", - "quilt", - "race car", - "racket", - "radiator", - "radio", - "radio telescope", - "rain barrel", - "recreational vehicle", - "fishing casting reel", - "reflex camera", - "refrigerator", - "remote control", - "restaurant", - "revolver", - "rifle", - "rocking chair", - "rotisserie", - "eraser", - "rugby ball", - "ruler measuring stick", - "sneaker", - "safe", - "safety pin", - "salt shaker", - "sandal", - "sarong", - "saxophone", - "scabbard", - "weighing scale", - "school bus", - "schooner", - "scoreboard", - "CRT monitor", - "screw", - "screwdriver", - "seat belt", - "sewing machine", - "shield", - "shoe store", - "shoji screen / room divider", - "shopping basket", - "shopping cart", - "shovel", - "shower cap", - "shower curtain", - "ski", - "balaclava ski mask", - "sleeping bag", - "slide rule", - "sliding door", - "slot machine", - "snorkel", - "snowmobile", - "snowplow", - "soap dispenser", - "soccer ball", - "sock", - "solar thermal collector", - "sombrero", - "soup bowl", - "keyboard space bar", - "space heater", - "space shuttle", - "spatula", - "motorboat", - "spider web", - "spindle", - "sports car", - "spotlight", - "stage", - "steam locomotive", - "through arch bridge", - "steel drum", - "stethoscope", - "scarf", - "stone wall", - "stopwatch", - "stove", - "strainer", - "tram", - "stretcher", - "couch", - "stupa", - "submarine", - "suit", - "sundial", - "sunglasses", - "sunglasses", - "sunscreen", - "suspension bridge", - "mop", - "sweatshirt", - "swim trunks / shorts", - "swing", - "electrical switch", - "syringe", - "table lamp", - "tank", - "tape player", - "teapot", - "teddy bear", - "television", - "tennis ball", - "thatched roof", - "front curtain", - "thimble", - "threshing machine", - "throne", - "tile roof", - "toaster", - "tobacco shop", - "toilet seat", - "torch", - "totem pole", - "tow truck", - "toy store", - "tractor", - "semi-trailer truck", - "tray", - "trench coat", - "tricycle", - "trimaran", - "tripod", - "triumphal arch", - "trolleybus", - "trombone", - "hot tub", - "turnstile", - "typewriter keyboard", - "umbrella", - "unicycle", - "upright piano", - "vacuum cleaner", - "vase", - "vaulted or arched ceiling", - "velvet fabric", - "vending machine", - "vestment", - "viaduct", - "violin", - "volleyball", - "waffle iron", - "wall clock", - "wallet", - "wardrobe", - "military aircraft", - "sink", - "washing machine", - "water bottle", - "water jug", - "water tower", - "whiskey jug", - "whistle", - "hair 
wig", - "window screen", - "window shade", - "Windsor tie", - "wine bottle", - "airplane wing", - "wok", - "wooden spoon", - "wool", - "split-rail fence", - "shipwreck", - "sailboat", - "yurt", - "website", - "comic book", - "crossword", - "traffic or street sign", - "traffic light", - "dust jacket", - "menu", - "plate", - "guacamole", - "consomme", - "hot pot", - "trifle", - "ice cream", - "popsicle", - "baguette", - "bagel", - "pretzel", - "cheeseburger", - "hot dog", - "mashed potatoes", - "cabbage", - "broccoli", - "cauliflower", - "zucchini", - "spaghetti squash", - "acorn squash", - "butternut squash", - "cucumber", - "artichoke", - "bell pepper", - "cardoon", - "mushroom", - "Granny Smith apple", - "strawberry", - "orange", - "lemon", - "fig", - "pineapple", - "banana", - "jackfruit", - "cherimoya (custard apple)", - "pomegranate", - "hay", - "carbonara", - "chocolate syrup", - "dough", - "meatloaf", - "pizza", - "pot pie", - "burrito", - "red wine", - "espresso", - "tea cup", - "eggnog", - "mountain", - "bubble", - "cliff", - "coral reef", - "geyser", - "lakeshore", - "promontory", - "sandbar", - "beach", - "valley", - "volcano", - "baseball player", - "bridegroom", - "scuba diver", - "rapeseed", - "daisy", - "yellow lady's slipper", - "corn", - "acorn", - "rose hip", - "horse chestnut seed", - "coral fungus", - "agaric", - "gyromitra", - "stinkhorn mushroom", - "earth star fungus", - "hen of the woods mushroom", - "bolete", - "corn cob", - "toilet paper", -] - - -openai_imagenet_template = [ - lambda c: f"a bad photo of a {c}.", - lambda c: f"a photo of many {c}.", - lambda c: f"a sculpture of a {c}.", - lambda c: f"a photo of the hard to see {c}.", - lambda c: f"a low resolution photo of the {c}.", - lambda c: f"a rendering of a {c}.", - lambda c: f"graffiti of a {c}.", - lambda c: f"a bad photo of the {c}.", - lambda c: f"a cropped photo of the {c}.", - lambda c: f"a tattoo of a {c}.", - lambda c: f"the embroidered {c}.", - lambda c: f"a photo of a hard to see {c}.", - lambda c: f"a bright photo of a {c}.", - lambda c: f"a photo of a clean {c}.", - lambda c: f"a photo of a dirty {c}.", - lambda c: f"a dark photo of the {c}.", - lambda c: f"a drawing of a {c}.", - lambda c: f"a photo of my {c}.", - lambda c: f"the plastic {c}.", - lambda c: f"a photo of the cool {c}.", - lambda c: f"a close-up photo of a {c}.", - lambda c: f"a black and white photo of the {c}.", - lambda c: f"a painting of the {c}.", - lambda c: f"a painting of a {c}.", - lambda c: f"a pixelated photo of the {c}.", - lambda c: f"a sculpture of the {c}.", - lambda c: f"a bright photo of the {c}.", - lambda c: f"a cropped photo of a {c}.", - lambda c: f"a plastic {c}.", - lambda c: f"a photo of the dirty {c}.", - lambda c: f"a jpeg corrupted photo of a {c}.", - lambda c: f"a blurry photo of the {c}.", - lambda c: f"a photo of the {c}.", - lambda c: f"a good photo of the {c}.", - lambda c: f"a rendering of the {c}.", - lambda c: f"a {c} in a video game.", - lambda c: f"a photo of one {c}.", - lambda c: f"a doodle of a {c}.", - lambda c: f"a close-up photo of the {c}.", - lambda c: f"a photo of a {c}.", - lambda c: f"the origami {c}.", - lambda c: f"the {c} in a video game.", - lambda c: f"a sketch of a {c}.", - lambda c: f"a doodle of the {c}.", - lambda c: f"a origami {c}.", - lambda c: f"a low resolution photo of a {c}.", - lambda c: f"the toy {c}.", - lambda c: f"a rendition of the {c}.", - lambda c: f"a photo of the clean {c}.", - lambda c: f"a photo of a large {c}.", - lambda c: f"a rendition of a {c}.", - 
lambda c: f"a photo of a nice {c}.", - lambda c: f"a photo of a weird {c}.", - lambda c: f"a blurry photo of a {c}.", - lambda c: f"a cartoon {c}.", - lambda c: f"art of a {c}.", - lambda c: f"a sketch of the {c}.", - lambda c: f"a embroidered {c}.", - lambda c: f"a pixelated photo of a {c}.", - lambda c: f"itap of the {c}.", - lambda c: f"a jpeg corrupted photo of the {c}.", - lambda c: f"a good photo of a {c}.", - lambda c: f"a plushie {c}.", - lambda c: f"a photo of the nice {c}.", - lambda c: f"a photo of the small {c}.", - lambda c: f"a photo of the weird {c}.", - lambda c: f"the cartoon {c}.", - lambda c: f"art of the {c}.", - lambda c: f"a drawing of the {c}.", - lambda c: f"a photo of the large {c}.", - lambda c: f"a black and white photo of a {c}.", - lambda c: f"the plushie {c}.", - lambda c: f"a dark photo of a {c}.", - lambda c: f"itap of a {c}.", - lambda c: f"graffiti of the {c}.", - lambda c: f"a toy {c}.", - lambda c: f"itap of my {c}.", - lambda c: f"a photo of a cool {c}.", - lambda c: f"a photo of a small {c}.", - lambda c: f"a tattoo of the {c}.", -] diff --git a/spaces/deepwisdom/MetaGPT/tests/metagpt/actions/test_action_output.py b/spaces/deepwisdom/MetaGPT/tests/metagpt/actions/test_action_output.py deleted file mode 100644 index a556789dbb2c438c15ec651196fd49a6c781616a..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/tests/metagpt/actions/test_action_output.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 -""" -@Time : 2023/7/11 10:49 -@Author : chengmaoyu -@File : test_action_output -""" -from typing import List, Tuple - -from metagpt.actions import ActionOutput - -t_dict = {"Required Python third-party packages": "\"\"\"\nflask==1.1.2\npygame==2.0.1\n\"\"\"\n", - "Required Other language third-party packages": "\"\"\"\nNo third-party packages required for other languages.\n\"\"\"\n", - "Full API spec": "\"\"\"\nopenapi: 3.0.0\ninfo:\n title: Web Snake Game API\n version: 1.0.0\npaths:\n /game:\n get:\n summary: Get the current game state\n responses:\n '200':\n description: A JSON object of the game state\n post:\n summary: Send a command to the game\n requestBody:\n required: true\n content:\n application/json:\n schema:\n type: object\n properties:\n command:\n type: string\n responses:\n '200':\n description: A JSON object of the updated game state\n\"\"\"\n", - "Logic Analysis": [ - ["app.py", "Main entry point for the Flask application. Handles HTTP requests and responses."], - ["game.py", "Contains the Game and Snake classes. Handles the game logic."], - ["static/js/script.js", "Handles user interactions and updates the game UI."], - ["static/css/styles.css", "Defines the styles for the game UI."], - ["templates/index.html", "The main page of the web application. Displays the game UI."]], - "Task list": ["game.py", "app.py", "static/css/styles.css", "static/js/script.js", "templates/index.html"], - "Shared Knowledge": "\"\"\"\n'game.py' contains the Game and Snake classes which are responsible for the game logic. The Game class uses an instance of the Snake class.\n\n'app.py' is the main entry point for the Flask application. It creates an instance of the Game class and handles HTTP requests and responses.\n\n'static/js/script.js' is responsible for handling user interactions and updating the game UI based on the game state returned by 'app.py'.\n\n'static/css/styles.css' defines the styles for the game UI.\n\n'templates/index.html' is the main page of the web application. 
It displays the game UI and loads 'static/js/script.js' and 'static/css/styles.css'.\n\"\"\"\n", - "Anything UNCLEAR": "We need clarification on how the high score should be stored. Should it persist across sessions (stored in a database or a file) or should it reset every time the game is restarted? Also, should the game speed increase as the snake grows, or should it remain constant throughout the game?"} - -WRITE_TASKS_OUTPUT_MAPPING = { - "Required Python third-party packages": (str, ...), - "Required Other language third-party packages": (str, ...), - "Full API spec": (str, ...), - "Logic Analysis": (List[Tuple[str, str]], ...), - "Task list": (List[str], ...), - "Shared Knowledge": (str, ...), - "Anything UNCLEAR": (str, ...), -} - - -def test_create_model_class(): - test_class = ActionOutput.create_model_class("test_class", WRITE_TASKS_OUTPUT_MAPPING) - assert test_class.__name__ == "test_class" - - -def test_create_model_class_with_mapping(): - t = ActionOutput.create_model_class("test_class_1", WRITE_TASKS_OUTPUT_MAPPING) - t1 = t(**t_dict) - value = t1.dict()["Task list"] - assert value == ["game.py", "app.py", "static/css/styles.css", "static/js/script.js", "templates/index.html"] - - -if __name__ == '__main__': - test_create_model_class() - test_create_model_class_with_mapping() diff --git a/spaces/diacanFperku/AutoGPT/Free Download Surah Al Waqiah Pdf Free.md b/spaces/diacanFperku/AutoGPT/Free Download Surah Al Waqiah Pdf Free.md deleted file mode 100644 index fbdffacbd357424f48d3fba2c32ec8e660edcbb5..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Free Download Surah Al Waqiah Pdf Free.md +++ /dev/null @@ -1,78 +0,0 @@ - -
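The deleted MetaGPT test above builds a model class dynamically from a field mapping of `(type, ...)` tuples. That pattern maps directly onto pydantic's `create_model`; the internals of `ActionOutput.create_model_class` are not shown in this diff, so the sketch below is an illustration of the underlying mechanism, not MetaGPT's implementation. Note it uses identifier-style field names, whereas the test's spaced keys (e.g. "Task list") rely on pydantic v1 behavior.

```python
# A minimal sketch of the dynamic-model pattern exercised by the test above.
# It calls pydantic's create_model directly; ActionOutput wraps the same idea.
from typing import List, Tuple
from pydantic import create_model

# Each value is a (type, default) pair; Ellipsis (...) marks the field as required.
OUTPUT_MAPPING = {
    "task_list": (List[str], ...),
    "logic_analysis": (List[Tuple[str, str]], ...),
    "anything_unclear": (str, ...),
}

TaskOutput = create_model("TaskOutput", **OUTPUT_MAPPING)

instance = TaskOutput(
    task_list=["game.py", "app.py"],
    logic_analysis=[("game.py", "contains the game logic")],
    anything_unclear="none",
)
# .dict() follows the pydantic v1 API used in the snapshot above
# (pydantic v2 renames it to model_dump()).
print(instance.dict()["task_list"])  # ['game.py', 'app.py']
```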

      Free Download Surah Al-Waqi'ah PDF: How and Why to Read the Surah

      -

      If you are looking for a way to download Surah Al-Waqi'ah PDF for free, you have come to the right place. In this article, we will show you how and why to read this surah, which is one of the most important and beneficial chapters of the Quran.

      -

      What is Surah Al-Waqi'ah?

      -

      Surah Al-Waqi'ah is the 56th surah of the Quran, which consists of 96 verses. The word Waqi'ah means "the event" or "the inevitable", and it refers to the Day of Judgment, which is the main theme of the surah. The surah describes the scenes of the Day of Judgment, such as the resurrection of the dead, the division of people into three groups (the foremost, the companions of the right, and the companions of the left), and the rewards and punishments for each group. The surah also mentions some of the blessings and wonders of Paradise and some of the horrors and torments of Hell.

      -




      -

      How to Download Surah Al-Waqi'ah PDF for Free?

      -

There are many websites that offer Surah Al-Waqi'ah PDF for free download in different formats and languages. Here are some of them (a short Python download sketch follows the list):

      -
        -
      • The ALKALAM PDF website provides Surah Al-Waqi'ah PDF in Arabic with Indo-Pak style. You can download it from here.
      • -
      • The Surah Quran website provides Surah Al-Waqi'ah PDF in Arabic with Hafs A'n Assem narration. You can download it from here.
      • -
      • The Internet Archive website provides Surah Al-Waqi'ah PDF in Arabic with OCR text recognition. You can download it from here.
      • -
      • The Hamari Web website provides Surah Al-Waqi'ah PDF in Arabic with Urdu translation. You can download it from here.
      • -
      -
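If you prefer to script the download, the sketch below shows the general pattern. The URL is a hypothetical placeholder, not one of the sites listed above; substitute the direct PDF link from whichever site you choose.

```python
# A minimal sketch of downloading a PDF with Python's requests library.
import requests

url = "https://example.com/surah-al-waqiah.pdf"  # placeholder URL (assumption)
response = requests.get(url, timeout=30)
response.raise_for_status()  # fail loudly on HTTP errors

with open("surah-al-waqiah.pdf", "wb") as f:
    f.write(response.content)
print("Saved", len(response.content), "bytes")
```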

Why Read Surah Al-Waqi'ah?

      -

      Reading Surah Al-Waqi'ah has many benefits and virtues for Muslims. Here are some of them:

      -
        -
      • Reading Surah Al-Waqi'ah increases one's faith and awareness of Allah's power and justice. It reminds one of the reality and seriousness of the Day of Judgment, and motivates one to prepare for it by doing good deeds and avoiding sins.
      • -
      • Reading Surah Al-Waqi'ah protects one from poverty and hardship. According to a hadith (saying) of Prophet Muhammad (peace be upon him), whoever recites Surah Al-Waqi'ah every night will never be afflicted by poverty or need.
      • -
      • Reading Surah Al-Waqi'ah grants one peace and tranquility. According to another hadith, whoever recites Surah Al-Waqi'ah will have a peaceful night.
      • -
      • Reading Surah Al-Waqi'ah brings one closer to Paradise and saves one from Hell. According to another hadith, whoever recites Surah Al-Waqi'ah will be among the companions of the right on the Day of Judgment, who will enter Paradise without any reckoning or punishment.
      • -
      -

In conclusion, downloading Surah Al-Waqi'ah as a free PDF is a great way to access this surah anytime and anywhere. Reading this surah has many benefits and virtues for Muslims, such as increasing their faith, protecting them from poverty, granting them peace, and bringing them closer to Paradise.

      -

      How to Recite Surah Al-Waqi'ah?

      -

      Reciting Surah Al-Waqi'ah is not only beneficial, but also easy and simple. Here are some tips on how to recite this surah:

      -
        -
      • Make sure you are in a state of purity and cleanliness. Perform ablution (wudu) before reciting the surah.
      • -
      • Find a quiet and comfortable place to recite the surah. You can use a printed copy of the Quran, a PDF file, or an app on your device.
      • -
      • Start with seeking refuge from Satan and praising Allah. Say "A'udhu billahi min ash-shaytan ir-rajim" (I seek refuge with Allah from Satan, the accursed) and "Bismillahir Rahmanir Rahim" (In the name of Allah, the Most Gracious, the Most Merciful).
      • -
      • Recite the surah with proper pronunciation and intonation. Follow the rules of tajweed (the science of reciting the Quran) and try to imitate the recitation of a qualified teacher or reciter.
      • -
      • Recite the surah with understanding and reflection. Try to comprehend the meaning and message of the verses, and relate them to your life and situation.
      • -
      • Recite the surah with devotion and sincerity. Feel the emotions and sentiments of the verses, and ask Allah for His mercy and forgiveness.
      • -
      -

      What are the Sources of Surah Al-Waqi'ah?

      -

      Surah Al-Waqi'ah is a part of the Quran, which is the word of Allah revealed to Prophet Muhammad (peace be upon him) through the angel Gabriel (peace be upon him). The Quran is the primary source of guidance and legislation for Muslims, and it is preserved in its original form until today.

      -

The Quran is divided into 114 chapters (surahs), which are further divided into verses (ayahs). It was revealed over a period of 23 years, from 610 CE to 632 CE, in stages according to the events and circumstances that occurred during the life of Prophet Muhammad (peace be upon him) and his companions.

      -

      The Quran is classified into two categories based on the place of revelation: Meccan and Medinan. Meccan surahs are those that were revealed before the migration (hijrah) of Prophet Muhammad (peace be upon him) from Mecca to Medina in 622 CE. Medinan surahs are those that were revealed after the hijrah.

      -

      -

      Surah Al-Waqi'ah is a Meccan surah, which means it was revealed in Mecca before the hijrah. It is one of the late Meccan surahs, which were revealed towards the end of the Meccan period. These surahs mainly focus on the themes of monotheism, resurrection, judgment, heaven, hell, and prophethood.

      -


      How to Memorize Surah Al-Waqi'ah?

      -

      Memorizing Surah Al-Waqi'ah is a great way to increase one's knowledge and reward. Here are some tips on how to memorize this surah:

      -
        -
      • Make a sincere intention and ask Allah for help and guidance. Memorizing the Quran is an act of worship and a means of getting closer to Allah.
      • -
      • Choose a suitable time and place to memorize the surah. The best time is after Fajr (dawn) prayer, when the mind is fresh and alert. The best place is a quiet and clean place, where there are no distractions or interruptions.
      • -
      • Divide the surah into small portions and repeat them regularly. You can start with one verse or one page per day, depending on your ability and speed. You can also use a PDF file or an app to help you with the recitation and revision.
      • -
      • Understand the meaning and context of the verses. This will help you to memorize them faster and better. You can use a translation, a commentary, or a video lecture to learn more about the surah.
      • -
      • Review the surah frequently and consistently. This will help you to retain the surah in your memory and prevent you from forgetting it. You can review the surah daily, weekly, monthly, or whenever you have time.
      • -
      -

What are the Surahs Similar to Surah Al-Waqi'ah?

      -

      Surah Al-Waqi'ah is not the only surah that talks about the Day of Judgment and its consequences. There are many other surahs that have similar themes and messages to Surah Al-Waqi'ah. Here are some of them:

      -
        -
      • Surah Al-Qiyamah (The Resurrection): This is the 75th surah of the Quran, which consists of 40 verses. It describes the events of the Day of Resurrection, such as the blowing of the trumpet, the gathering of people, the questioning of deeds, and the witnessing of one's own soul.
      • -
      • Surah Al-Mulk (The Sovereignty): This is the 67th surah of the Quran, which consists of 30 verses. It emphasizes the power and sovereignty of Allah over everything in the heavens and the earth. It also warns of the punishment of Hell and encourages the believers to reflect on Allah's signs.
      • -
      • Surah Al-Rahman (The Most Gracious): This is the 55th surah of the Quran, which consists of 78 verses. It praises Allah for His countless blessings and favors upon His creation. It also contrasts the rewards of Paradise and the punishments of Hell for those who obey or disobey Allah.
      • -
      -


      How to Share Surah Al-Waqi'ah with Others?

      -

      Sharing Surah Al-Waqi'ah with others is a great way to spread the message and wisdom of the Quran. Here are some ways to share this surah with others:

      -
        -
      • You can share the PDF file of Surah Al-Waqi'ah with your friends and family via email, social media, or messaging apps. You can also print the PDF file and distribute it to others.
      • -
      • You can recite Surah Al-Waqi'ah in your prayers, especially in the night prayers (tahajjud). You can also recite it in gatherings, such as family meetings, study circles, or Islamic events.
      • -
      • You can teach Surah Al-Waqi'ah to others, especially to children and new Muslims. You can help them learn the recitation, meaning, and context of the surah. You can also use online resources, such as videos, audios, or quizzes to make the learning process more fun and interactive.
      • -
      • You can act upon Surah Al-Waqi'ah in your daily life, by applying its teachings and lessons. You can increase your faith in Allah and His power and justice. You can prepare for the Day of Judgment by doing good deeds and avoiding sins. You can also seek Allah's mercy and forgiveness for yourself and others.
      • -
      -

      What are the Challenges of Reading Surah Al-Waqi'ah?

      -

Reading Surah Al-Waqi'ah may not be easy for everyone. Here are some of the challenges or difficulties one may face while reading this surah:

      -
        -
      • The challenge of understanding the language and style of the Quran. The Quran is written in classical Arabic, which may be different from the modern Arabic that one may be familiar with. The Quran also uses various literary devices, such as metaphors, similes, parables, etc., which may require some background knowledge and explanation.
      • -
      • The challenge of memorizing and retaining the surah. The surah has 96 verses, which may seem long and hard to memorize. The surah also has some similar verses that may cause confusion or mix-up.
      • -
      • The challenge of facing the reality and seriousness of the Day of Judgment. The surah describes the scenes of the Day of Judgment in vivid and graphic details, which may be frightening and disturbing for some people. The surah also warns of the consequences of one's actions and choices in this life, which may cause regret and remorse.
      • -
      -

      However, these challenges should not discourage or prevent one from reading Surah Al-Waqi'ah. Rather, they should motivate one to seek Allah's help and guidance, and to strive harder to learn and benefit from this surah.

      -


      Conclusion

      -

Downloading Surah Al-Waqi'ah as a free PDF is a convenient and beneficial way to access this surah anytime and anywhere. Surah Al-Waqi'ah is one of the most important chapters of the Quran, and it talks about the Day of Judgment and its consequences. Reading this surah has many benefits and virtues for Muslims, such as increasing their faith, protecting them from poverty, granting them peace, and bringing them closer to Paradise. We highly recommend that you download the PDF and read this surah regularly and sincerely.

      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/STATISTICA8SerialKeykeygen ((EXCLUSIVE)).md b/spaces/diacanFperku/AutoGPT/STATISTICA8SerialKeykeygen ((EXCLUSIVE)).md deleted file mode 100644 index 946ee859cbefd0b3d425ab5f691f942610495763..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/STATISTICA8SerialKeykeygen ((EXCLUSIVE)).md +++ /dev/null @@ -1,93 +0,0 @@ -
      -

      STATISTICA 8 Serial Key Keygen: What You Need to Know

      -

      If you are a software developer or a user of STATISTICA 8, a powerful data analysis and visualization tool, you might be wondering how to get a serial key keygen for your application. A serial key keygen is a program that generates unique serial keys for software activation and registration. Serial keys are usually required to prevent unauthorized use and distribution of software products.

      -

In this article, we will explain what STATISTICA 8 serial key keygen is, how to find it, how to use it, and what the benefits and risks of using it are. We will also suggest some alternatives to a serial key keygen that might be more reliable and secure.

      -




      -

      What is STATISTICA 8 Serial Key Keygen?

      -

      STATISTICA 8 serial key keygen is a program that can generate serial keys for STATISTICA 8 applications. Serial keys are alphanumeric codes that are used to activate and register software products. STATISTICA 8 serial keys usually have the format XXXX-XXXX-XXXX-XXXX-XXXX, where X can be any letter or number.

      -

      A serial key keygen can create multiple serial keys for the same application, which can be used to install and run the software on different devices. Some serial key keygens can also generate license files, which are files that contain information about the software license and expiration date.
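To make the format above concrete, here is a minimal sketch that checks whether a string matches the five-group pattern described. The assumption that each character is an uppercase letter or digit is ours; the article does not document the exact alphabet STATISTICA accepts.

```python
# A minimal sketch validating the XXXX-XXXX-XXXX-XXXX-XXXX shape described
# above. The character set [A-Z0-9] is an assumption, not STATISTICA's spec.
import re

KEY_PATTERN = re.compile(r"^[A-Z0-9]{4}(?:-[A-Z0-9]{4}){4}$")

def looks_like_serial_key(s: str) -> bool:
    """Return True if `s` matches the five-group serial key shape."""
    return KEY_PATTERN.match(s) is not None

print(looks_like_serial_key("AB12-CD34-EF56-GH78-IJ90"))  # True
print(looks_like_serial_key("not-a-key"))                 # False
```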

      -

      How to Find STATISTICA 8 Serial Key Keygen?

      -

There are many websites that claim to offer STATISTICA 8 serial key keygen for free download. However, not all of them are trustworthy or reliable. Some of them might contain malware, viruses, or spyware that can harm your computer or steal your personal information. Others might provide fake or invalid serial keys that will not work or will cause problems with your software.

      -

      Therefore, it is important to be careful and cautious when looking for STATISTICA 8 serial key keygen online. Here are some tips to help you find a safe and legitimate source:

      -
        -
      • Check the reputation and reviews of the website. Look for feedback from other users who have downloaded the program. Avoid websites that have negative comments, complaints, or warnings.
      • -
      • Scan the file with an antivirus program before opening it. Make sure the file is free of any malware, viruses, or spyware that can harm your computer or compromise your security.
      • -
      • Verify the authenticity and validity of the serial keys. Try to use the serial keys on a test device or a virtual machine first. Make sure they work properly and do not cause any errors or issues with your software.
      • -
      -

      How to Use STATISTICA 8 Serial Key Keygen?

      -

      If you have found a safe and legitimate STATISTICA 8 serial key keygen, you can use it to generate serial keys for your application. Here are the steps to follow:

      -

      -
        -
      1. Download and install STATISTICA 8 on your device. You can get the setup file from the official website of TIBCO Software, the developer of STATISTICA.
      2. -
      3. Run the STATISTICA 8 serial key keygen program on your device. You might need to disable your antivirus program temporarily if it blocks the program.
      4. -
      5. Select the type of application you want to generate a serial key for, such as C# .NET, Visual Basic .NET, Delphi, C++ Builder, or Java.
      6. -
      7. Click on the Generate button to create a random serial key. You can also customize the number and combination of characters in the serial key.
      8. -
      9. Copy and paste the serial key into the activation window of STATISTICA 8. You might also need to copy and paste a license file if the program provides one.
      10. -
      11. Click on the Activate button to complete the registration process. You can now use STATISTICA 8 on your device.
      12. -
      -

      What are the Benefits and Risks of Using STATISTICA 8 Serial Key Keygen?

      -

Using STATISTICA 8 serial key keygen might seem like an easy and convenient way to get access to a powerful data analysis and visualization tool. However, it carries both benefits and risks that you should be aware of before using it.

      -

      Some of the benefits of using STATISTICA 8 serial key keygen are:

      -
        -
      • You can save money by not paying for a license fee or a subscription plan.
      • -
      • You can use STATISTICA 8 on multiple devices without any limitations or restrictions.
      • -
      • You can enjoy all the features and functions of STATISTICA 8 without any compromises or drawbacks.
      • -
      -

      Some of the risks of using STATISTICA 8 serial key keygen are:

      -
        -
      • You might violate the terms and conditions of TIBCO Software, which could result in legal actions or penalties.
      • -
      • You might expose your computer or device to malware, viruses, or spyware that could damage your system or steal your data.
      • -
      • You might get fake or invalid serial keys that will not work or will cause problems with your software.
      • -
      • You might miss out on updates, patches, or support from TIBCO Software, which could affect the performance or security of your software.
      • -
      -

      What are Some Alternatives to STATISTICA 8 Serial Key Keygen?

      -

      If you want to use STATISTICA 8 without risking any legal or technical issues, you might want to consider some alternatives to serial key keygen. Here are some options that you can try:

      -
        -
      • Purchase a license or a subscription plan from TIBCO Software. This is the most legitimate and secure way to use STATISTICA 8 on your device. You can choose from different plans and prices depending on your needs and preferences.
      • -
      • Request a free trial from TIBCO Software. This is a good way to test out STATISTICA 8 before buying it. You can get a free trial for up to 30 days with full access to all features and functions.
      • -
      • Use an open-source or free alternative to STATISTICA 8. There are many other data analysis and visualization tools that you can use without paying anything or using a serial key. Some examples are RStudio, Python, KNIME, Orange, Tableau Public, etc.
      • -
      - -

      Conclusion

      - -

In conclusion, STATISTICA 8 serial key keygen is a program that can generate serial keys for STATISTICA 8 applications. However, using it carries both benefits and risks that you should weigh carefully before deciding. Alternatively, you can try other ways to use STATISTICA 8 that do not involve a serial key keygen.

      -

      How to Download and Install STATISTICA 8 on Your Device?

      -

      Before you can use STATISTICA 8 serial key keygen, you need to download and install STATISTICA 8 on your device. STATISTICA 8 is a data analysis and visualization tool that can help you perform various statistical tasks, such as data mining, quality control, predictive modeling, and more. Here are the steps to download and install STATISTICA 8 on your device:

      -
        -
      1. Go to the official website of TIBCO Software, the developer of STATISTICA, and click on the Download button.
      2. -
      3. Select the version of STATISTICA 8 that suits your device and operating system. You can choose from 32-bit or 64-bit versions.
      4. -
      5. Enter your email address and other required information to get the download link.
      6. -
      7. Click on the download link and save the setup file to your device.
      8. -
      9. Run the setup file and follow the instructions to complete the installation.
      10. -
      11. Launch STATISTICA 8 on your device and enter the serial key that you generated with STATISTICA 8 serial key keygen.
      12. -
      -

      What are the Features and Benefits of STATISTICA 8?

      -

As noted above, STATISTICA 8 is a powerful data analysis and visualization tool. Some of its features and benefits are:

      -
        -
      • It supports a wide range of data formats, such as Excel, SPSS, SAS, SQL, Access, etc.
      • -
      • It provides a user-friendly interface with drag-and-drop functionality and customizable menus.
      • -
      • It offers a comprehensive set of statistical tools, such as descriptive statistics, hypothesis testing, regression analysis, ANOVA, factor analysis, cluster analysis, etc.
      • -
      • It enables you to create interactive graphs and charts with various options and styles.
      • -
      • It allows you to automate your workflows with macros and scripts.
      • -
      • It integrates with other applications, such as Microsoft Office, R, Python, etc.
      • -
      -

      How to Troubleshoot STATISTICA 8 Serial Key Keygen Issues?

      -

Sometimes you might encounter issues or errors when using STATISTICA 8 serial key keygen. For example, you might get a message that says "Invalid serial key" or "License expired", or you might experience performance or stability problems with your software. Here are some tips to help you troubleshoot these issues:

      -
        -
      • Make sure you have downloaded the correct version of STATISTICA 8 serial key keygen for your device and operating system.
      • -
      • Make sure you have entered the serial key correctly and without any typos or spaces.
      • -
      • Make sure you have copied and pasted the license file if provided by the program.
      • -
      • Make sure you have an active internet connection and disable any firewall or antivirus program that might block the activation process.
      • -
      • Make sure you have updated your software to the latest version and applied any patches or fixes from TIBCO Software.
      • -
      • Make sure you have not used the same serial key on more than one device or shared it with others.
      • -
      -

      How to Contact TIBCO Software for Support?

      -

If you have any questions or issues regarding STATISTICA 8 or STATISTICA 8 serial key keygen, you can contact TIBCO Software for support. TIBCO Software is the developer and owner of STATISTICA, a data analysis and visualization tool that it acquired in 2017. TIBCO Software provides several ways to contact them for support, such as:

      -
        -
      • Email: You can send an email to support@tibco.com with your name, email address, product name, version, serial number, and a description of your issue or question.
      • -
      • Phone: You can call the toll-free number 1-800-441-8454 (US and Canada) or +1-650-846-1000 (International) to speak with a customer service representative.
      • -
      • Chat: You can use the live chat feature on the website of TIBCO Software to chat with a support agent online.
      • -
      • Web: You can visit the website of TIBCO Software and access the support portal, where you can find FAQs, knowledge base articles, forums, downloads, documentation, and more.
      • -

      \ No newline at end of file diff --git a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/data_utils.py b/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/data_utils.py deleted file mode 100644 index 2c98d3dc8b9572bd05859033a74d155425a2a2ab..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/data_utils.py +++ /dev/null @@ -1,332 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data -import torchaudio -import commons -from mel_processing import spectrogram_torch, mel_spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text -from text import cleaned_text_to_sequence, get_bert - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths_sid_text, hparams): - self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - self.spk_map = hparams.spk2id - self.hparams = hparams - - self.use_mel_spec_posterior = getattr(hparams, "use_mel_posterior_encoder", False) - if self.use_mel_spec_posterior: - self.n_mel_channels = getattr(hparams, "n_mel_channels", 80) - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 300) - - random.seed(1234) - random.shuffle(self.audiopaths_sid_text) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_sid_text_new = [] - lengths = [] - skipped = 0 - for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text: - audiopath = f'{_id}' - if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len: - phones = phones.split(" ") - tone = [int(i) for i in tone.split(" ")] - word2ph = [int(i) for i in word2ph.split(" ")] - audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - else: - skipped += 1 - print("skipped: ", skipped, ", total: ", len(self.audiopaths_sid_text)) - self.audiopaths_sid_text = audiopaths_sid_text_new - self.lengths = lengths - - def get_audio_text_speaker_pair(self, audiopath_sid_text): - # separate filename, speaker_id and text - audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text - - bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath) - - spec, wav = self.get_audio(audiopath) - sid = torch.LongTensor([int(self.spk_map[sid])]) - return (phones, spec, wav, sid, tone, language, bert) - - def get_audio(self, filename): - audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True) - ''' - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} 
SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - ''' - spec_filename = filename.replace(".wav", ".spec.pt") - if self.use_mel_spec_posterior: - spec_filename = spec_filename.replace(".spec.pt", ".mel.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - if self.use_mel_spec_posterior: - # if os.path.exists(filename.replace(".wav", ".spec.pt")): - # # spec, n_fft, num_mels, sampling_rate, fmin, fmax - # spec = spec_to_mel_torch( - # torch.load(filename.replace(".wav", ".spec.pt")), - # self.filter_length, self.n_mel_channels, self.sampling_rate, - # self.hparams.mel_fmin, self.hparams.mel_fmax) - spec = mel_spectrogram_torch(audio_norm, self.filter_length, - self.n_mel_channels, self.sampling_rate, self.hop_length, - self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text, word2ph, phone, tone, language_str, wav_path): - # print(text, word2ph,phone, tone, language_str) - pold = phone - w2pho = [i for i in word2ph] - word2ph = [i for i in word2ph] - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - pold2 = phone - - if self.add_blank: - p1 = len(phone) - phone = commons.intersperse(phone, 0) - p2 = len(phone) - t1 = len(tone) - tone = commons.intersperse(tone, 0) - t2 = len(tone) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert_path = wav_path.replace(".wav", ".bert.pt") - try: - bert = torch.load(bert_path) - assert bert.shape[-1] == len(phone) - except: - bert = get_bert(text, word2ph, language_str) - torch.save(bert, bert_path) - #print(bert.shape[-1], bert_path, text, pold) - assert bert.shape[-1] == len(phone) - - assert bert.shape[-1] == len(phone), ( - bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho) - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - return bert, phone, tone, language - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def __getitem__(self, index): - return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) - - def __len__(self): - return len(self.audiopaths_sid_text) - - -class TextAudioSpeakerCollate(): - """ Zero-pads model inputs and targets - """ - - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text, audio and speaker identities - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized, sid] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - sid = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - tone_padded = 
torch.LongTensor(len(batch), max_text_len) - language_padded = torch.LongTensor(len(batch), max_text_len) - bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len) - - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - tone_padded.zero_() - language_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - bert_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - sid[i] = row[3] - - tone = row[4] - tone_padded[i, :tone.size(0)] = tone - - language = row[5] - language_padded[i, :language.size(0)] = language - - bert = row[6] - bert_padded[i, :, :bert.size(1)] = bert - - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. - """ - - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i + 1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - if (len_bucket == 0): - continue - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = 
ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/models.py b/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/models.py deleted file mode 100644 index d4afe44d883691610c5903e602a3ca245fcb3a5c..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/models.py +++ /dev/null @@ -1,707 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -from commons import init_weights, get_padding -from text import symbols, num_tones, num_languages -class DurationDiscriminator(nn.Module): #vits2 - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.dur_proj = nn.Conv1d(1, filter_channels, 1) - - self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.pre_out_norm_1 = modules.LayerNorm(filter_channels) - self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.pre_out_norm_2 = modules.LayerNorm(filter_channels) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - self.output_layer = nn.Sequential( - nn.Linear(filter_channels, 1), - nn.Sigmoid() - ) - - def forward_probability(self, x, x_mask, dur, g=None): - dur = self.dur_proj(dur) - x = torch.cat([x, dur], dim=1) - x = self.pre_out_conv_1(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_1(x) - x = self.drop(x) - x = self.pre_out_conv_2(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_2(x) - x = self.drop(x) - x = x * x_mask - x = x.transpose(1, 2) - output_prob = self.output_layer(x) - return output_prob - - def forward(self, x, x_mask, dur_r, dur_hat, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = 
self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - - output_probs = [] - for dur in [dur_r, dur_hat]: - output_prob = self.forward_probability(x, x_mask, dur, g) - output_probs.append(output_prob) - - return output_probs - -class TransformerCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - n_flows=4, - gin_channels=0, - share_parameter=False - ): - - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - - self.wn = attentions.FFT(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = self.gin_channels) if share_parameter else None - - for i in range(n_flows): - self.flows.append( - modules.TransformerCouplingLayer(channels, hidden_channels, kernel_size, n_layers, n_heads, p_dropout, filter_channels, mean_only=True, wn_sharing_parameter=self.wn, gin_channels = self.gin_channels)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + 
h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=0): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - self.emb = nn.Embedding(len(symbols), hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - self.tone_emb = nn.Embedding(num_tones, hidden_channels) - nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels ** -0.5) - self.language_emb = nn.Embedding(num_languages, hidden_channels) - nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels ** -0.5) - self.bert_proj = nn.Conv1d(1024, hidden_channels, 1) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, tone, language, bert, g=None): - x = (self.emb(x)+ self.tone_emb(tone)+ self.language_emb(language)+self.bert_proj(bert).transpose(1,2)) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, 
t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask, g=g) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in 
range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - -class ReferenceEncoder(nn.Module): - ''' - inputs --- [N, Ty/r, n_mels*r] mels - outputs --- [N, 
ref_enc_gru_size] - ''' - - def __init__(self, spec_channels, gin_channels=0): - - super().__init__() - self.spec_channels = spec_channels - ref_enc_filters = [32, 32, 64, 64, 128, 128] - K = len(ref_enc_filters) - filters = [1] + ref_enc_filters - convs = [weight_norm(nn.Conv2d(in_channels=filters[i], - out_channels=filters[i + 1], - kernel_size=(3, 3), - stride=(2, 2), - padding=(1, 1))) for i in range(K)] - self.convs = nn.ModuleList(convs) - # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)]) - - out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K) - self.gru = nn.GRU(input_size=ref_enc_filters[-1] * out_channels, - hidden_size=256 // 2, - batch_first=True) - self.proj = nn.Linear(128, gin_channels) - - def forward(self, inputs, mask=None): - N = inputs.size(0) - out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs] - for conv in self.convs: - out = conv(out) - # out = wn(out) - out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K] - - out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K] - T = out.size(1) - N = out.size(0) - out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K] - - self.gru.flatten_parameters() - memory, out = self.gru(out) # out --- [1, N, 128] - - return self.proj(out.squeeze(0)) - - def calculate_channels(self, L, kernel_size, stride, pad, n_convs): - for i in range(n_convs): - L = (L - kernel_size + 2 * pad) // stride + 1 - return L - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=256, - gin_channels=256, - use_sdp=True, - n_flow_layer = 4, - n_layers_trans_flow = 3, - flow_share_parameter = False, - use_transformer_flow = True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - self.n_layers_trans_flow = n_layers_trans_flow - self.use_spk_conditioned_encoder = kwargs.get("use_spk_conditioned_encoder", True) - self.use_sdp = use_sdp - self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False) - self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01) - self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6) - self.current_mas_noise_scale = self.mas_noise_scale_initial - if self.use_spk_conditioned_encoder and gin_channels > 0: - self.enc_gin_channels = gin_channels - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.enc_gin_channels) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, 
upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - if use_transformer_flow: - self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter) - else: - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels) - self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers >= 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - else: - self.ref_enc = ReferenceEncoder(spec_channels, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert): - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), - s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - if self.use_noise_scaled_mas: - epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale - neg_cent = neg_cent + epsilon - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - - l_length_sdp = self.sdp(x, x_mask, w, g=g) - l_length_sdp = l_length_sdp / torch.sum(x_mask) - - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging - - l_length = l_length_dp + l_length_sdp - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_) - - def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None): - #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert) - # g = self.gst(y) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g) - logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = 
torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) diff --git a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/README_zh.md b/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/README_zh.md deleted file mode 100644 index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/README_zh.md +++ /dev/null @@ -1 +0,0 @@ - diff --git a/spaces/docs-demos/openai-gpt/README.md b/spaces/docs-demos/openai-gpt/README.md deleted file mode 100644 index 133dde3645cf48dd9c4e2f69bcf9b5d19b163d0e..0000000000000000000000000000000000000000 --- a/spaces/docs-demos/openai-gpt/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: OpenAI GPT -emoji: 🔥 -colorFrom: purple -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
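In the VITS-style models.py deleted above, infer turns the predicted log-durations into w_ceil, builds a hard alignment with commons.generate_path, and expands the per-token priors m_p/logs_p to frame rate by a matmul with that path. A minimal, self-contained sketch of that expansion follows; the toy values and the helper name expand_by_duration are illustrative assumptions, not code from the repo:

import torch

def expand_by_duration(durations: torch.Tensor, token_states: torch.Tensor) -> torch.Tensor:
    # durations:    [t_text] integer frame counts per token (like w_ceil)
    # token_states: [t_text, d] per-token states (like the prior means m_p)
    # Repeating token i for durations[i] frames is what multiplying by the
    # hard attention path from generate_path achieves for one sequence.
    return torch.repeat_interleave(token_states, durations, dim=0)

durations = torch.tensor([2, 1, 3])                               # toy frame counts
token_states = torch.arange(3, dtype=torch.float32).unsqueeze(1)  # [3, 1]
print(expand_by_duration(durations, token_states).squeeze(1))
# tensor([0., 0., 1., 2., 2., 2.]) -- token i repeated durations[i] times

The model itself keeps this as a masked batch matmul so it handles padding across a batch; the sketch only shows the single-sequence idea.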
diff --git a/spaces/dorkai/ChatUIPro/app/components/welcome/style.module.css b/spaces/dorkai/ChatUIPro/app/components/welcome/style.module.css deleted file mode 100644 index b007bf23a01b68912805ddad75ea8c5eecc2b6bf..0000000000000000000000000000000000000000 --- a/spaces/dorkai/ChatUIPro/app/components/welcome/style.module.css +++ /dev/null @@ -1,29 +0,0 @@ -.boxShodow { - box-shadow: 0px 12px 16px -4px rgba(16, 24, 40, 0.08), 0px 4px 6px -2px rgba(16, 24, 40, 0.03); -} - -.bgGrayColor { - background-color: #F9FAFB; -} - -.headerBg { - height: 3.5rem; - padding-left: 1.5rem; - padding-right: 1.5rem; -} - -.formLabel { - width: 120px; - margin-right: 8px; -} - -.customBtn { - width: 136px; -} - -.logo { - width: 48px; - height: 20px; - background: url(./icons/logo.png) center center no-repeat; - background-size: contain; -} \ No newline at end of file diff --git a/spaces/eaedk/agri-tech-fastapi-with-GUI/assets/static/styles.css b/spaces/eaedk/agri-tech-fastapi-with-GUI/assets/static/styles.css deleted file mode 100644 index 188a9df656f3475eef5dabc573af9b364c24e9c1..0000000000000000000000000000000000000000 --- a/spaces/eaedk/agri-tech-fastapi-with-GUI/assets/static/styles.css +++ /dev/null @@ -1,7 +0,0 @@ -h1 { - color: rgb(81, 146, 43); -} - -p { - font-weight: 600; -} diff --git a/spaces/eetn/DALL-E/app.py b/spaces/eetn/DALL-E/app.py deleted file mode 100644 index 90cb79f6791bae83fdf1539f38f597ee770ba9db..0000000000000000000000000000000000000000 --- a/spaces/eetn/DALL-E/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("huggingface/flax-community/dalle-mini").launch() \ No newline at end of file diff --git a/spaces/ekosetiawan/flowers_classifier/app.py b/spaces/ekosetiawan/flowers_classifier/app.py deleted file mode 100644 index 1013fa7f040cbc1b99fc7e3ada31db6a12217fb1..0000000000000000000000000000000000000000 --- a/spaces/ekosetiawan/flowers_classifier/app.py +++ /dev/null @@ -1,30 +0,0 @@ -from fastai.vision.all import * -import gradio as gr -# import pathlib -# temp = pathlib.PosixPath -# pathlib.PosixPath = pathlib.WindowsPath - - -learn = load_learner('flower_model.pkl') - -labels = learn.dls.vocab - -title = "Tullip, Lily Flower and Sunflower Classifier" -description = "A flower classifier that trained with internet picture and using transfer learning from Resnet, made by following FastAI Deep Learning Course of 2022." 
- - -examples = ['tulip3.jpg'] -interpretation = 'tullip' -enable_queue = True - - -def predict(img): - img = PILImage.create(img) - pred, pred_idx, probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - - -demo = gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(512, 512)), outputs=gr.outputs.Label(num_top_classes=3), title=title, - description=description, examples=examples, interpretation=interpretation, enable_queue=enable_queue) - -demo.launch(share=True) diff --git a/spaces/eruuin/question-answering/README.md b/spaces/eruuin/question-answering/README.md deleted file mode 100644 index 498dc23646eec88fa12d458d20ec61e3f4dc06f3..0000000000000000000000000000000000000000 --- a/spaces/eruuin/question-answering/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Question Answering -emoji: 📊 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/eubinecto/idiomify/main_deploy.py b/spaces/eubinecto/idiomify/main_deploy.py deleted file mode 100644 index 5ac27f989e8514925c783cc732784c2b7384b161..0000000000000000000000000000000000000000 --- a/spaces/eubinecto/idiomify/main_deploy.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -we deploy the pipeline via streamlit. -""" -import re -import streamlit as st -from idiomify.fetchers import fetch_pipeline -from idiomify.pipeline import Pipeline - - -@st.cache(allow_output_mutation=True) -def cache_pipeline() -> Pipeline: - return fetch_pipeline() - - -def main(): - # fetch a pre-trained model - pipeline = cache_pipeline() - st.title("Idiomify Demo") - text = st.text_area("Type sentences here", - value="Just remember that there will always be a hope even when things look hopeless") - with st.sidebar: - st.subheader("Supported idioms") - idioms = [row["Idiom"] for _, row in pipeline.idioms.iterrows()] - st.write(" / ".join(idioms)) - - if st.button(label="Idiomify"): - with st.spinner("Please wait..."): - sents = [sent for sent in text.split(".") if sent] - preds = pipeline(sents, max_length=200) - # highlight the rule & honorifics that were applied - preds = [re.sub(r"|", "`", pred) - for pred in preds] - st.markdown(". 
".join(preds)) - - -if __name__ == '__main__': - main() diff --git a/spaces/faizhalas/coconut/pages/1 Keywords Stem.py b/spaces/faizhalas/coconut/pages/1 Keywords Stem.py deleted file mode 100644 index 4a786577012e415ab191bc8adc870d5024ab9703..0000000000000000000000000000000000000000 --- a/spaces/faizhalas/coconut/pages/1 Keywords Stem.py +++ /dev/null @@ -1,211 +0,0 @@ -import streamlit as st -import pandas as pd -import numpy as np -import re -import nltk -nltk.download('wordnet') -from nltk.stem import WordNetLemmatizer -nltk.download('stopwords') -from nltk.corpus import stopwords -from pprint import pprint -import pickle -import streamlit.components.v1 as components -from io import StringIO -from nltk.stem.snowball import SnowballStemmer -import csv -import sys - -#===config=== -st.set_page_config( - page_title="Coconut", - page_icon="🥥", - layout="wide" -) -st.header("Keywords Stem") -hide_streamlit_style = """ - - """ -st.markdown(hide_streamlit_style, unsafe_allow_html=True) - -st.subheader('Put your file here...') - -def reset_data(): - st.cache_data.clear() - -#===check filetype=== -@st.cache_data(ttl=3600) -def get_ext(extype): - extype = uploaded_file.name - return extype - -#===upload=== -@st.cache_data(ttl=3600) -def upload(extype): - keywords = pd.read_csv(uploaded_file) - return keywords - -@st.cache_data(ttl=3600) -def conv_txt(extype): - col_dict = {'TI': 'Title', - 'SO': 'Source title', - 'DE': 'Author Keywords', - 'ID': 'Keywords Plus'} - keywords = pd.read_csv(uploaded_file, sep='\t', lineterminator='\r') - keywords.rename(columns=col_dict, inplace=True) - return keywords - -@st.cache_data(ttl=3600) -def rev_conv_txt(extype): - col_dict_rev = {'Title': 'TI', - 'Source title': 'SO', - 'Author Keywords': 'DE', - 'Keywords Plus': 'ID'} - keywords.rename(columns=col_dict_rev, inplace=True) - return keywords - -@st.cache_data(ttl=3600) -def get_data(extype): - list_of_column_key = list(keywords.columns) - list_of_column_key = [k for k in list_of_column_key if 'Keyword' in k] - return list_of_column_key - -uploaded_file = st.file_uploader("Choose your a file", type=['csv','txt'], on_change=reset_data) - -if uploaded_file is not None: - extype = get_ext(uploaded_file) - if extype.endswith('.csv'): - keywords = upload(extype) - - elif extype.endswith('.txt'): - keywords = conv_txt(extype) - - list_of_column_key = get_data(extype) - - col1, col2 = st.columns(2) - with col1: - method = st.selectbox( - 'Choose method', - ('Lemmatization', 'Stemming'), on_change=reset_data) - with col2: - keyword = st.selectbox( - 'Choose column', - (list_of_column_key), on_change=reset_data) - - @st.cache_data(ttl=3600) - def clean_keyword(extype): - global keyword, keywords - try: - key = keywords[keyword] - except KeyError: - st.error('Error: Please check your Author/Index Keywords column.') - sys.exit(1) - keywords = keywords.replace(np.nan, '', regex=True) - keywords[keyword] = keywords[keyword].astype(str) - keywords[keyword] = keywords[keyword].map(lambda x: re.sub('-', ' ', x)) - keywords[keyword] = keywords[keyword].map(lambda x: re.sub('; ', ' ; ', x)) - keywords[keyword] = keywords[keyword].map(lambda x: x.lower()) - - #===Keywords list=== - key = key.dropna() - key = pd.concat([key.str.split('; ', expand=True)], axis=1) - key = pd.Series(np.ravel(key)).dropna().drop_duplicates().sort_values().reset_index() - key[0] = key[0].map(lambda x: re.sub('-', ' ', x)) - key['new']=key[0].map(lambda x: x.lower()) - - return keywords, key - - #===stem/lem=== - @st.cache_data(ttl=3600) - def 
Lemmatization(extype): - lemmatizer = WordNetLemmatizer() - def lemmatize_words(text): - words = text.split() - words = [lemmatizer.lemmatize(word) for word in words] - return ' '.join(words) - keywords[keyword] = keywords[keyword].apply(lemmatize_words) - key['new'] = key['new'].apply(lemmatize_words) - keywords[keyword] = keywords[keyword].map(lambda x: re.sub(' ; ', '; ', x)) - return keywords, key - - @st.cache_data(ttl=3600) - def Stemming(extype): - stemmer = SnowballStemmer("english") - def stem_words(text): - words = text.split() - words = [stemmer.stem(word) for word in words] - return ' '.join(words) - keywords[keyword] = keywords[keyword].apply(stem_words) - key['new'] = key['new'].apply(stem_words) - keywords[keyword] = keywords[keyword].map(lambda x: re.sub(' ; ', '; ', x)) - return keywords, key - - keywords, key = clean_keyword(extype) - - if method is 'Lemmatization': - keywords, key = Lemmatization(extype) - else: - keywords, key = Stemming(extype) - - st.write('Congratulations! 🤩 You choose',keyword ,'with',method,'method. Now, you can easily download the result by clicking the button below') - st.divider() - - #===show & download csv=== - tab1, tab2, tab3, tab4 = st.tabs(["📥 Result", "📥 List of Keywords", "📃 Reference", "📃 Recommended Reading"]) - - with tab1: - st.dataframe(keywords, use_container_width=True) - @st.cache_data(ttl=3600) - def convert_df(extype): - return keywords.to_csv(index=False).encode('utf-8') - - @st.cache_data(ttl=3600) - def convert_txt(extype): - return keywords.to_csv(index=False, sep='\t', lineterminator='\r').encode('utf-8') - - if extype.endswith('.csv'): - csv = convert_df(extype) - st.download_button( - "Press to download result 👈", - csv, - "scopus.csv", - "text/csv") - - elif extype.endswith('.txt'): - keywords = rev_conv_txt(extype) - txt = convert_txt(extype) - st.download_button( - "Press to download result 👈", - txt, - "savedrecs.txt", - "text/csv") - - with tab2: - @st.cache_data(ttl=3600) - def table_keyword(extype): - keytab = key.drop(['index'], axis=1).rename(columns={0: 'old'}) - return keytab - keytab = table_keyword(extype) - st.dataframe(keytab, use_container_width=True) - - @st.cache_data(ttl=3600) - def convert_dfs(extype): - return key.to_csv(index=False).encode('utf-8') - - csv = convert_dfs(extype) - - st.download_button( - "Press to download keywords 👈", - csv, - "keywords.csv", - "text/csv") - - with tab3: - st.markdown('**Santosa, F. A. (2023). Prior steps into knowledge mapping: Text mining application and comparison. Issues in Science and Technology Librarianship, 102.** https://doi.org/10.29173/istl2736') - - with tab4: - st.markdown('**Beri, A. (2021, January 27). Stemming vs Lemmatization. Medium.** https://towardsdatascience.com/stemming-vs-lemmatization-2daddabcb221') - st.markdown('**Khyani, D., Siddhartha B S, Niveditha N M, & Divya B M. (2020). An Interpretation of Lemmatization and Stemming in Natural Language Processing. Journal of University of Shanghai for Science and Technology , 22(10), 350–357.** https://jusst.org/an-interpretation-of-lemmatization-and-stemming-in-natural-language-processing/') - st.markdown('**Lamba, M., & Madhusudhan, M. (2021, July 31). Text Pre-Processing. 
Text Mining for Information Professionals, 79–103.** https://doi.org/10.1007/978-3-030-85085-2_3') \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/From Neuron To Brain Nicholls Torrent.rar REPACK.md b/spaces/falterWliame/Face_Mask_Detection/From Neuron To Brain Nicholls Torrent.rar REPACK.md deleted file mode 100644 index 0818f2de48850004c79c04fe377d8010974c6109..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/From Neuron To Brain Nicholls Torrent.rar REPACK.md +++ /dev/null @@ -1,100 +0,0 @@ - -
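The Keywords Stem page deleted above lets the user normalize keyword columns with either WordNetLemmatizer or SnowballStemmer. A minimal sketch of the difference between the two, using the same NLTK classes the app imports (newer NLTK versions may also require the omw-1.4 corpus):

import nltk
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer

nltk.download("wordnet", quiet=True)  # the lemmatizer needs the WordNet corpus

lemmatizer = WordNetLemmatizer()
stemmer = SnowballStemmer("english")

for word in ["studies", "libraries", "keywords"]:
    print(f"{word}: lemma={lemmatizer.lemmatize(word)} stem={stemmer.stem(word)}")
# 'studies' -> lemma 'study' but stem 'studi'; 'libraries' -> 'library' vs 'librari':
# lemmatization returns dictionary forms while stemming truncates to a crude root,
# which is why the app's two options merge keyword variants differently.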

      From Neuron To Brain Nicholls Torrent.rar: A Must-Have Book for Neuroscience Lovers


      If you are interested in learning about the function of the nervous system, you might want to download From Neuron To Brain Nicholls Torrent.rar. This is a book written by John G. Nicholls and other experts in the field of neurophysiology, cytology, and anatomy. It covers topics such as neurons, synapses, neurotransmitters, sensory systems, motor systems, learning and memory, and brain disorders.


      From Neuron To Brain Nicholls Torrent.rar is a comprehensive and updated edition of the classic book From Neuron To Brain by Stephen W. Kuffler, John G. Nicholls, and A. Robert Martin. It has been revised and expanded to include new discoveries and research in neuroscience. It also features clear illustrations, diagrams, and examples to help you understand the complex concepts and mechanisms of the nervous system.


      Why You Should Download From Neuron To Brain Nicholls Torrent.rar


      There are many reasons why you should download From Neuron To Brain Nicholls Torrent.rar. Here are some of them:

• It is a free and easy way to access the book without spending money or waiting for delivery.
• It is a high-quality and reliable file that contains the full text and images of the book.
• It is compatible with most devices and platforms, such as computers, tablets, smartphones, e-readers, etc.
• It is a valuable resource for students, teachers, researchers, clinicians, and anyone who wants to learn more about neuroscience.
• It is a fascinating and engaging book that will keep you hooked from the first page to the last.

      How to Download From Neuron To Brain Nicholls Torrent.rar


      To download From Neuron To Brain Nicholls Torrent.rar, you need to follow these simple steps:

1. Go to a reputable and safe torrent site that offers the file.
2. Search for From Neuron To Brain Nicholls Torrent.rar using the search bar or browse through the categories.
3. Select the file that has the most seeds and leeches to ensure a fast and smooth download.
4. Click on the download button or magnet link to start downloading the file.
5. Open the file using a torrent client or software that can read .rar files.
6. Enjoy reading From Neuron To Brain Nicholls Torrent.rar on your device.


      What You Will Learn From Neuron To Brain Nicholls Torrent.rar


      From Neuron To Brain Nicholls Torrent.rar is a book that will teach you everything you need to know about the function of the nervous system. You will learn about the structure and properties of neurons, the types and mechanisms of synapses, the role and diversity of neurotransmitters, the organization and function of sensory systems, the control and integration of motor systems, the processes and mechanisms of learning and memory, and the causes and consequences of brain disorders. You will also learn about the latest advances and discoveries in neuroscience, such as optogenetics, neuroimaging, gene therapy, stem cells, and artificial intelligence.


      From Neuron To Brain Nicholls Torrent.rar is a book that will challenge you to think critically and creatively about neuroscience. You will be exposed to different perspectives and approaches to studying the nervous system. You will be encouraged to apply your knowledge and skills to solve problems and answer questions. You will be inspired to explore further and deeper into the fascinating world of neuroscience.


      Who Should Download From Neuron To Brain Nicholls Torrent.rar


      From Neuron To Brain Nicholls Torrent.rar is a book that is suitable for anyone who wants to learn about neuroscience. Whether you are a student, a teacher, a researcher, a clinician, or a curious reader, you will find this book useful and enjoyable. You do not need any prior background or experience in neuroscience to read this book. You only need a passion and curiosity for learning.


      From Neuron To Brain Nicholls Torrent.rar is a book that is designed for different levels of learning. You can choose to read it from cover to cover or skip to the chapters that interest you most. You can also use it as a reference or a supplement for other books or courses on neuroscience. You can find additional resources and materials online to enhance your learning experience.


      What Others Say About From Neuron To Brain Nicholls Torrent.rar


      From Neuron To Brain Nicholls Torrent.rar is a book that has received positive feedback and praise from many readers and reviewers. Here are some of the comments and testimonials that people have shared about this book:


      "This is an excellent book for anyone who wants to learn about neuroscience. It is clear, comprehensive, and up-to-date. It covers all the major topics and concepts in a logical and coherent way. It also provides many examples and applications to illustrate the relevance and importance of neuroscience. I highly recommend this book to anyone who is interested in neuroscience."

- A reader from Amazon.com

      "This is one of the best books on neuroscience that I have ever read. It is written by experts who have made significant contributions to the field. It is very well-organized and easy to follow. It explains the basic principles and mechanisms of the nervous system in a simple and understandable way. It also covers the latest advances and discoveries in neuroscience in a thorough and accurate way. It is a great book for students, teachers, researchers, and clinicians."

- A reviewer from Goodreads.com

      "This is a masterpiece of neuroscience. It is a comprehensive and authoritative book that covers all aspects of the function of the nervous system. It is rich in details and insights, but also concise and clear. It is a book that will stimulate your curiosity and imagination, as well as your knowledge and understanding. It is a book that will inspire you to explore further and deeper into the fascinating world of neuroscience."

- A reviewer from LibraryThing.com


      What Are The Benefits Of Reading From Neuron To Brain Nicholls Torrent.rar


      Reading From Neuron To Brain Nicholls Torrent.rar is not only a fun and enjoyable activity, but also a beneficial one. Here are some of the benefits that you can get from reading this book:

• It will improve your knowledge and understanding of neuroscience. You will learn about the function of the nervous system at different levels, from molecules to cells to circuits to systems. You will also learn about the latest discoveries and research in neuroscience that have implications for health and disease.
• It will enhance your cognitive and analytical skills. You will be able to think critically and creatively about neuroscience. You will be able to apply your knowledge and skills to solve problems and answer questions. You will also be able to evaluate and synthesize information from different sources and perspectives.
• It will stimulate your curiosity and imagination. You will be fascinated by the complexity and beauty of the nervous system. You will be inspired by the achievements and challenges of neuroscience. You will also be motivated to explore further and deeper into the fascinating world of neuroscience.

      Where To Find More Resources And Materials For From Neuron To Brain Nicholls Torrent.rar


      If you want to find more resources and materials for From Neuron To Brain Nicholls Torrent.rar, you can visit the following websites:


      Conclusion


      From Neuron To Brain Nicholls Torrent.rar is a book that you should not miss if you want to learn about neuroscience. It is a free and easy way to get access to the book using torrent sites and .rar files. It is a high-quality and reliable file that contains the full text and images of the book. It is compatible with most devices and platforms. It is a valuable resource for neuroscience lovers. It is a fascinating and engaging book that will teach you everything you need to know about the function of the nervous system. It will also improve your cognitive and analytical skills, stimulate your curiosity and imagination, and inspire you to explore further and deeper into the fascinating world of neuroscience.


      So what are you waiting for? Download From Neuron To Brain Nicholls Torrent.rar today and enjoy reading this amazing book!

      \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Kontakt Library Torrent.md b/spaces/falterWliame/Face_Mask_Detection/Kontakt Library Torrent.md deleted file mode 100644 index c8ab8af3da04b5e3893f00e73addf3d2ae44b2cc..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Kontakt Library Torrent.md +++ /dev/null @@ -1,9 +0,0 @@ -

      Kontakt library torrent





KONTAKT 6 PLAYER is a free app that runs all KONTAKT instruments from Native Instruments, as well as a large number of instruments from other companies. KONTAKT 6 Player has a very user-friendly and intuitive interface and is equipped with many advanced features, making it very convenient to use in your workflow.

Key features

KONTAKT 6 Player is an application that allows you to run all of Native Instruments' KONTAKT instruments and many more on your computer.

KONTAKT 6 Player is compatible with Windows 7, Windows 8, Windows 8.1, Windows 10, and Windows 10 64-bit operating systems.

      diff --git a/spaces/falterWliame/Face_Mask_Detection/Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu].md b/spaces/falterWliame/Face_Mask_Detection/Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu].md deleted file mode 100644 index b2a3cfba8142bb444d474fa4fef62401f54afb3d..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu].md +++ /dev/null @@ -1,127 +0,0 @@ - -

      Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu]: How to Create Professional Slideshows with Ease


      Do you want to create stunning slideshows with your photos, videos, and music? Do you want to have full control over every aspect of your presentation? Do you want to save money and time by using a cracked version of a popular software? If you answered yes to any of these questions, then you might be interested in Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu]. This is a modified version of the original Photodex Proshow Producer software, which is a powerful and easy-to-use tool for creating professional-looking slideshows.


      What is Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu]?


      Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu] is a cracked version of the original Photodex Proshow Producer software, which is a slideshow creation tool that lets you combine photos, videos, and music into amazing presentations. You can customize every aspect of your slideshow, from transitions, effects, captions, templates, themes, and more. You can also output your slideshow to various formats, such as DVD, Blu-ray, YouTube, Facebook, Vimeo, and more.


The patch-RES is a patch that removes the activation requirement of the software, so you can use it without purchasing a license. ChingLiu is the name of the hacker who cracked the software and uploaded it to various torrent sites.


      How to download and install Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu]?


      To download and install Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu], you need to follow these steps:

1. Go to one of the torrent sites that host the file, such as CDNAPOLICITY or Peatix.
2. Download the torrent file or magnet link and open it with a torrent client, such as uTorrent or BitTorrent.
3. Wait for the download to finish and extract the zip file to a folder on your computer.
4. Run the setup.exe file and follow the installation instructions.
5. Copy the patch-RES.exe file from the crack folder and paste it into the installation folder of Photodex Proshow Producer.
6. Run the patch-RES.exe file as administrator and click on patch.
7. Enjoy your full version of Photodex Proshow Producer 5.0.3222!

      How to use Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu]?


      To use Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu], you need to follow these steps:

1. Launch the software and choose whether you want to create a new show or open an existing one.
2. Add your photos, videos, and music by dragging and dropping them into the timeline or using the import wizard.
3. Edit your slideshow by applying transitions, effects, captions, templates, themes, and more.
4. Preview your slideshow by clicking on the play button or using the preview window.
5. Output your slideshow by choosing one of the output options from the publish menu or using the output wizard.
6. Select your desired format, quality, destination, and other settings and click on create.
7. Enjoy your amazing slideshow!

      What are the benefits of using Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu]?


      There are many benefits of using Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu] over other slideshow software, such as:

• It is free and fully functional without any limitations or watermarks.
• It has a user-friendly interface and a simple workflow that makes it easy for anyone to create stunning slideshows.
• It has a wide range of features and options that let you customize every aspect of your slideshow according to your preferences and needs.
• It has a large collection of transitions, effects, captions, templates, themes, and more that you can use to enhance your slideshow.
• It supports various formats and platforms for outputting your slideshow, such as DVD, Blu-ray, YouTube, Facebook, Vimeo, and more.
• It can help you boost your SEO ranking by creating engaging and relevant content for your keyword.

      Conclusion


      If you are looking for a powerful and easy-to-use software to create stunning slideshows with your photos, videos, and music, you might want to check out Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu]. This is a cracked version of the popular Photodex Proshow Producer software that allows you to create professional-looking slideshows with unlimited creative control.


      To download and install Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu], you need to go to one of the torrent sites that host the file and use a torrent client to download it. Then you need to extract the zip file and run the setup.exe file. After that, you need to copy the patch-RES.exe file from the crack folder and paste it into the installation folder of Photodex Proshow Producer. Then you need to run the patch-RES.exe file as administrator and click on patch.


      To use Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu], you need to launch the software and choose whether you want to create a new show or open an existing one. Then you need to add your photos, videos, and music by dragging and dropping them into the timeline or using the import wizard. Then you need to edit your slideshow by applying transitions, effects, captions, templates, and themes. Finally, you can preview your slideshow, choose one of the output options from the publish menu, select your desired format, quality, and destination, and click on create.


In conclusion, Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu] is a great software for creating professional slideshows with your photos, videos, and music. It is free, easy to use, and full of features and options that let you customize your slideshow to your liking. It also supports various output formats and platforms that let you share your slideshow with your audience, and the engaging visual content it produces can in turn support your website or social channels.

- -

If you want to try Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu], you can download it from one of the torrent sites that host the file and install it using the patch-RES.exe file. Then you can launch the software and start creating your amazing slideshows with ease.

- -

Don't wait any longer and download Photodex Proshow Producer 5.0.3222 (patch-RES) [ChingLiu] today and see for yourself how it can transform your photos, videos, and music into stunning presentations!

-
-
\ No newline at end of file diff --git a/spaces/farandclose/AudioChatGPT/app.py b/spaces/farandclose/AudioChatGPT/app.py deleted file mode 100644 index 27440b57d15bab1670448cbdc276d940097d0d73..0000000000000000000000000000000000000000 --- a/spaces/farandclose/AudioChatGPT/app.py +++ /dev/null @@ -1,67 +0,0 @@ -import os -os.system("pip install openai") -os.system("pip install ipython") -import openai -import requests -import tempfile -import gradio as gr -from IPython.display import Audio - - -def get_chat_completion(prompt, openai_api_key, elevenlabs_api_key): - openai.api_key = openai_api_key - - completion = openai.Completion.create( - engine="text-davinci-002", - prompt=prompt, - max_tokens=1024, - n=1, - stop=None, - temperature=0.7, - ) - message = completion.choices[0].text.strip() - audio_path = text_to_audio(message, elevenlabs_api_key) - return message, audio_path - -def text_to_audio(message,elevenlabs_api_key): - - elevenLabs_url = 'https://api.elevenlabs.io/v1/text-to-speech/21m00Tcm4TlvDq8ikWAM/stream' - elevenLabs_headers = { - 'accept': '*/*', - 'xi-api-key': elevenlabs_api_key, - 'Content-Type': 'application/json' - } - - elevenLabs_data = { - "text": message, - "voice_settings": { - "stability": 0, - "similarity_boost": 0 - } - } - response = requests.post(elevenLabs_url, headers=elevenLabs_headers, json=elevenLabs_data, stream=True) - if response.status_code == 200: - with tempfile.NamedTemporaryFile(delete=False) as f: - f.write(response.content) - audio_path = f.name - print("line 27 - fine") - return audio_path - else: - return None - - -input_text = gr.inputs.Textbox(label="Type your prompt. Example: Tell me a short story") -input_openAIKey = gr.inputs.Textbox(label="Please enter your OpenAI API key") -input_EleLabsAIKey = gr.inputs.Textbox(label="Please enter your Eleven Labs API key") - -output_text = gr.outputs.Textbox(label="OpenAI response") -output_audio = gr.outputs.Audio(type="filepath", label="Eleven Labs audio") - -title = "Audio style ChatGPT 🔈" - - -article = gr.Interface(get_chat_completion, inputs=[input_text, input_openAIKey, input_EleLabsAIKey], outputs=[output_text, output_audio], title=title, cite="For feedback, please reach out to me on Twitter.") - - - -article.launch() \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download QuickQ Console Software and Update Your Lighting System.md b/spaces/fatiXbelha/sd/Download QuickQ Console Software and Update Your Lighting System.md deleted file mode 100644 index 291467b472aa930d1d826c0f64681e741e774db7..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download QuickQ Console Software and Update Your Lighting System.md +++ /dev/null @@ -1,91 +0,0 @@ -
-

What is QuickQ and why you need it

-

If you are looking for a VPN application that protects your network privacy and security, and guarantees high network speed, you should try QuickQ. QuickQ is an expert VPN application that has thousands of servers on all five continents, supports multiple protocols, and offers online customer service. In this article, we will show you how to download QuickQ for different devices, how to use it to protect your privacy and speed up your network, and how to get the most out of it.

-

How to download QuickQ for different devices

-

QuickQ is available for Android, iOS, macOS, Windows, and Linux devices. You can download it from different sources depending on your device.

-




• Android: download QuickQ from the Google Play Store or from the official QuickQ website.
• iOS: download QuickQ from the Apple App Store.
• macOS, Windows, and Linux: download the desktop client from the official QuickQ website.

How to use QuickQ to protect your privacy and speed up your network

-

Once you have downloaded and installed QuickQ on your device, you can start using it to protect your privacy and speed up your network. Here are some steps to follow:

-
    -
  1. Connect to QuickQ with one click and choose the best server. When you open QuickQ, you will see a big button that says "Connect". Just tap on it and the system will intelligently recommend the best server based on your location. You can also switch between servers according to your personal preferences. You will see a green icon that indicates that you are connected to QuickQ.
  2. Enjoy expert acceleration and access to your favorite media and websites. When you connect to QuickQ, you will enjoy expert acceleration that maximizes your web browsing speed. You will also be able to access your favorite media and websites that may be blocked or restricted in your region. For example, you can watch Netflix, YouTube, Hulu, BBC iPlayer, etc. from anywhere in the world.
  3. Stay anonymous and secure with expert encryption algorithms. When you connect to QuickQ, you will also be protected by expert encryption algorithms that encrypt your network traffic. This means that no one can see your real IP address, geolocation, or network traces. You can browse the web without worrying about hackers, snoopers, or government surveillance. You can also avoid ISP throttling, which may slow down your network speed when you use certain websites or applications.
-

How to get the most out of QuickQ

-

QuickQ is not only a VPN application, but also a powerful tool that can help you optimize your network performance and experience. Here are some tips to help you get the most out of QuickQ:

• Switch between the supported protocols to find the one that works best for your network environment.
• Use the same account on up to 3 devices at the same time so all of your devices stay protected.
• Reach out to the online customer service team whenever you run into issues or have questions.

Conclusion and FAQs

-

In conclusion, QuickQ is an expert VPN application that protects your network privacy and security, and guarantees high network speed. You can download it for free and enjoy a 7-day trial period. You can also choose from different subscription plans to continue using QuickQ. You can use it to connect to thousands of servers on all five continents, access your favorite media and websites, and stay anonymous and secure. You can also switch between different protocols, use the same account on 3 devices at the same time, and contact online customer service for any issues or questions.

-

If you still have some questions about QuickQ, here are some FAQs that may help you:

-
    -
  1. What is the difference between QuickQ and other VPN applications?

    QuickQ is different from other VPN applications in several ways. First, QuickQ has more servers and locations than most VPN applications. Second, QuickQ supports more protocols than most VPN applications. Third, QuickQ offers online customer service that is more professional and friendly than most VPN applications.

    -


    -
  2. Is QuickQ legal and safe to use?

    QuickQ is legal and safe to use in most countries and regions. However, some countries and regions may have strict laws or regulations regarding VPN usage. Therefore, we recommend that you check the local laws or regulations before using QuickQ in those countries or regions. We also advise that you do not use QuickQ for any illegal or unethical purposes.

    -
  3. How much does QuickQ cost?

    QuickQ offers different subscription plans to suit your needs and budget. You can choose from monthly, quarterly, yearly, or lifetime plans. The longer the plan, the more you save. You can also enjoy a 7-day trial period for free when you download QuickQ for the first time.

    -
  4. How can I cancel my subscription or request a refund?

    If you want to cancel your subscription or request a refund, you can contact our online customer service team via email, live chat, or phone. They will help you process your request as soon as possible. Please note that we have a 30-day money-back guarantee policy for all our subscription plans.

    -
  5. How can I contact QuickQ for more information or feedback?

    If you want to contact QuickQ for more information or feedback, you can visit our official website quickq.io, where you can find more details about our features, plans, servers, etc. You can also follow us on social media platforms such as Facebook, Twitter, Instagram, etc., where you can get the latest news and updates about QuickQ. You can also leave us a review or rating on Google Play Store or App Store to share your experience and opinion about QuickQ.

    -

-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Solitaire Collection and Play 5 Different Card Games for Free on PC.md b/spaces/fatiXbelha/sd/Download Solitaire Collection and Play 5 Different Card Games for Free on PC.md deleted file mode 100644 index 4f1922986530bd4c6ef481fcd7f6be9f009146dc..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Solitaire Collection and Play 5 Different Card Games for Free on PC.md +++ /dev/null @@ -1,95 +0,0 @@ -
-

How to Download Solitaire Free Games for PC

-

Solitaire is one of the most classic and beloved card games of all time. Whether you want to relax, challenge your mind, or kill some time, solitaire is always a great choice. But did you know that you can play solitaire on your PC for free? In this article, we will show you how to download solitaire free games for PC, why you should play solitaire on your computer, and what are some of the best solitaire games for PC.

-




-

What is Solitaire?

-

Solitaire is a card game that can be played by one person or more. The goal is to arrange all the cards in a specific order or pattern, depending on the type of solitaire game. There are many variations of solitaire, such as Klondike, Spider, FreeCell, TriPeaks, Pyramid, and more. Each one has its own rules and strategies.

-

Solitaire has a long and fascinating history. It is believed to have originated in Europe in the late 18th or early 19th century, and it was popularized by Napoleon Bonaparte, who played it during his exile on the island of St. Helena. In some countries it is also known as patience. Solitaire became widely available to the public when it was included in the Microsoft Windows operating system in 1990, and since then millions of people have enjoyed playing it on their computers.

-

Why Play Solitaire on PC?

-

Playing solitaire on your PC has many advantages over playing it with physical cards or on other devices. Here are some of them:

-

Better graphics and performance

-

When you play solitaire on your PC, you can enjoy high-quality graphics and smooth animations. You can also adjust the settings to suit your preferences, such as the card design, the background, the sound effects, and the difficulty level. Playing solitaire on your PC will make you feel like you are playing with real cards, but with more convenience and fun.

-

More game modes and features

-

Playing solitaire on your PC also gives you access to more game modes and features than playing it with physical cards or on other devices. You can choose from hundreds of different solitaire games, each with its own rules and challenges. You can also play online with other players, join tournaments, earn achievements, and track your statistics. Playing solitaire on your PC will never get boring, as you can always find something new and exciting to try.

-

No ads or interruptions

-

Another benefit of playing solitaire on your PC is that you can play without any ads or interruptions. Unlike some solitaire apps or websites that may show you annoying ads or pop-ups, playing solitaire on your PC lets you enjoy the game without any distractions. You can also play offline, so you don't have to worry about your internet connection or data usage. Playing solitaire on your PC will give you a relaxing and uninterrupted gaming experience.

-

How to Download Solitaire Free Games for PC?

-

If you are convinced that playing solitaire on your PC is the best way to enjoy this classic card game, you may wonder how to download solitaire free games for PC. Don't worry, it's very easy and simple. Just follow these steps:

-

Choose a reliable source

-

The first step is to choose a reliable source to download solitaire free games for PC. There are many websites and app stores that offer solitaire games for PC, but not all of them are trustworthy or safe. Some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal information. To avoid these risks, you should only download solitaire games from reputable sources, such as Microsoft Store, Google Play Store, Steam, or official websites of the game developers.

-

-

Download and install the game

-

The next step is to download and install the game on your PC. Once you have chosen a reliable source, you can browse through the available solitaire games and select the one that you like. Then, you can click on the download button and follow the instructions to complete the download process. After that, you can open the downloaded file and run the installation wizard. You may need to agree to some terms and conditions and choose a destination folder for the game. The installation process may take a few minutes, depending on the size of the game and the speed of your computer.

-

Launch and play the game

-

The final step is to launch and play the game on your PC. After the installation is done, you can find the game icon on your desktop or in your start menu. You can double-click on it to open the game and start playing. You may need to create an account or sign in with an existing one if the game requires online access. You can also adjust the settings and customize the game according to your preferences. Then, you can choose a game mode and start playing solitaire on your PC.

-

What are Some of the Best Solitaire Free Games for PC?

-

Now that you know how to download solitaire free games for PC, you may wonder what are some of the best solitaire games for PC. There are many options to choose from, but here are some of the most popular and recommended ones:

-

Microsoft Solitaire Collection

-

Microsoft Solitaire Collection is one of the most classic and popular solitaire games for PC. It is available for free on Microsoft Store and Windows 10 devices. It includes five different solitaire games: Klondike, Spider, FreeCell, TriPeaks, and Pyramid. It also has daily challenges, events, themes, achievements, leaderboards, and more. Microsoft Solitaire Collection is a great choice for anyone who loves solitaire and wants to play it on their PC.

-

123 Free Solitaire

-

123 Free Solitaire is another great option for solitaire lovers who want to play it on their PC. It is available for free on Google Play Store and Windows devices. It includes 12 different solitaire games: Diplomat, Flower Garden, Forty Thieves, FreeCell, Golf A-K, Klondike by Threes, Pyramid, Spider Solitaire One Suit, Spider Solitaire Two Suits, Spider Solitaire Four Suits, Yukon, and Scorpion. It also has high-quality graphics, animations, sounds, hints, an undo function, statistics, and more. 123 Free Solitaire is a simple and elegant solitaire game for PC that will keep you entertained for hours.

-

SolSuite Solitaire

-

SolSuite Solitaire is another amazing solitaire game for PC that you can download for free. It is available on Steam and Windows devices. It includes more than 700 different solitaire games, such as Baker's Dozen, Canfield, Faerie Queen, Four Seasons, Klondike, La Belle Lucie, Rouge et Noir, Spider, and many more. It also has stunning graphics, themes, backgrounds, card sets, sounds, music, hints, undo function, statistics, and more. SolSuite Solitaire is a comprehensive and beautiful solitaire game for PC that will satisfy any solitaire fan.

-

Conclusion

-

Solitaire is a timeless and enjoyable card game that you can play on your PC for free. Playing solitaire on your PC has many benefits, such as better graphics and performance, more game modes and features, and no ads or interruptions. To download solitaire free games for PC, you just need to choose a reliable source, download and install the game, and launch and play the game. Some of the best solitaire free games for PC are Microsoft Solitaire Collection, 123 Free Solitaire, and SolSuite Solitaire. So what are you waiting for? Download solitaire free games for PC today and have fun!

-

FAQs

-

Here are some of the frequently asked questions about downloading solitaire free games for PC:

-

Q: Are solitaire free games for PC safe to download?

-

A: Yes, as long as you download them from reputable sources, such as Microsoft Store, Google Play Store, Steam, or official websites of the game developers. You should avoid downloading solitaire games from unknown or suspicious sources, as they may contain viruses, malware, or spyware that can harm your computer or steal your personal information.

-

Q: Do I need an internet connection to play solitaire free games for PC?

-

A: No, you don't need an internet connection to play solitaire free games for PC. You can play them offline without any problem. However, some solitaire games may require online access to access some features or functions, such as online multiplayer, tournaments, events, achievements, leaderboards, etc. In that case, you will need an internet connection to enjoy those features or functions.

-

Q: How much space do I need to download solitaire free games for PC?

-

A: The amount of space you need to download solitaire free games for PC depends on the size of the game and the device you are using. Generally speaking, most solitaire games are not very large and do not take up much space on your computer. However, some solitaire games may have more graphics or content and may require more space. You can check the size of the game before downloading it and make sure you have enough space on your computer to install it.

-

Q: Can I play solitaire free games for PC with other people?

-

A: Yes, you can play solitaire free games for PC with other people. Some solitaire games have online multiplayer modes that allow you to play with or against other players from around the world. You can also join tournaments or events and compete with other players for prizes or rankings. Playing solitaire free games for PC with other people can make the game more fun and challenging.

-

Q: Can I customize my solitaire free games for PC?

-

A: Yes, you can customize your solitaire free games for PC. Most solitaire games have settings and options that allow you to adjust the game according to your preferences. You can change the card design, the background, the sound effects, the music, the difficulty level, the game speed, and more. You can also choose from different solitaire games and modes to suit your mood and skill level. Customizing your solitaire free games for PC can make the game more enjoyable and personalized.

-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Enjoy I Love You O Sravani Ringtone - Free Download of the Most Beautiful BGM Ever.md b/spaces/fatiXbelha/sd/Enjoy I Love You O Sravani Ringtone - Free Download of the Most Beautiful BGM Ever.md deleted file mode 100644 index bcbb9713afd2a3e53fb3d96d745707f7f97ecc46..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Enjoy I Love You O Sravani Ringtone - Free Download of the Most Beautiful BGM Ever.md +++ /dev/null @@ -1,96 +0,0 @@ - -

I Love You O Sravani Ringtone Free Download

-

If you are looking for a romantic and melodious ringtone for your phone, you might want to check out the I Love You O Sravani ringtone. This is a popular ringtone that features a sweet and heartfelt message from a lover to his beloved Sravani. In this article, we will tell you what this ringtone is, why people love it, and how you can download it for free.

-

Introduction

-

What is the I Love You O Sravani ringtone?

-

The I Love You O Sravani ringtone is a snippet from a song that was used in a Telugu movie called Venkey. The movie was released in 2004 and starred Ravi Teja and Sneha in the lead roles. The song was composed by Devi Sri Prasad and sung by S. P. Balasubrahmanyam and Chitra. The lyrics of the song express the love and devotion of Venkey, the protagonist, to his girlfriend Sravani, who is away from him.

-




-

Why do people love this ringtone?

-

People love this ringtone because it is very catchy and romantic. The tune is soothing and pleasant, and the voice of S. P. Balasubrahmanyam is legendary. The words "I love you o Sravani" are repeated several times in the song, making it easy to remember and sing along. The ringtone also conveys a sense of longing and affection that many people can relate to.

-

How to download the I Love You O Sravani ringtone for free

-

There are several ways to download the I Love You O Sravani ringtone for free. Here are three options that you can try:

-

Option 1: YouTube

-

You can find the video of the song on YouTube and convert it into an MP3 file that you can use as your ringtone. Here are the steps to do that:

-

Step 1: Find the video with the ringtone

-

Go to YouTube and search for "Venkey movie love bgm/ringtone". You should see a video with the title "Venkey movie love bgm/ringtone || i love you o sravani bgm#love #venkey #sravani #ravi teja". This video has the ringtone that you want.

-


-

Step 2: Copy the video URL

-

Right-click on the video and select "Copy video URL". Alternatively, you can copy the URL from the address bar of your browser.

-

Step 3: Paste the URL into a YouTube to MP3 converter

-

Go to a website that can convert YouTube videos into MP3 files, such as ytmp3.cc or y2mate.com. Paste the URL that you copied into the search box and click "Convert". Wait for a few seconds until the conversion is done.

-

Step 4: Download the MP3 file and transfer it to your phone

-

Click on the "Download" button and save the MP3 file to your computer. Then, connect your phone to your computer and transfer the file to your phone's storage. You can also use a cloud service or a Bluetooth connection to do this. Once the file is on your phone, you can set it as your ringtone in your phone's settings.

-

Option 2: Prokerala

-

Prokerala is a website that offers free ringtones for various names and occasions. You can find the I Love You O Sravani ringtone on this website and download it for your iPhone. Here are the steps to do that:

-

Step 1: Visit the Prokerala website

-

Go to prokerala.com and click on the "Ringtones" tab. You will see a list of categories and subcategories of ringtones.

-

Step 2: Search for Sravani name ringtone

-

In the search box, type "Sravani" and hit enter. You will see a list of ringtones with the name Sravani in them.

-

Step 3: Choose the SRAVANI Good morning I LOVE YOU name ringtone

-

Scroll down until you find the ringtone with the title "SRAVANI Good morning I LOVE YOU". This is the ringtone that you want. Click on it and you will see a preview of the ringtone and a download button.

-

Step 4: Download the M4R file and set it as your iPhone ringtone

-

Click on the "Download M4R" button and save the file to your computer. Then, connect your iPhone to your computer and open iTunes. Drag and drop the file into your iTunes library and sync it with your iPhone. You can also use iCloud or AirDrop to do this. Once the file is on your iPhone, you can set it as your ringtone in your iPhone's settings.

-

Option 3: Hiappo

-

Hiappo is another website that offers free ringtones for various devices and genres. You can find the I Love You O Sravani ringtone on this website and download it for your Android or iPhone devices. Here are the steps to do that:

-

Step 1: Visit the Hiappo website

-

Go to hiappo.com and click on the "Ringtones" tab. You will see a list of popular ringtones and genres.

-

Step 2: Search for Sravani ringtone

-

In the search box, type "Sravani" and hit enter. You will see a list of ringtones with the name Sravani in them.

-

Step 3: Listen and download the Sravani ringtone for your Android or iPhone devices

-

Find the ringtone with the title "Sravani". This is the ringtone that you want. Click on it and you will see a preview of the ringtone and two download buttons: one for Android devices and one for iPhone devices. Choose the appropriate button for your device and save the file to your phone's storage. You can also scan the QR code or share the link to do this. Once the file is on your phone, you can set it as your ringtone in your phone's settings.

-

Conclusion

-

Summary of the main points

-

In this article, we have explained what the I Love You O Sravani ringtone is, why people love it, and how you can download it for free. We have given you three options to do so: YouTube, Prokerala, and Hiappo. Each option has its own advantages and disadvantages, so you can choose the one that suits you best.

-

Call to action

-

If you are a fan of Venkey movie or S. P. Balasubrahmanyam's voice, you should definitely try this ringtone. It will make your phone sound more romantic and personal. It will also remind you of your love for Sravani or whoever is special in your life. So, what are you waiting for? Download the I Love You O Sravani ringtone today and enjoy its melody!

FAQ

Here are some frequently asked questions about the I Love You O Sravani ringtone:

Q: Is the I Love You O Sravani ringtone free to download and use?

A: Yes, the I Love You O Sravani ringtone is free to download and use. However, you should respect the intellectual property rights of the original creators and not use it for commercial purposes.

Q: Can I customize the I Love You O Sravani ringtone with my own name or message?

A: Yes, you can customize the I Love You O Sravani ringtone with your own name or message. You can use a website or an app that allows you to edit audio files and add your own voice or text. For example, you can use ringtone-maker.com or audiko.net.

Q: What are some other ringtones that are similar to the I Love You O Sravani ringtone?

A: Some other ringtones that are similar to the I Love You O Sravani ringtone are:

• I Love You Anjali ringtone from Khadgam movie
• I Love You Priya ringtone from Nuvvu Naaku Nachav movie
• I Love You Nandini ringtone from Nuvvostanante Nenoddantana movie
• I Love You Swathi ringtone from Swathi Muthyam movie
• I Love You Sowmya ringtone from Godavari movie

Q: How can I make my own ringtones from any song or audio file?

A: You can make your own ringtones from any song or audio file by using a website or an app that can cut and convert audio files into ringtones. For example, you can use mp3cut.net or ringtonemaker.com.

Q: How can I share my ringtones with others?

A: You can share your ringtones with others by using a website or an app that can upload and download ringtones. For example, you can use zedge.net or mobcup.net.
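
If you prefer to cut a clip yourself rather than rely on a website, you can also do the trim locally with a short script. The following is a minimal sketch, assuming the pydub library and ffmpeg are installed; the file names and timestamps are placeholders you would replace with your own:

    from pydub import AudioSegment  # requires: pip install pydub, plus ffmpeg on your PATH

    # Load the source track (placeholder file name).
    song = AudioSegment.from_mp3("full_song.mp3")

    # Slice by milliseconds: here, 0:15 to 0:45 of the track.
    clip = song[15000:45000]

    # Fade the edges so the ringtone does not cut in and out abruptly.
    clip = clip.fade_in(500).fade_out(1000)

    # Export the 30-second clip as an MP3 ringtone.
    clip.export("my_ringtone.mp3", format="mp3")

You can then transfer the exported file to your phone and set it as your ringtone, just like the files downloaded from the sites above.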

-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Experience Realistic Racing and Unlimited Cash with Rebel Racing APK Mod.md b/spaces/fatiXbelha/sd/Experience Realistic Racing and Unlimited Cash with Rebel Racing APK Mod.md deleted file mode 100644 index 81967fd29403ff8840188324edd6fc85c701cd5e..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Experience Realistic Racing and Unlimited Cash with Rebel Racing APK Mod.md +++ /dev/null @@ -1,118 +0,0 @@ - -

Rebel Racing APK Mod Money: How to Download and Play the Ultimate Racing Game

-

If you are a fan of racing games, you might have heard of Rebel Racing, one of the most popular and realistic racing games for mobile devices. But did you know that there is a way to get unlimited money and premium features in the game without spending a dime? Yes, you heard it right. In this article, we will tell you everything you need to know about Rebel Racing APK Mod Money, how to download and install it, how to play it, and some tips and tricks to ace all races.

-

What is Rebel Racing?

-

Rebel Racing is a racing game developed by Hutch Games, a company that specializes in creating high-quality racing games for mobile platforms. Rebel Racing was released in November 2019 and has since gained millions of downloads and positive reviews from players and critics alike.

-




-

Rebel Racing is a realistic and thrilling racing game for mobile devices

-

Rebel Racing is not your typical arcade racing game. It features realistic driving physics, speedfreak add-ons and turbos, epic overtakes and stunning West Coast locations that make you feel like a real racer. You can experience the thrill of high-octane, wheel-to-wheel action as you join America's most exclusive road racing event and take on the world's elite drivers.

-

Rebel Racing features a variety of game modes, cars, and customization options

-

Rebel Racing offers a lot of content and variety for racing enthusiasts. You can choose from different game modes, such as campaign mode, where you can progress through various chapters and challenges; multiplayer mode, where you can race against other players online; events mode, where you can participate in limited-time races with special rewards; and club seasons mode, where you can join or create a club and compete with other clubs for glory.

-

You can also collect, customize, and upgrade a fleet of real-world cars from iconic brands, such as Ford, Chevrolet, Dodge, BMW, and more. You can customize your cars with different paint jobs, decals, rims, and performance parts. You can also upgrade your cars with engine, transmission, suspension, and nitro boosts to improve their speed, handling, and acceleration.

-

Rebel Racing has stunning graphics and physics that make you feel like a real racer

-

Rebel Racing boasts of impressive graphics and physics that create a realistic and immersive racing experience. The game uses a state-of-the-art physics engine that simulates the behavior of the cars and the environment. The game also features dynamic lighting and shadows, weather effects, and detailed textures that bring the West Coast locations to life. You can enjoy the scenic views of the coastlines, mountains, deserts, and cities as you race through them.

-

What is Rebel Racing APK Mod Money?

-

Rebel Racing APK Mod Money is a modified version of the game that gives you unlimited money and access to all the premium features and content of the game. Rebel Racing APK Mod Money is not an official version of the game, but a fan-made hack that allows you to enjoy the game without any limitations or restrictions.

-

Rebel Racing APK Mod Money gives you unlimited money

-

One of the main benefits of Rebel Racing APK Mod Money is that it gives you unlimited money in the game. Money is used to buy and upgrade cars, as well as to enter races and events. Normally, you would have to earn money by winning races or completing challenges, or by spending real money to buy in-game currency. However, with Rebel Racing APK Mod Money, you don't have to worry about running out of money or spending your hard-earned cash. You can buy and upgrade any car you want without any hassle.

-


-

Rebel Racing APK Mod Money lets you buy and upgrade any car you want without spending real money

-

Another benefit of Rebel Racing APK Mod Money is that it lets you buy and upgrade any car you want without spending real money. Rebel Racing has a huge collection of cars from different brands and categories, such as muscle cars, sports cars, supercars, off-road vehicles, and more. Some of these cars are locked behind premium features or content that require you to spend real money to unlock them. However, with Rebel Racing APK Mod Money, you can unlock all the cars for free and enjoy them in the game.

-

Rebel Racing APK Mod Money also unlocks all the premium features and content of the game

-

The third benefit of Rebel Racing APK Mod Money is that it also unlocks all the premium features and content of the game. Rebel Racing has a lot of features and content that are exclusive to premium users or require real money to access them. For example, some of the features and content include VIP membership, special events, exclusive cars, bonus rewards, extra slots, and more. However, with Rebel Racing APK Mod Money, you can access all these features and content for free and get the best racing experience possible.

-

How to Download and Install Rebel Racing APK Mod Money?

-

If you are interested in downloading and installing Rebel Racing APK Mod Money on your device, you need to follow some simple steps. However, you also need to be careful of the source and the security of your device.

-

Rebel Racing APK Mod Money is not available on the official app stores

-

The first thing you need to know is that Rebel Racing APK Mod Money is not available on the official app stores, such as Google Play Store or Apple App Store. This is because Rebel Racing APK Mod Money is not an official version of the game, but a hacked version that violates the terms and conditions of the original game. Therefore, you cannot find or download Rebel Racing APK Mod Money from the official app stores.

-

You need to download the APK file from a trusted third-party source

-

The second thing you need to do is to download the APK file from a trusted third-party source. An APK file is an Android application package file that contains all the files and data needed to install an app on your device. You can find many websites that offer Rebel Racing APK Mod Money for download on the internet. However, not all of them are safe or reliable. Some of them may contain viruses or malware that can harm your device or steal your personal information. Therefore, you need to be careful when choosing a source for downloading Rebel Racing APK Mod Money. You should only download from reputable websites that have positive reviews and feedback from other users.

-

You need to enable unknown sources on your device settings and install the APK file manually

-

The third thing you need to do is to enable unknown sources on your device settings and install the APK file manually. Unknown sources are sources that are not approved by the official app stores, such as Rebel Racing APK Mod Money. By default, your device does not allow you to install apps from unknown sources for security reasons. However, you can enable this option on your device settings if you want to install Rebel Racing APK Mod Money. To do this, you need to go to your device settings, find the security or privacy option, and toggle on the unknown sources option. This will allow you to install apps from sources other than the official app stores.

-

After enabling unknown sources, you need to install the APK file manually. To do this, you need to locate the APK file on your device storage, tap on it, and follow the instructions on the screen. The installation process may take a few minutes, depending on the size of the file and the speed of your device. Once the installation is complete, you can launch Rebel Racing APK Mod Money and enjoy the game.
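
Because modded APKs from unofficial sources are a common way to spread malware, it is also worth verifying the file before you open it. If the download page publishes a checksum, you can compute one for the file you received and compare the two; here is a minimal sketch in Python, where the file name is a placeholder:

    import hashlib

    def sha256_of(path, chunk_size=1024 * 1024):
        # Hash the file in chunks so a large APK does not need to fit in memory.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # Compare this output with the checksum published by the download page, if any.
    print(sha256_of("rebel-racing-mod.apk"))

If the two values do not match, the file was altered somewhere along the way and you should not install it.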

-

How to Play Rebel Racing APK Mod Money?

-

Playing Rebel Racing APK Mod Money is not much different from playing the original game. The gameplay and controls are the same, but you have more money and features to enjoy. Here are some basic steps on how to play Rebel Racing APK Mod Money.

-

Rebel Racing APK Mod Money has the same gameplay and controls as the original game

-

Rebel Racing APK Mod Money has the same gameplay and controls as the original game. You can use the touch screen or tilt your device to steer your car, tap on the boost button to activate your nitro, and tap on the brake button to slow down or drift. You can also change the camera angle, adjust the sound and music volume, and access other options from the pause menu.

-

You can choose from different game modes, such as campaign, multiplayer, events, and club seasons

-

Rebel Racing APK Mod Money offers different game modes for you to choose from, depending on your preference and mood. You can play the campaign mode, where you can progress through various chapters and challenges; the multiplayer mode, where you can race against other players online; the events mode, where you can participate in limited-time races with special rewards; and the club seasons mode, where you can join or create a club and compete with other clubs for glory.

-

You can collect, customize, and upgrade your dream cars and race against the world's best drivers

-

Rebel Racing APK Mod Money lets you collect, customize, and upgrade your dream cars and race against the world's best drivers. You can buy any car you want with unlimited money and customize it with different paint jobs, decals, rims, and performance parts. You can also upgrade your cars with engine, transmission, suspension, and nitro boosts to improve their speed, handling, and acceleration. You can race with your cars on various tracks and locations, such as highways, deserts, mountains, cities, and more.

-

Tips and Tricks for Rebel Racing APK Mod Money

-

If you want to master Rebel Racing APK Mod Money and win all races, you need to know some tips and tricks that can help you improve your skills and performance. Here are some of them:

-

Use your boost wisely to gain speed and overtake your opponents

-

Your boost is one of your most powerful tools in Rebel Racing APK Mod Money. It can give you a burst of speed that can help you overtake your opponents or escape from tricky situations. However, your boost is not unlimited. It has a meter that depletes as you use it and recharges slowly over time. Therefore, you need to use your boost wisely and strategically. You should not waste it on straight roads or when you are already ahead of everyone else. You should save it for curves or when you need to catch up or pass someone.

-

Avoid collisions and drifting too much to maintain your momentum and rebel bonus

-

Collisions and drifting are two things that can slow you down in Rebel Racing APK Mod Money. Collisions are when you hit another car or an obstacle on the road. Drifting is when you slide sideways while turning a corner. Both of these actions can reduce your speed and momentum, which can affect your performance and position in the race. Therefore, you should avoid collisions and drifting too much in Rebel Racing APK Mod Money. You should try to drive smoothly and cleanly without hitting anything or anyone. You should also try to drift only when necessary or when it can give you an advantage.

-

Avoiding collisions and drifting too much can also help you maintain your rebel bonus in Rebel Racing APK Mod Money. Your rebel bonus is a multiplier that increases as you drive well without crashing or drifting too much. Your rebel bonus can boost your score and rewards at the end of each race.

-

Experiment with different cars and upgrades to find the best combination for each track

-

Rebel Racing APK Mod Money has a lot of cars and upgrades for you to choose from, but not all of them are suitable for every track. Some cars and upgrades may perform better on certain tracks than others, depending on the terrain, weather, and difficulty. Therefore, you should experiment with different cars and upgrades to find the best combination for each track. You can test your cars and upgrades on the practice mode or the free ride mode before entering a race. You can also compare the stats and features of different cars and upgrades to see which ones suit your style and preference.

-

Conclusion

-

Rebel Racing APK Mod Money is a fun and exciting racing game that lets you enjoy unlimited money and premium features. Rebel Racing APK Mod Money is easy to download and install, but you need to be careful of the source and the security of your device. Rebel Racing APK Mod Money offers a realistic and immersive racing experience that will keep you hooked for hours.

-

If you are looking for a racing game that combines realism, thrill, and customization, Rebel Racing APK Mod Money is the game for you. Download it now and join the rebel racing community!

-

FAQs

-

Here are some frequently asked questions about Rebel Racing APK Mod Money:

-

Is Rebel Racing APK Mod Money safe to use?

-

Rebel Racing APK Mod Money is generally safe to use, as long as you download it from a trusted source and scan it with an antivirus program before installing it. However, Rebel Racing APK Mod Money is not an official version of the game, so it may have some bugs or glitches that can affect your gameplay or device. Also, Rebel Racing APK Mod Money may not be compatible with some devices or updates of the original game. Therefore, you should use Rebel Racing APK Mod Money at your own risk and discretion.

-

Is Rebel Racing APK Mod Money legal to use?

-

Rebel Racing APK Mod Money is not legal to use, as it violates the terms and conditions of the original game. Rebel Racing APK Mod Money is a hacked version of the game that gives you an unfair advantage over other players and deprives the developers of their rightful revenue. Therefore, using Rebel Racing APK Mod Money may result in legal actions or penalties from the original game developers or authorities. You should respect the intellectual property rights of the original game developers and support them by playing the official version of the game.

-

How can I update Rebel Racing APK Mod Money?

-

Rebel Racing APK Mod Money does not update automatically like the original game. You need to manually download and install the latest version of Rebel Racing APK Mod Money from a reliable source whenever there is a new update available. However, you should be aware that updating Rebel Racing APK Mod Money may erase your progress or data in the game, so you should back up your data before updating. You should also check if the new version of Rebel Racing APK Mod Money is compatible with your device and the original game.

-

How can I uninstall Rebel Racing APK Mod Money?

-

If you want to uninstall Rebel Racing APK Mod Money from your device, you can follow these steps:

1. Open Settings on your device and go to Apps (or Application Manager).
2. Find Rebel Racing in the list of installed apps and tap on it.
3. Tap on Uninstall and confirm your choice.

You can also delete the APK file from your device storage if you want to free up some space.

-

Where can I get more information about Rebel Racing APK Mod Money?

-

If you want to get more information about Rebel Racing APK Mod Money, you can look for websites and forums that cover modded Android games and read their guides and reviews.
- -

You can also join some online forums or communities where you can interact with other players and fans of Rebel Racing APK Mod Money.

-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/FR Legends - Drift Your Way to Glory on Iconic Tracks with Legendary Cars.md b/spaces/fatiXbelha/sd/FR Legends - Drift Your Way to Glory on Iconic Tracks with Legendary Cars.md deleted file mode 100644 index 4a4a3746c081c8c06d58db2b885e774e1803dd20..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/FR Legends - Drift Your Way to Glory on Iconic Tracks with Legendary Cars.md +++ /dev/null @@ -1,137 +0,0 @@ -
-

Download FR Legends Android 1: A Guide for Drifting Enthusiasts

-

If you are a fan of drifting games, you might have heard of FR Legends, a mobile game that lets you drive legendary front-engine, rear-wheel-drive drift cars at world's most iconic circuits. You can also customize everything on your car, from engine swaps to wide-body kits, and have tandem drift battles with AI drivers or other players. In this article, we will show you how to download FR Legends Android 1, the latest version of the game, from different sources. We will also give you some tips and tricks on how to play FR Legends Android 1 on your PC or mobile device.

-

How to Download FR Legends Android 1 from Google Play Store

-

The easiest way to download FR Legends Android 1 is from the Google Play Store, the official app store for Android devices. Here are the steps you need to follow:

-

-
    -
  1. Open the Google Play Store app on your Android device.
  2. Search for FR Legends in the search bar.
  3. Tap on the FR Legends icon and then tap on Install.
  4. Wait for the download and installation to complete.
-

Congratulations, you have successfully downloaded FR Legends Android 1 from the Google Play Store. You can now launch the game and enjoy drifting with your favorite cars.

-

How to Download FR Legends Android 1 from Other Sources

-

If you can't access the Google Play Store or want to try a different version of FR Legends, you can also download FR Legends Android 1 from other sources. However, you need to be careful when downloading apps from unknown sources, as they may contain malware or viruses that can harm your device. Here are the steps you need to follow:

-
    -
  1. Enable unknown sources on your Android device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
  2. Find a reliable website that offers the FR Legends APK file. APK is the file format used for installing apps on Android devices. You can search for "FR Legends APK" on Google or use websites like APKPure or Uptodown.
  3. Download the FR Legends APK file to your device. Make sure you check the file size and version before downloading.
  4. Locate the FR Legends APK file and tap on it to install it. You may need to grant some permissions to the app during the installation process.
-

That's it, you have successfully downloaded FR Legends Android 1 from other sources. You can now launch the game and enjoy drifting with your favorite cars.
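
If you prefer installing from a computer, you can also sideload the downloaded file over USB with adb. The sketch below is an illustration rather than part of the official steps: it assumes the Android platform tools (adb) are installed, USB debugging is enabled on the device, and the file is saved under the hypothetical name fr-legends.apk.

```python
import subprocess

APK_PATH = "fr-legends.apk"  # hypothetical name for the downloaded APK

# List connected devices first to confirm the phone is detected and authorized.
result = subprocess.run(["adb", "devices"], capture_output=True, text=True, check=True)
print(result.stdout)

# Sideload the APK; the -r flag reinstalls over an existing copy while keeping its data.
subprocess.run(["adb", "install", "-r", APK_PATH], check=True)
```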

-

How to Play FR Legends Android 1 on PC with BlueStacks

-

If you want to play FR Legends Android 1 on a bigger screen and with better controls, you can also play it on your PC with BlueStacks, a popular Android emulator that lets you run Android apps and games on your PC. Here are the steps you need to follow:

-
    -
  1. Download and install BlueStacks on your PC from BlueStacks.com.
  2. Launch BlueStacks and sign in with your Google account. If you don't have one, you can create one for free.
  3. Search for FR Legends in the BlueStacks app center. You can also use the Google Play Store app within BlueStacks.
  4. Install and launch FR Legends on BlueStacks. You can customize the keyboard and mouse controls according to your preference.
-

That's it, you have successfully played FR Legends Android 1 on your PC with BlueStacks. You can now enjoy drifting with your favorite cars on a larger screen and with better controls.

-

Tips and Tricks for Playing FR Legends Android 1

-

Now that you know how to download and play FR Legends Android 1, here are some tips and tricks that will help you improve your drifting skills and have more fun in the game:

-

- -

Conclusion and FAQs

-

In conclusion, FR Legends Android 1 is a fun and addictive drifting game that you can download and play on your Android device or PC. You can customize your car with different parts and colors, learn the basics of drifting and practice on different tracks, challenge other players or AI drivers in tandem drift battles, and earn cash and rewards by completing quests and achievements. If you are a drifting enthusiast or want to try something new, you should definitely give FR Legends Android 1 a try.

-

Here are some FAQs that you might have about FR Legends Android 1:

-

Q: Is FR Legends Android 1 free to play?

-

A: Yes, FR Legends Android 1 is free to play. However, it contains ads and offers in-app purchases; you can pay real money to remove the ads and get extra in-game cash.

-

Q: What are the minimum requirements for playing FR Legends Android 1?

-

A: The minimum requirements for playing FR Legends Android 1 are:

| Requirement | Minimum |
| --- | --- |
| Android version | 4.4 or higher |
| RAM | 2 GB or higher |
| Storage space | 100 MB or higher |
| Internet connection | Required for online features |
-

Q: How many cars are available in FR Legends Android 1?

-

A: There are 13 cars available in FR Legends Android 1, each with different specifications and performance. You can unlock them by buying them with cash or by completing certain achievements. The cars are:

| Car | Price | Achievement |
| --- | --- | --- |
| FR-S | $10,000 | None |
| AE86 | $15,000 | None |
| S13 | $20,000 | None |
| E30 | $25,000 | None |
| FC3S | $30,000 | None |
| R32 | $35,000 | None |
| S14 | $40,000 | None |
| E36 | $45,000 | None |
| R34 | $50,000 | None |
| S15 | $55,000 | None |
| FD3S | $60,000 | None |
| GT86 | $65,000 | None |
| AE86T | $70,000 | Complete all quests |
-

Q: How can I remove ads and get more cash in FR Legends Android 1?

-

A: You can remove ads and get more cash in FR Legends Android 1 by purchasing the premium version of the game. The premium version costs $2.99 and gives you the following benefits:

-
  - No more ads while playing
  - Bonus cash to spend on cars and upgrades
-

To purchase the premium version, go to the Menu button and tap on the Premium button.

-

Q: What are the best settings for FR Legends Android 1?

-

A: The best settings for FR Legends Android 1 depend on your personal preference and device performance. However, here are some general recommendations:

- -

To access the settings, go to the Menu button and tap on the Settings button.

-
-
\ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Instagram Aero APK 2022 The Ultimate Mod for Instagram.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Instagram Aero APK 2022 The Ultimate Mod for Instagram.md deleted file mode 100644 index da1dd77ab27dc1f3521bafb6c56edbee6a1f2ad7..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Instagram Aero APK 2022 The Ultimate Mod for Instagram.md +++ /dev/null @@ -1,100 +0,0 @@ - -

Instagram Aero APK 2022: What is it and why you should try it

-

Introduction

-

Instagram is one of the most popular social media platforms in the world, with over 1 billion monthly active users. It allows you to share photos, videos, stories, reels, and more with your friends and followers. However, if you are looking for more features and customization options than the official app offers, you might want to try Instagram Aero, a modded version of Instagram that gives you more control and flexibility over your experience.

-

In this article, we will explain what Instagram Aero is, what are its features, how to install it on your Android device, what are the risks and alternatives of using it, and why you should give it a try in 2022.

-

-

What is Instagram Aero?

-

Instagram Aero is an Instagram MOD for Android that was created by Turkish developer Hazar Bozkurt. A MOD is a modified version of an original app that adds or changes some features to enhance its functionality or appearance. Instagram Aero is based on the latest version of Instagram, but it offers many extra features that are not available on the official app.

-

What are the features of Instagram Aero?

-

Instagram Aero has many features that make it stand out from the original app. Here are some of the main ones:

-

Customization options

-

With Instagram Aero, you can customize the look and feel of your app by choosing from different themes, fonts, icons, colors, and layouts. You can also hide or show various elements of the interface, such as buttons, tabs, stories bar, direct messages icon, etc. You can also change the app language to any of the supported ones.

-

Privacy and security enhancements

-

Instagram Aero gives you more control over your privacy and security settings. You can hide your online status, typing status, seen status, story views, etc. You can also lock your app with a PIN or fingerprint to prevent unauthorized access. You can also disable ads and analytics to avoid unwanted interruptions and data collection.

-

Media download and playback controls

-

One of the most useful features of Instagram Aero is that it allows you to download any media from the app with a single tap. You can download photos, videos, stories, reels, IGTV videos, etc. to your device storage. You can also view profile pictures in full size and zoom in on any media. Moreover, you can control the playback of videos by fast-forwarding or rewinding them. You can also enable or disable auto-play by pressing the camera button at the top of the interface.

-

Other useful functions

-

Instagram Aero also has some other functions that make your experience more convenient and enjoyable. For example, you can copy comments and captions from any post. You can also translate comments and captions to any language. You can also open links from within the app without leaving it. You can also follow or unfollow multiple users at once.

How to install Instagram Aero on your Android device -

If you are interested in trying Instagram Aero, you will need to download and install the APK file on your Android device. An APK file is an Android application package that contains all the files and code needed to run an app. However, since Instagram Aero is not available on the Google Play Store, you will need to follow some steps to install it safely and correctly. Here are the steps:

-

Step 1: Download the APK file from a trusted source

-

The first step is to download the APK file of Instagram Aero from a trusted source. You can find the latest version of the app on the official website of the developer or on other reputable websites that offer modded apps. Make sure you download the file from a secure link and scan it with an antivirus before opening it.
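
If the download page publishes a checksum for the file, you can also verify that the APK was not corrupted or tampered with in transit before installing it. A minimal Python sketch, where the file name and expected hash are placeholders you would replace with the real values:

```python
import hashlib

APK_PATH = "instagram-aero.apk"                  # placeholder file name
EXPECTED_SHA256 = "hash-from-the-download-page"  # placeholder value

# Hash the file in 1 MB chunks so large APKs never need to fit in memory at once.
digest = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

if digest.hexdigest().lower() != EXPECTED_SHA256.lower():
    raise SystemExit("Checksum mismatch - do not install this file.")
print("Checksum OK:", digest.hexdigest())
```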

-

Step 2: Enable installation from unknown sources

-

The next step is to enable installation from unknown sources on your device. This is a security setting that prevents you from installing apps that are not from the Google Play Store. To enable it, go to Settings > Security > Unknown sources and toggle it on. You may also need to grant permission to your browser or file manager to install apps from unknown sources.

-

-

Step 3: Install the APK file and launch the app

-

The final step is to install the APK file and launch the app. To do this, locate the downloaded file on your device storage and tap on it. You will see a prompt asking you to confirm the installation. Tap on Install and wait for the process to finish. Once done, you can launch the app by tapping on Open or by finding it on your app drawer.

-

Step 4: Log in with your Instagram account or create a new one

-

After launching the app, you will see a familiar interface that resembles the original Instagram app. You can log in with your existing Instagram account or create a new one if you prefer. However, be aware that using a modded app may violate the terms of service of Instagram and may result in your account being suspended or banned. Therefore, we recommend using a secondary account or creating a new one for testing purposes.

What are the risks and alternatives of using Instagram Aero

-

While Instagram Aero may seem like a great app that offers many benefits, it also comes with some risks and drawbacks that you should be aware of before using it. Here are some of them:

-

Risks of using a modded app

-

Account suspension or ban

-

As we mentioned earlier, using a modded app may violate the terms of service of Instagram and may result in your account being suspended or banned. This means that you may lose access to your account, your followers, your posts, and your messages. You may also face legal action from Instagram if they find out that you are using a modded app. Therefore, you should use Instagram Aero at your own risk and discretion.

-

Data breach or malware infection

-

Another risk of using a modded app is that it may compromise your data and device security. Since the app is not verified by Google or Instagram, it may contain malicious code or spyware that can steal your personal information, such as your login credentials, your contacts, your photos, etc. It may also infect your device with malware that can damage it or make it vulnerable to hackers. Therefore, you should always download the app from a trusted source and scan it with an antivirus before installing it.

-

Missing out on official updates and features

-

A third risk of using a modded app is that it may not be compatible with the latest version of Instagram and may not receive regular updates and bug fixes. This means that you may miss out on some of the new features and improvements that Instagram introduces from time to time. You may also encounter some errors or glitches while using the app. Therefore, you should always check for updates and install them as soon as they are available.

-

Alternatives to Instagram Aero

-

If you are not comfortable with using Instagram Aero or if you want to try some other options, here are some alternatives that you can consider:

-

Instander

-

Instander is another Instagram MOD for Android that offers similar features as Instagram Aero, such as customization options, media download and playback controls, privacy and security enhancements, etc. It also has some unique features, such as disabling stories autoplay, enabling dark mode, hiding sponsored posts, etc. You can download Instander from its official website or from other reliable sources.

-

Barinsta

-

Barinsta is an open-source Instagram client for Android that allows you to browse Instagram without an account. You can view public profiles, posts, stories, reels, IGTV videos, etc. without logging in. You can also download any media from the app with a single tap. You can also follow or unfollow users without an account. However, you cannot post anything or send messages with Barinsta. You can download Barinsta from its official website or from the F-Droid store.

-

Original Instagram app

-

The original Instagram app is the official app that you can download from the Google Play Store or the App Store. It offers all the basic features that you need to enjoy Instagram, such as sharing photos, videos, stories, reels, etc., chatting with friends and followers, exploring new content and accounts, etc. It also receives regular updates and bug fixes from Instagram. However, it does not offer any extra features or customization options that the modded apps offer.

Conclusion

-

Instagram Aero is a modded version of Instagram that offers many features and customization options that are not available on the official app. It allows you to download any media from the app, hide your online and seen status, change the app theme and language, and more. However, it also comes with some risks and drawbacks, such as account suspension or ban, data breach or malware infection, and missing out on official updates and features. Therefore, you should use it at your own risk and discretion.

-

If you are looking for an alternative to Instagram Aero, you can try Instander, Barinsta, or the original Instagram app. Instander is another modded app that offers similar features as Instagram Aero. Barinsta is an open-source app that allows you to browse Instagram without an account. The original Instagram app is the official app that offers all the basic features that you need to enjoy Instagram.

-

We hope this article has helped you understand what Instagram Aero is, what are its features, how to install it on your Android device, what are the risks and alternatives of using it, and why you should give it a try in 2022. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

-

FAQs

-

Here are some frequently asked questions about Instagram Aero:

-

Is Instagram Aero safe to use?

-

Instagram Aero is not verified by Google or Instagram, so it may not be safe to use. It may contain malicious code or spyware that can steal your personal information or infect your device with malware. It may also violate the terms of service of Instagram and result in your account being suspended or banned. Therefore, you should use it at your own risk and discretion.

-

Is Instagram Aero free to use?

-

Yes, Instagram Aero is free to use. You do not need to pay any money to download or install it on your device. However, you may see some ads or donations requests from the developer or the source website.

-

How can I update Instagram Aero?

-

To update Instagram Aero, you will need to download and install the latest version of the APK file from a trusted source. You can check for updates on the official website of the developer or on other reputable websites that offer modded apps. You may also receive notifications from the app itself when a new version is available.

-

Can I use Instagram Aero with multiple accounts?

-

Yes, you can use Instagram Aero with multiple accounts. You can switch between your accounts by tapping on the profile icon at the bottom right corner of the interface and selecting the account that you want to use. You can also add or remove accounts by tapping on the settings icon at the top right corner of the interface and selecting Add Account or Log Out.

-

Can I use Instagram Aero on iOS devices?

-

No, you cannot use Instagram Aero on iOS devices. Instagram Aero is only compatible with Android devices. If you want to use a modded app on iOS devices, you will need to jailbreak your device and install a third-party app store that offers modded apps.

-
-
\ No newline at end of file diff --git a/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/clap/open_clip/timm_model.py b/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/clap/open_clip/timm_model.py deleted file mode 100644 index c9d1ab4666b5bab5038d44b90c9ddca5087de460..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/clap/open_clip/timm_model.py +++ /dev/null @@ -1,112 +0,0 @@ -""" timm model adapter - -Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model. -""" -from collections import OrderedDict - -import torch.nn as nn - -try: - import timm - from timm.models.layers import Mlp, to_2tuple - from timm.models.layers.attention_pool2d import RotAttentionPool2d - from timm.models.layers.attention_pool2d import ( - AttentionPool2d as AbsAttentionPool2d, - ) -except ImportError as e: - timm = None - -from .utils import freeze_batch_norm_2d - - -class TimmModel(nn.Module): - """timm model adapter - # FIXME this adapter is a work in progress, may change in ways that break weight compat - """ - - def __init__( - self, - model_name, - embed_dim, - image_size=224, - pool="avg", - proj="linear", - drop=0.0, - pretrained=False, - ): - super().__init__() - if timm is None: - raise RuntimeError("Please `pip install timm` to use timm models.") - - self.image_size = to_2tuple(image_size) - self.trunk = timm.create_model(model_name, pretrained=pretrained) - feat_size = self.trunk.default_cfg.get("pool_size", None) - feature_ndim = 1 if not feat_size else 2 - if pool in ("abs_attn", "rot_attn"): - assert feature_ndim == 2 - # if attn pooling used, remove both classifier and default pool - self.trunk.reset_classifier(0, global_pool="") - else: - # reset global pool if pool config set, otherwise leave as network default - reset_kwargs = dict(global_pool=pool) if pool else {} - self.trunk.reset_classifier(0, **reset_kwargs) - prev_chs = self.trunk.num_features - - head_layers = OrderedDict() - if pool == "abs_attn": - head_layers["pool"] = AbsAttentionPool2d( - prev_chs, feat_size=feat_size, out_features=embed_dim - ) - prev_chs = embed_dim - elif pool == "rot_attn": - head_layers["pool"] = RotAttentionPool2d(prev_chs, out_features=embed_dim) - prev_chs = embed_dim - else: - assert proj, "projection layer needed if non-attention pooling is used." 
- - # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used - if proj == "linear": - head_layers["drop"] = nn.Dropout(drop) - head_layers["proj"] = nn.Linear(prev_chs, embed_dim) - elif proj == "mlp": - head_layers["mlp"] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop) - - self.head = nn.Sequential(head_layers) - - def lock(self, unlocked_groups=0, freeze_bn_stats=False): - """lock modules - Args: - unlocked_groups (int): leave last n layer groups unlocked (default: 0) - """ - if not unlocked_groups: - # lock full model - for param in self.trunk.parameters(): - param.requires_grad = False - if freeze_bn_stats: - freeze_batch_norm_2d(self.trunk) - else: - # NOTE: partial freeze requires latest timm (master) branch and is subject to change - try: - # FIXME import here until API stable and in an official release - from timm.models.helpers import group_parameters, group_modules - except ImportError: - raise RuntimeError( - "Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`" - ) - matcher = self.trunk.group_matcher() - gparams = group_parameters(self.trunk, matcher) - max_layer_id = max(gparams.keys()) - max_layer_id = max_layer_id - unlocked_groups - for group_idx in range(max_layer_id + 1): - group = gparams[group_idx] - for param in group: - self.trunk.get_parameter(param).requires_grad = False - if freeze_bn_stats: - gmodules = group_modules(self.trunk, matcher, reverse=True) - gmodules = {k for k, v in gmodules.items() if v <= max_layer_id} - freeze_batch_norm_2d(self.trunk, gmodules) - - def forward(self, x): - x = self.trunk(x) - x = self.head(x) - return x diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/engine.io/build/server.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/engine.io/build/server.d.ts deleted file mode 100644 index 64b11add4a3c8f51764b8689699ac808a176ccad..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/engine.io/build/server.d.ts +++ /dev/null @@ -1,264 +0,0 @@ -/// -import { EventEmitter } from "events"; -import type { IncomingMessage, Server as HttpServer, ServerResponse } from "http"; -import type { CookieSerializeOptions } from "cookie"; -import type { CorsOptions, CorsOptionsDelegate } from "cors"; -import type { Duplex } from "stream"; -declare type Transport = "polling" | "websocket"; -export interface AttachOptions { - /** - * name of the path to capture - * @default "/engine.io" - */ - path?: string; - /** - * destroy unhandled upgrade requests - * @default true - */ - destroyUpgrade?: boolean; - /** - * milliseconds after which unhandled requests are ended - * @default 1000 - */ - destroyUpgradeTimeout?: number; - /** - * Whether we should add a trailing slash to the request path. - * @default true - */ - addTrailingSlash?: boolean; -} -export interface ServerOptions { - /** - * how many ms without a pong packet to consider the connection closed - * @default 20000 - */ - pingTimeout?: number; - /** - * how many ms before sending a new ping packet - * @default 25000 - */ - pingInterval?: number; - /** - * how many ms before an uncompleted transport upgrade is cancelled - * @default 10000 - */ - upgradeTimeout?: number; - /** - * how many bytes or characters a message can be, before closing the session (to avoid DoS). 
- * @default 1e5 (100 KB) - */ - maxHttpBufferSize?: number; - /** - * A function that receives a given handshake or upgrade request as its first parameter, - * and can decide whether to continue or not. The second argument is a function that needs - * to be called with the decided information: fn(err, success), where success is a boolean - * value where false means that the request is rejected, and err is an error code. - */ - allowRequest?: (req: IncomingMessage, fn: (err: string | null | undefined, success: boolean) => void) => void; - /** - * the low-level transports that are enabled - * @default ["polling", "websocket"] - */ - transports?: Transport[]; - /** - * whether to allow transport upgrades - * @default true - */ - allowUpgrades?: boolean; - /** - * parameters of the WebSocket permessage-deflate extension (see ws module api docs). Set to false to disable. - * @default false - */ - perMessageDeflate?: boolean | object; - /** - * parameters of the http compression for the polling transports (see zlib api docs). Set to false to disable. - * @default true - */ - httpCompression?: boolean | object; - /** - * what WebSocket server implementation to use. Specified module must - * conform to the ws interface (see ws module api docs). - * An alternative c++ addon is also available by installing eiows module. - * - * @default `require("ws").Server` - */ - wsEngine?: any; - /** - * an optional packet which will be concatenated to the handshake packet emitted by Engine.IO. - */ - initialPacket?: any; - /** - * configuration of the cookie that contains the client sid to send as part of handshake response headers. This cookie - * might be used for sticky-session. Defaults to not sending any cookie. - * @default false - */ - cookie?: (CookieSerializeOptions & { - name: string; - }) | boolean; - /** - * the options that will be forwarded to the cors module - */ - cors?: CorsOptions | CorsOptionsDelegate; - /** - * whether to enable compatibility with Socket.IO v2 clients - * @default false - */ - allowEIO3?: boolean; -} -/** - * An Express-compatible middleware. - * - * Middleware functions are functions that have access to the request object (req), the response object (res), and the - * next middleware function in the application’s request-response cycle. - * - * @see https://expressjs.com/en/guide/using-middleware.html - */ -declare type Middleware = (req: IncomingMessage, res: ServerResponse, next: () => void) => void; -export declare abstract class BaseServer extends EventEmitter { - opts: ServerOptions; - protected clients: any; - private clientsCount; - protected middlewares: Middleware[]; - /** - * Server constructor. - * - * @param {Object} opts - options - * @api public - */ - constructor(opts?: ServerOptions); - protected abstract init(): any; - /** - * Compute the pathname of the requests that are handled by the server - * @param options - * @protected - */ - protected _computePath(options: AttachOptions): string; - /** - * Returns a list of available transports for upgrade given a certain transport. - * - * @return {Array} - * @api public - */ - upgrades(transport: any): any; - /** - * Verifies a request. - * - * @param {http.IncomingMessage} - * @return {Boolean} whether the request is valid - * @api private - */ - protected verify(req: any, upgrade: any, fn: any): any; - /** - * Adds a new middleware. - * - * @example - * import helmet from "helmet"; - * - * engine.use(helmet()); - * - * @param fn - */ - use(fn: Middleware): void; - /** - * Apply the middlewares to the request. 
- * - * @param req - * @param res - * @param callback - * @protected - */ - protected _applyMiddlewares(req: IncomingMessage, res: ServerResponse, callback: () => void): void; - /** - * Closes all clients. - * - * @api public - */ - close(): this; - protected abstract cleanup(): any; - /** - * generate a socket id. - * Overwrite this method to generate your custom socket id - * - * @param {Object} request object - * @api public - */ - generateId(req: any): any; - /** - * Handshakes a new client. - * - * @param {String} transport name - * @param {Object} request object - * @param {Function} closeConnection - * - * @api protected - */ - protected handshake(transportName: any, req: any, closeConnection: any): Promise; - protected abstract createTransport(transportName: any, req: any): any; - /** - * Protocol errors mappings. - */ - static errors: { - UNKNOWN_TRANSPORT: number; - UNKNOWN_SID: number; - BAD_HANDSHAKE_METHOD: number; - BAD_REQUEST: number; - FORBIDDEN: number; - UNSUPPORTED_PROTOCOL_VERSION: number; - }; - static errorMessages: { - 0: string; - 1: string; - 2: string; - 3: string; - 4: string; - 5: string; - }; -} -export declare class Server extends BaseServer { - httpServer?: HttpServer; - private ws; - /** - * Initialize websocket server - * - * @api protected - */ - protected init(): void; - protected cleanup(): void; - /** - * Prepares a request by processing the query string. - * - * @api private - */ - private prepare; - protected createTransport(transportName: any, req: any): any; - /** - * Handles an Engine.IO HTTP request. - * - * @param {IncomingMessage} req - * @param {ServerResponse} res - * @api public - */ - handleRequest(req: IncomingMessage, res: ServerResponse): void; - /** - * Handles an Engine.IO HTTP Upgrade. - * - * @api public - */ - handleUpgrade(req: IncomingMessage, socket: Duplex, upgradeHead: Buffer): void; - /** - * Called upon a ws.io connection. - * - * @param {ws.Socket} websocket - * @api private - */ - private onWebSocket; - /** - * Captures upgrade requests for a http.Server. - * - * @param {http.Server} server - * @param {Object} options - * @api public - */ - attach(server: HttpServer, options?: AttachOptions): void; -} -export {}; diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-parser/build/esm/is-binary.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-parser/build/esm/is-binary.d.ts deleted file mode 100644 index fa18261899c45c63e4389728d49fdd20070e6dcb..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-parser/build/esm/is-binary.d.ts +++ /dev/null @@ -1,7 +0,0 @@ -/** - * Returns true if obj is a Buffer, an ArrayBuffer, a Blob or a File. 
- * - * @private - */ -export declare function isBinary(obj: any): boolean; -export declare function hasBinary(obj: any, toJSON?: boolean): any; diff --git a/spaces/fffiloni/lama-video-watermark-remover/models/ade20k/mobilenet.py b/spaces/fffiloni/lama-video-watermark-remover/models/ade20k/mobilenet.py deleted file mode 100644 index f501266e56ee71cdf455744020f8fc1a58ec9fff..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/lama-video-watermark-remover/models/ade20k/mobilenet.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -This MobileNetV2 implementation is modified from the following repository: -https://github.com/tonylins/pytorch-mobilenet-v2 -""" - -import torch.nn as nn -import math -from .utils import load_url -from .segm_lib.nn import SynchronizedBatchNorm2d - -BatchNorm2d = SynchronizedBatchNorm2d - - -__all__ = ['mobilenetv2'] - - -model_urls = { - 'mobilenetv2': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/mobilenet_v2.pth.tar', -} - - -def conv_bn(inp, oup, stride): - return nn.Sequential( - nn.Conv2d(inp, oup, 3, stride, 1, bias=False), - BatchNorm2d(oup), - nn.ReLU6(inplace=True) - ) - - -def conv_1x1_bn(inp, oup): - return nn.Sequential( - nn.Conv2d(inp, oup, 1, 1, 0, bias=False), - BatchNorm2d(oup), - nn.ReLU6(inplace=True) - ) - - -class InvertedResidual(nn.Module): - def __init__(self, inp, oup, stride, expand_ratio): - super(InvertedResidual, self).__init__() - self.stride = stride - assert stride in [1, 2] - - hidden_dim = round(inp * expand_ratio) - self.use_res_connect = self.stride == 1 and inp == oup - - if expand_ratio == 1: - self.conv = nn.Sequential( - # dw - nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), - BatchNorm2d(hidden_dim), - nn.ReLU6(inplace=True), - # pw-linear - nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), - BatchNorm2d(oup), - ) - else: - self.conv = nn.Sequential( - # pw - nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), - BatchNorm2d(hidden_dim), - nn.ReLU6(inplace=True), - # dw - nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), - BatchNorm2d(hidden_dim), - nn.ReLU6(inplace=True), - # pw-linear - nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), - BatchNorm2d(oup), - ) - - def forward(self, x): - if self.use_res_connect: - return x + self.conv(x) - else: - return self.conv(x) - - -class MobileNetV2(nn.Module): - def __init__(self, n_class=1000, input_size=224, width_mult=1.): - super(MobileNetV2, self).__init__() - block = InvertedResidual - input_channel = 32 - last_channel = 1280 - interverted_residual_setting = [ - # t, c, n, s - [1, 16, 1, 1], - [6, 24, 2, 2], - [6, 32, 3, 2], - [6, 64, 4, 2], - [6, 96, 3, 1], - [6, 160, 3, 2], - [6, 320, 1, 1], - ] - - # building first layer - assert input_size % 32 == 0 - input_channel = int(input_channel * width_mult) - self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel - self.features = [conv_bn(3, input_channel, 2)] - # building inverted residual blocks - for t, c, n, s in interverted_residual_setting: - output_channel = int(c * width_mult) - for i in range(n): - if i == 0: - self.features.append(block(input_channel, output_channel, s, expand_ratio=t)) - else: - self.features.append(block(input_channel, output_channel, 1, expand_ratio=t)) - input_channel = output_channel - # building last several layers - self.features.append(conv_1x1_bn(input_channel, self.last_channel)) - # make it nn.Sequential - self.features = nn.Sequential(*self.features) - - # building classifier - 
self.classifier = nn.Sequential( - nn.Dropout(0.2), - nn.Linear(self.last_channel, n_class), - ) - - self._initialize_weights() - - def forward(self, x): - x = self.features(x) - x = x.mean(3).mean(2) - x = self.classifier(x) - return x - - def _initialize_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - n = m.weight.size(1) - m.weight.data.normal_(0, 0.01) - m.bias.data.zero_() - - -def mobilenetv2(pretrained=False, **kwargs): - """Constructs a MobileNet_V2 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = MobileNetV2(n_class=1000, **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['mobilenetv2']), strict=False) - return model \ No newline at end of file diff --git a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_38.py b/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_38.py deleted file mode 100644 index d9ad2d398e8eabd29614e238f47fe74a29e9ef16..0000000000000000000000000000000000000000 --- a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_38.py +++ /dev/null @@ -1,16 +0,0 @@ - -import re - -def is_spam(message: str) -> bool: - # Check for typical spam patterns related to money and URLs - money_keywords = ['만원', '백만원', '신속', '지원금', '할인', '혜택', '마감'] - spam_url_pattern = re.compile(r'(https?:\/\/\S*[정보투자]|\S*(bit\.ly|me2\.kr|asq\.kr|openkakao)\S*)') - - if any(keyword in message for keyword in money_keywords) or re.search(spam_url_pattern, message): - return True - - # Check for advertisement tag in the message - if "(광고)" in message: - return True - - return False diff --git a/spaces/flax-community/Multilingual-VQA/sections/vqa_intro.md b/spaces/flax-community/Multilingual-VQA/sections/vqa_intro.md deleted file mode 100644 index e64d00b36e32e9ef73ead34d5b4b3cfe23a72201..0000000000000000000000000000000000000000 --- a/spaces/flax-community/Multilingual-VQA/sections/vqa_intro.md +++ /dev/null @@ -1,5 +0,0 @@ -This demo uses a [CLIP-Vision-Bert model checkpoint](https://huggingface.co/flax-community/clip-vision-bert-vqa-ft-6k) fine-tuned on a [MarianMT](https://huggingface.co/transformers/model_doc/marian.html)-translated version of the [VQA v2 dataset](https://visualqa.org/challenge.html). The fine-tuning is performed after pre-training using text-only Masked LM on approximately 10 million image-text pairs taken from the [Conceptual 12M dataset](https://github.com/google-research-datasets/conceptual-12m) translated using [MBart](https://huggingface.co/transformers/model_doc/mbart.html). The translations are performed in the following four languages: English, French, German and Spanish. - -The model predicts one out of 3129 classes in English which can be found [here](https://huggingface.co/spaces/flax-community/Multilingual-VQA/blob/main/answer_reverse_mapping.json), and then the translated versions are provided based on the language chosen as `Answer Language`. The question can be present or written in any of the following: English, French, German and Spanish. - -For more details, click on `Usage` above or `Article` on the sidebar. 
🤗 \ No newline at end of file diff --git a/spaces/flax-community/koclip/executables/embed_captions.py b/spaces/flax-community/koclip/executables/embed_captions.py deleted file mode 100644 index 9cc741bd87f43886dbc965285e33d1ad7e29f5f5..0000000000000000000000000000000000000000 --- a/spaces/flax-community/koclip/executables/embed_captions.py +++ /dev/null @@ -1,30 +0,0 @@ -import argparse -import csv -import numpy as np -import os - -from config import MODEL_LIST -from utils import load_model - - -def main(args): - caption_txt_path = args.text_path - f = open(caption_txt_path) - captions = [sent.strip() for sent in f.readlines()] - - for model_name in MODEL_LIST: - model, processor = load_model(f"koclip/{model_name}") - captions_processed = [processor(sent,images=None,return_tensors='jax') for sent in captions] - vec = [np.asarray(model.get_text_features(**c)) for c in captions_processed] - - with open(os.path.join(args.out_path, f"{model_name}.tsv"), "a+") as f: - writer = csv.writer(f, delimiter="\t") - for text, feature in zip(captions, vec): - writer.writerow([text, ",".join(map(lambda x: str(x), feature))]) - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--text_path", default="cifar100_captions_kr.txt") - parser.add_argument("--out_path", default="features/text") - args = parser.parse_args() - main(args) \ No newline at end of file diff --git a/spaces/flax-community/roberta-hindi/About/intro.md b/spaces/flax-community/roberta-hindi/About/intro.md deleted file mode 100644 index 2dbdf190e92f1f0cee321db3c8295ebb312d47e3..0000000000000000000000000000000000000000 --- a/spaces/flax-community/roberta-hindi/About/intro.md +++ /dev/null @@ -1,6 +0,0 @@ -# RoBERTa base model for Hindi language - -[Pretrained model on Hindi language](https://huggingface.co/flax-community/roberta-hindi) using a masked language modeling (MLM) objective. Model is able to achieve competitive accuracy compared to pre-existing models on downstream tasks like NamedEntityRecognition and Classification. There are some MLM examples which show that there is a visible room for improvement, but this should serve well as a good base model for hindi languages & could be fine-tuned on specific datasets. - -> This is part of the -[Flax/Jax Community Week](https://discuss.huggingface.co/t/pretrain-roberta-from-scratch-in-hindi/7091), organized by [HuggingFace](https://huggingface.co/) and TPU usage sponsored by Google. diff --git a/spaces/frncscp/bullerengue/musika-bullerengue-alpha/README.md b/spaces/frncscp/bullerengue/musika-bullerengue-alpha/README.md deleted file mode 100644 index 4aeb4bd586239fb5b07159de681ba8e911e66caa..0000000000000000000000000000000000000000 --- a/spaces/frncscp/bullerengue/musika-bullerengue-alpha/README.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -license: mit -tags: -- audio -- music -- generation -- tensorflow -- keras -library_name: keras ---- - -# Musika Model: musika_bullerengue_alpha -## Model provided by: frncscp - -Pretrained musika_bullerengue_alpha model for the [Musika system](https://github.com/marcoppasini/musika) for fast infinite waveform music generation. -Introduced in [this paper](https://arxiv.org/abs/2208.08706). - -## How to use - -You can generate music from this pretrained musika_bullerengue_alpha model using the notebook available [here](https://colab.research.google.com/drive/1HJWliBXPi-Xlx3gY8cjFI5-xaZgrTD7r). - -### Model description - -This pretrained GAN system consists of a ResNet-style generator and discriminator. 
During training, stability is controlled by adapting the strength of gradient penalty regularization on-the-fly. The gradient penalty weighting term is contained in *switch.npy*. The generator is conditioned on a latent coordinate system to produce samples of arbitrary length. The latent representations produced by the generator are then passed to a decoder which converts them into waveform audio. -The generator has a context window of about 12 seconds of audio. \ No newline at end of file diff --git a/spaces/gagan3012/summarization/src/__init__.py b/spaces/gagan3012/summarization/src/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/geokanaan/arabeasy/README.md b/spaces/geokanaan/arabeasy/README.md deleted file mode 100644 index 14e7ee079a0a2c3a8cde0b17212df9b005b6c23f..0000000000000000000000000000000000000000 --- a/spaces/geokanaan/arabeasy/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: arabeasy -emoji: 🇱🇧🗣️✍🏼 -colorFrom: -colorTo: -sdk: gradio -sdk_version: 3.48.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/georgesX/finetuned_diffusion/utils.py b/spaces/georgesX/finetuned_diffusion/utils.py deleted file mode 100644 index ff1c065d186347ca51b47d010a697dbe1814695c..0000000000000000000000000000000000000000 --- a/spaces/georgesX/finetuned_diffusion/utils.py +++ /dev/null @@ -1,6 +0,0 @@ -def is_google_colab(): - try: - import google.colab - return True - except: - return False \ No newline at end of file diff --git a/spaces/giesAIexperiments/coursera-assistant-3d-printing-applications/main.py b/spaces/giesAIexperiments/coursera-assistant-3d-printing-applications/main.py deleted file mode 100644 index 4389025f804091dad9035a6880471d9627b04dae..0000000000000000000000000000000000000000 --- a/spaces/giesAIexperiments/coursera-assistant-3d-printing-applications/main.py +++ /dev/null @@ -1,13 +0,0 @@ -from utils import get_search_index, generate_answer, set_model_and_embeddings, set_session_id - - -def index(model, session_id): - set_session_id(session_id) - set_model_and_embeddings(model) - get_search_index(model) - return True - - -def run(question, model, session_id): - index(model, session_id) - return generate_answer(question) diff --git a/spaces/giswqs/geospatial-dataviz/pages/00_home.py b/spaces/giswqs/geospatial-dataviz/pages/00_home.py deleted file mode 100644 index 5fe2ae8c2282421447139411dbdda18bbd3dd8ea..0000000000000000000000000000000000000000 --- a/spaces/giswqs/geospatial-dataviz/pages/00_home.py +++ /dev/null @@ -1,18 +0,0 @@ -import solara - - -@solara.component -def Page(): - with solara.Column(align="center"): - markdown = """ - ## An interactive web app for visualizing geospatial data - - ### Introduction - - - Web App: - - GitHub: - - Hugging Face: - - """ - - solara.Markdown(markdown) diff --git a/spaces/glt3953/app-text_image_hed/README.md b/spaces/glt3953/app-text_image_hed/README.md deleted file mode 100644 index 5997b12b2ae136d1e62abe07d227016d48479497..0000000000000000000000000000000000000000 --- a/spaces/glt3953/app-text_image_hed/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: App-text Image Hed -emoji: 🔥 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/gotiQspiryo/whisper-ui/examples/2008 Acura Tl Type S Blue Book Value Cortometraggio Scann.md b/spaces/gotiQspiryo/whisper-ui/examples/2008 Acura Tl Type S Blue Book Value Cortometraggio Scann.md deleted file mode 100644 index 4a791d710f3e1d96396ee5304ea7d7a8db8c1363..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/2008 Acura Tl Type S Blue Book Value Cortometraggio Scann.md +++ /dev/null @@ -1,6 +0,0 @@ -

2008 Acura Tl Type S Blue Book Value cortometraggio scann





- -
-
-
-

diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Cracked LS DYNA Full Download Free Where to Find and Download the Latest Version.md b/spaces/gotiQspiryo/whisper-ui/examples/Cracked LS DYNA Full Download Free Where to Find and Download the Latest Version.md deleted file mode 100644 index 777c0ec87d8c540cfd639537943debd7b614d034..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Cracked LS DYNA Full Download Free Where to Find and Download the Latest Version.md +++ /dev/null @@ -1,15 +0,0 @@ - -

LS-DYNA 971 is a nonlinear code at its roots: it uses explicit time integration to perform dynamic finite element analysis. LS-DYNA 971 R7 can also be used for quasi-static simulation, and it can perform underwater shock, failure, and thermal analyses. LS-DYNA 971 R7 also provides various fixes and extensions: it fixes shell element stress mapping by *INITIAL_STRESS_SHELL, the stress initialization for tetrahedral elements of element formulation 4, the mass scaling message for spot welds, and a memory error with material type 3 in implicit analysis. All in all, LS-DYNA 971 R7 is an impressive finite element program that can be used for simulating complex real-world problems. You can also download MSC Dytran 2018.

-

LS-DYNA 971 R7 Free Download: LS-DYNA 971 R7 is available for Windows as a single-link download, offered as a full offline, standalone installer in a compressed package. LS-DYNA 971 R7 Description: LS-DYNA is a finite element program that can simulate real-world problems. The LS-DYNA software is used in the manufacturing and engineering, automotive, aerospace, construction, and military industries. The software has been optimized for Unix, Linux, and Windows operating systems. The code is nonlinear at its roots, and it performs dynamic finite element analysis using explicit time integration. Non-linear means having at least one of the following: changing boundary conditions, such as contact between components that changes over time.

-

-


-

Over the years, Predictive Engineering has enjoyed presenting at both the North American and the European LS-DYNA conferences. Please peruse some of our presentations and feel free to download relevant examples.

-

The first phase of this test program was to develop a validated FEA model that could be used to predict the impact response of additively manufactured 3D lattice structures. The additive material used for the lattice structure was a methacrylate photopolymer. Standard static compression, tension, and bulk modulus testing was performed on 20 mm thick blocks. The same samples were subjected to impact testing at various strain rates. The static and dynamic data were then fitted onto a series of strain-rate dependent curves. The final *MAT_181 law was then validated against these same coupon tests and shown to have good agreement. This material law was then applied to a 3D lattice model for virtual impact testing. Unfortunately, the full-on lattice simulations showed no correlation between FEA and test. Although the material law development was accurate to the coupons and the FE model was verified against other numerical tests, it was reasoned that the material characterization had changed radically from the large samples (centimeters) to the lattice structure (millimeters).
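
The abstract does not say which rate-dependence form was fitted, but a common way to turn static and dynamic coupon results into strain-rate dependent curves is a Cowper-Symonds scaling, where the dynamic-to-static stress ratio is 1 + (rate/C)^(1/p). A hedged sketch with illustrative numbers only:

```python
import numpy as np
from scipy.optimize import curve_fit

# Illustrative data only: strain rates (1/s) vs measured dynamic/static stress ratios.
strain_rates = np.array([1.0, 10.0, 100.0, 1000.0])
stress_ratios = np.array([1.05, 1.12, 1.25, 1.45])

def cowper_symonds(rate, C, p):
    # Dynamic amplification: sigma_dynamic / sigma_static = 1 + (rate / C)**(1 / p)
    return 1.0 + (rate / C) ** (1.0 / p)

(C_fit, p_fit), _ = curve_fit(cowper_symonds, strain_rates, stress_ratios, p0=(1000.0, 4.0))
print(f"Fitted C = {C_fit:.1f} 1/s, p = {p_fit:.2f}")
```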

-

A core challenge in any finite element analysis (FEA) is figuring out the loads and how to apply them. For static events, this is usually straightforward. In the case of durability testing, loads are obtained from accelerometers mounted on vehicles that are driven for hours, if not days, on test tracks or routes that hopefully replicate the most severe road conditions possible. These accelerations can then be numerically processed and used for various frequency-domain analyses such as a random vibration analysis (i.e., PSD), a frequency response analysis, or steady-state dynamics. Although powerful and useful, these solution sequences are all based on the linear normal modes response and do not account for the nonlinear evolution of the structure as it shakes, rattles and rolls. As for a nonlinear material response, forget about it.
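
As a concrete example of that numerical processing step, a recorded acceleration history can be reduced to a power spectral density with Welch's method before being used as random-vibration input. A sketch using synthetic data in place of real accelerometer output:

```python
import numpy as np
from scipy.signal import welch

fs = 2000.0                         # assumed sample rate, Hz
t = np.arange(0.0, 10.0, 1.0 / fs)  # 10 seconds of data
# Synthetic accelerometer signal: a 120 Hz resonance buried in broadband noise.
accel = 0.5 * np.sin(2.0 * np.pi * 120.0 * t) + 0.2 * np.random.randn(t.size)

# Welch's method averages windowed FFT segments to estimate the PSD
# (units of g^2/Hz if the signal is in g).
freqs, psd = welch(accel, fs=fs, nperseg=4096)
print(f"PSD peak near {freqs[np.argmax(psd)]:.0f} Hz")
```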

-

Our approach is to describe how one can take the full acceleration time history and, with little sacrifice in accuracy, perform a nonlinear, transient dynamic implicit analysis over a time span of 5 to 10 seconds. The reason for choosing implicit analysis is based on two factors: (i) the necessity for finely detailed meshes in regions of high stress, and (ii) quick solution times.

-

The dynamic movement of subsea ropes presents an interesting numerical challenge due to the coupling of drag forces with the dynamic response of the rope. Although an FSI approach of fully coupling the surrounding seawater to the rope is theoretically possible, it lies beyond the reach of practical engineering when discussing rope lengths in kilometers and possible rope movements in hundreds of meters. A new analysis technique is presented where the drag forces associated with subsea dynamic rope movement are directly integrated into the solution using the LS-DYNA user subroutine LOADUD. Drag forces are calculated from analytical solutions to provide discrete drag forces as a function of rope position and velocity. This technique avoids the complexity of a fully coupled FSI solution while providing the major benefit of capturing how the rope will dynamically move while lifting heavy loads and being subjected to strong sea currents. Results are presented showing how a two-kilometer rope would dynamically behave while lifting a heavy load from the sea bottom to the surface under stratified sea currents.
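
The abstract does not give the exact drag formulation, but a standard analytical model for a cylindrical rope segment is quadratic cross-flow drag, F = 0.5 * rho * Cd * d * L * |v_rel| * v_rel, evaluated per segment from the relative velocity between the rope and the current. A simplified sketch of that per-segment calculation (all constants are assumptions):

```python
import numpy as np

RHO_SEA = 1025.0  # seawater density, kg/m^3 (assumed)
CD = 1.2          # drag coefficient for a circular cylinder (assumed)
DIAMETER = 0.05   # rope diameter, m (assumed)
SEG_LEN = 1.0     # discretized segment length, m (assumed)

def segment_drag(v_rope, v_current):
    """Quadratic drag force vector (N) on one rope segment."""
    v_rel = np.asarray(v_current, dtype=float) - np.asarray(v_rope, dtype=float)
    speed = np.linalg.norm(v_rel)
    return 0.5 * RHO_SEA * CD * DIAMETER * SEG_LEN * speed * v_rel

# Example: a stationary segment in a 0.8 m/s lateral current.
print(segment_drag(v_rope=[0.0, 0.0, 0.0], v_current=[0.8, 0.0, 0.0]))
```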

-

Smoothed Particle Hydrodynamics (SPH) has quickly become one of the most popular mesh-free methods since its introduction in 1977. In recent years, a great amount of research has focused on reducing the long computational times associated with the SPH method. One of the remaining hurdles is the long computational time associated with building the neighbor list. Because of the nature of the original SPH codes (astrophysics), the neighbor search is commonly performed for every element in the domain at each time step.

-

LS-DYNA consists of a single executable file and is entirely command-line driven. Therefore, all that is required to run LS-DYNA is a command shell, the executable, an input file, and enough free disk space to run the calculation. All input files are in simple ASCII format and thus can be prepared using any text editor. Input files can also be prepared with the aid of a graphical preprocessor. There are many third-party software products available for preprocessing LS-DYNA input files. LSTC also develops its own preprocessor, LS-PrePost, which is freely distributed and runs without a license. Licensees of LS-DYNA automatically have access to all of the program's capabilities, from simple linear static mechanical analysis up to advanced thermal and flow solving methods. Furthermore, they have full use of LSTC's LS-OPT software, a standalone design optimization and probabilistic analysis package with an interface to LS-DYNA.
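
To make the ASCII keyword format concrete, here is a sketch that writes a minimal input deck from a script. The card layout follows the general keyword convention (keywords start with *, comment lines with $), but the deck is illustrative only and not a complete, runnable model:

```python
deck = """*KEYWORD
*TITLE
Minimal illustrative deck (not a complete model)
*CONTROL_TERMINATION
$   ENDTIM
       1.0
*MAT_ELASTIC
$      MID        RO         E        PR
         1    7850.0   2.10E11      0.30
*END
"""

# LS-DYNA reads plain ASCII, so any editor or script can produce an input file.
with open("minimal.k", "w") as f:
    f.write(deck)
```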

-

-
-
\ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Create Engaging Learning Content With Your 5 Senses The Ultimate Copywriting Handbook.md b/spaces/gotiQspiryo/whisper-ui/examples/Create Engaging Learning Content With Your 5 Senses The Ultimate Copywriting Handbook.md deleted file mode 100644 index 2fda679a76f07fe06cdab8d127017a8e2335f784..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Create Engaging Learning Content With Your 5 Senses The Ultimate Copywriting Handbook.md +++ /dev/null @@ -1,27 +0,0 @@ - -

Teaching the five senses to little learners is such a fun sensory experience as it allows your students to focus on one sense. (Pst, need some ideas for hands-on sensory and science activities focused around our five senses? Check out our Five Senses Science Unit here!) And to get our youngest learners engaging in some hands-on activities take a look at our Pre-K Five Senses Science Unit.

-

How To Create Engaging Learning – Copywriting With Your 5 Senses





-

Along with hands-on activities, another great way to teach and reinforce the five senses is through reading and literature. Here we have compiled some of the best books all about the Five Senses that will enhance your lesson as you explore the world of touch, taste, sight, hearing and smell!

-

With life-like pictures, prompting questions and fun puzzles, this Dover Coloring Book about the five senses makes for a great resource in your classroom. It offers many hands-on activities to solidify learning.

-

This book makes an excellent addition to your classroom library as it has simple text with clear pictures. Your students will learn about the different ways they engage their senses on a daily basis.

-

-

Take your class on a ride in the Magic School Bus and explore the five senses in the fun, engaging and silly way that all Magic School Bus trips entail! Your little learners will be transported outside of their classroom and into the magical and captivating world of science!

-

This book takes another fantastic fact-based approach to learning about our five senses. With bright, clear photographs of real-life examples, learn about your nervous system and how all your senses work together.

-


My Five Senses was written by Margaret Miller. This nonfiction book uses an easy to read text and large photographs to introduce kids to all the things they can do with their senses. The engaging photographs help little ones to recognize the ways that they already use their senses.

-

God created us with five basic senses: sight, smell, hearing, taste, and touch. He intricately designed each sense organ to transmit information to our brain. Our five senses help us understand and notice what is happening in our world. Engaging God with all five of our senses is an important aspect of our faith journey.

-

Much of the time, learning is a multisensory experience. With the right tools and lesson plans, you can engage all five of the senses in your elementary or preschool classroom. And language instruction is no exception. While teaching and supplementing your Spanish curriculum lessons, there are plenty of creative ways to engage the senses.

-

Touch
Engaging this sense is all about getting your students active and hands-on. The best way to use touch and kinesthetic techniques in your instruction is often to plan games. Memory games, crafts, outdoor activities, and charades are just some examples of these activities. Games are also a great way to combine all of the senses together, creating a complete learning experience.

-

Inside: Hone your art senses! These multisensory artworks can be explored through the lens of the five senses. Use this free art worksheet to analyze these artworks with your students.

-

Irreverent copywriter on a mission to stamp out gobbledygook and to add sparkle to business blogs. A sentence without sensory words from a blog opening: Imagine your writing is slowing readers down.

-

Sensory development refers to our five senses, which allow us to explore the world around us: sight, sound, touch, taste and smell. During their early childhood years, children are most attracted to activities that engage their senses, such as sensory play and texture crates. Children crave sensory inputs to make sense of the new world around them as they learn and create! All 5 senses should be incorporated into different activities that are crucial for your child's brain development and cognitive growth.

-

As your child is exploring and learning about the world around them, it is important for them to learn about how they are able to do so with their five senses. Learning about the five senses also helps children know more about their body and explore how they are able to control their body parts, while acquiring vocabulary to describe how things look, sound, taste, feel or smell.

-

Squizzel Box has a Five Senses thematic learning box with 10 activities for your child to explore their five senses! The activities include science experiments, DIY crafts and hands-on activities. It is the perfect starter kit to introduce the 5 senses to your child. To make it more exciting, materials and step-by-step instructions are included in the box. That means fuss-free learning for busy working parents!

-

Easter egg writing tray! This tray has purple sprinkles and bunny sprinkles in it. Students find an egg letter match, then write the uppercase and lowercase letter in the tray. Writing trays are the best tool I use for practicing letters. Students get super excited when they see writing trays out! When was the last time your students were excited about handwriting worksheets? This is fun, engaging hands-on learning! Try writing trays in your classroom! They are a game changer!

-

Another way is to create a chart where students place their example of sensory language from the text on the left side of the chart. Then, either that student, or the entire class working together, decides which of the 5 senses were evoked by that sensory detail (it could be more than one sense).

-

This week the Early Childhood Education Team is sharing ideas for our five senses theme. Let us show you how you can explore the senses with a set of magnetic letters. Plus, you will be working on building phonological awareness, learning phonics skills and strengthening writing abilities.

-

Come explore the 5 senses for kids with this fun, engaging lesson plan as part of our 5 senses unit. We explore some great books and videos, learn about the sense of touch by making an edible skin-layers project, try hands-on sense-of-sight activities, dissect a cow eye, and so much more. This five senses unit for kindergarten, preschool, pre-K, first grade, 2nd grade, and 3rd graders is a great way to learn about the senses of sight, sound, taste, touch, and smell.

-

Beth Gorden is the creative multi-tasking creator of 123 Homeschool 4 Me. As a busy homeschooling mother of six, she strives to create hands-on learning activities and worksheets that kids will love to make learning FUN! She has created over 1 million pages of printables to help teach kids ABCs, science, English grammar, history, math, and so much more! Beth is also the creator of 2 additional sites with even more educational activities and FREE printables - www.kindergartenworksheetsandgames.com and www.preschoolplayandlearn.com

-

Put a lot of time into writing your subject line just like you would writing an engaging blog headline. If you spend four hours total with your email copywriting, you should spend half of that on your subject line.

-

We hope you and your little ones love exploring a pumpkin with your five senses! If you try this activity with your students, remember to tag us on social media and leave your feedback in the comment section below to let us know how it went!

-
-
\ No newline at end of file diff --git a/spaces/gradio/HuBERT/examples/roberta/preprocess_RACE.py b/spaces/gradio/HuBERT/examples/roberta/preprocess_RACE.py deleted file mode 100644 index cdd66072718ccb6033304c97926271909a17f9d6..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/roberta/preprocess_RACE.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import json -import os -import re - - -class InputExample: - def __init__(self, paragraph, qa_list, label): - self.paragraph = paragraph - self.qa_list = qa_list - self.label = label - - -def get_examples(data_dir, set_type): - """ - Extract paragraph and question-answer list from each json file - """ - examples = [] - - levels = ["middle", "high"] - set_type_c = set_type.split("-") - if len(set_type_c) == 2: - levels = [set_type_c[1]] - set_type = set_type_c[0] - for level in levels: - cur_dir = os.path.join(data_dir, set_type, level) - for filename in os.listdir(cur_dir): - cur_path = os.path.join(cur_dir, filename) - with open(cur_path, "r") as f: - cur_data = json.load(f) - answers = cur_data["answers"] - options = cur_data["options"] - questions = cur_data["questions"] - context = cur_data["article"].replace("\n", " ") - context = re.sub(r"\s+", " ", context) - for i in range(len(answers)): - label = ord(answers[i]) - ord("A") - qa_list = [] - question = questions[i] - for j in range(4): - option = options[i][j] - if "_" in question: - qa_cat = question.replace("_", option) - else: - qa_cat = " ".join([question, option]) - qa_cat = re.sub(r"\s+", " ", qa_cat) - qa_list.append(qa_cat) - examples.append(InputExample(context, qa_list, label)) - - return examples - - -def main(): - """ - Helper script to extract paragraphs questions and answers from RACE datasets. 
- """ - parser = argparse.ArgumentParser() - parser.add_argument( - "--input-dir", - help="input directory for downloaded RACE dataset", - ) - parser.add_argument( - "--output-dir", - help="output directory for extracted data", - ) - args = parser.parse_args() - - if not os.path.exists(args.output_dir): - os.makedirs(args.output_dir, exist_ok=True) - - for set_type in ["train", "dev", "test-middle", "test-high"]: - examples = get_examples(args.input_dir, set_type) - qa_file_paths = [ - os.path.join(args.output_dir, set_type + ".input" + str(i + 1)) - for i in range(4) - ] - qa_files = [open(qa_file_path, "w") for qa_file_path in qa_file_paths] - outf_context_path = os.path.join(args.output_dir, set_type + ".input0") - outf_label_path = os.path.join(args.output_dir, set_type + ".label") - outf_context = open(outf_context_path, "w") - outf_label = open(outf_label_path, "w") - for example in examples: - outf_context.write(example.paragraph + "\n") - for i in range(4): - qa_files[i].write(example.qa_list[i] + "\n") - outf_label.write(str(example.label) + "\n") - - for f in qa_files: - f.close() - outf_label.close() - outf_context.close() - - -if __name__ == "__main__": - main() diff --git a/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py b/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py deleted file mode 100644 index b07e274d202414ce40d00aa64a27cf97bb49c1c3..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse -import os -import os.path as osp -import tqdm -import torch -import torch.nn.functional as F -from shutil import copyfile - -from npy_append_array import NpyAppendArray - -import fairseq -import soundfile as sf - - -def get_parser(): - parser = argparse.ArgumentParser( - description="compute kmeans codebook from kaldi-computed feats" - ) - # fmt: off - parser.add_argument('data', help='location of tsv files') - parser.add_argument('--split', help='which split to read', required=True) - parser.add_argument('--save-dir', help='where to save the output', required=True) - parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec ctc model', required=True) - parser.add_argument('--layer', type=int, default=14, help='which layer to use') - # fmt: on - - return parser - - -class Wav2VecFeatureReader(object): - def __init__(self, cp_file, layer): - model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task( - [cp_file] - ) - model = model[0] - model.eval() - model.cuda() - self.model = model - self.task = task - self.layer = layer - - def read_audio(self, fname): - """Load an audio file and return PCM along with the sample rate""" - wav, sr = sf.read(fname) - assert sr == 16e3 - - return wav - - def get_feats(self, loc): - x = self.read_audio(loc) - with torch.no_grad(): - source = torch.from_numpy(x).float().cuda() - if self.task.cfg.normalize: - assert source.dim() == 1, source.dim() - with torch.no_grad(): - source = F.layer_norm(source, source.shape) - source = source.view(1, -1) - - m_res = self.model(source=source, mask=False, features_only=True, layer=self.layer) - return m_res["x"].squeeze(0).cpu() - - -def get_iterator(args): - with open(osp.join(args.data, args.split) + ".tsv", "r") as fp: - lines = fp.read().split("\n") - root = lines.pop(0).strip() - files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0] - - num = len(files) - reader = Wav2VecFeatureReader(args.checkpoint, args.layer) - - def iterate(): - for fname in files: - w2v_feats = reader.get_feats(fname) - yield w2v_feats - - return iterate, num - - -def main(): - parser = get_parser() - args = parser.parse_args() - - os.makedirs(args.save_dir, exist_ok=True) - - def create_files(dest): - copyfile(osp.join(args.data, args.split) + ".tsv", dest + ".tsv") - if osp.exists(osp.join(args.data, args.split) + ".wrd"): - copyfile(osp.join(args.data, args.split) + ".wrd", dest + ".wrd") - if osp.exists(osp.join(args.data, args.split) + ".phn"): - copyfile(osp.join(args.data, args.split) + ".phn", dest + ".phn") - - if osp.exists(dest + ".npy"): - os.remove(dest + ".npy") - npaa = NpyAppendArray(dest + ".npy") - return npaa - - save_path = osp.join(args.save_dir, args.split) - npaa = create_files(save_path) - - generator, num = get_iterator(args) - iterator = generator() - - with open(save_path + ".lengths", "w") as l_f: - for w2v_feats in tqdm.tqdm(iterator, total=num): - print(len(w2v_feats), file=l_f) - - if len(w2v_feats) > 0: - npaa.append(w2v_feats.numpy()) - - -if __name__ == "__main__": - main() diff --git a/spaces/gradio/HuBERT/fairseq/benchmark/dummy_lm.py b/spaces/gradio/HuBERT/fairseq/benchmark/dummy_lm.py deleted file mode 100644 index c6246a0c0e338fa36244b3aa4fb57f189fbffcb6..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/benchmark/dummy_lm.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from dataclasses import dataclass, field -from typing import Optional - -import torch -from .dummy_dataset import DummyDataset -from fairseq.data import Dictionary -from fairseq.dataclass import FairseqDataclass -from fairseq.tasks import FairseqTask, register_task -from omegaconf import II - - -logger = logging.getLogger(__name__) - - -@dataclass -class DummyLMConfig(FairseqDataclass): - dict_size: int = 49996 - dataset_size: int = 100000 - tokens_per_sample: int = field( - default=512, metadata={"help": "max sequence length"} - ) - add_bos_token: bool = False - batch_size: Optional[int] = II("dataset.batch_size") - max_tokens: Optional[int] = II("dataset.max_tokens") - max_target_positions: int = II("task.tokens_per_sample") - - -@register_task("dummy_lm", dataclass=DummyLMConfig) -class DummyLMTask(FairseqTask): - def __init__(self, cfg: DummyLMConfig): - super().__init__(cfg) - - # load dictionary - self.dictionary = Dictionary() - for i in range(cfg.dict_size): - self.dictionary.add_symbol("word{}".format(i)) - self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8 - logger.info("dictionary: {} types".format(len(self.dictionary))) - - seq = torch.arange(cfg.tokens_per_sample + 1) + self.dictionary.pad() + 1 - - self.dummy_src = seq[:-1] - self.dummy_tgt = seq[1:] - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split. - Args: - split (str): name of the split (e.g., train, valid, test) - """ - if self.cfg.batch_size is not None: - bsz = self.cfg.batch_size - else: - bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample) - self.datasets[split] = DummyDataset( - { - "id": 1, - "net_input": { - "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), - "src_lengths": torch.full( - (bsz,), self.cfg.tokens_per_sample, dtype=torch.long - ), - }, - "target": torch.stack([self.dummy_tgt for _ in range(bsz)]), - "nsentences": bsz, - "ntokens": bsz * self.cfg.tokens_per_sample, - }, - num_items=self.cfg.dataset_size, - item_size=self.cfg.tokens_per_sample, - ) - - @property - def source_dictionary(self): - return self.dictionary - - @property - def target_dictionary(self): - return self.dictionary diff --git a/spaces/gradio/HuBERT/fairseq/optim/fairseq_optimizer.py b/spaces/gradio/HuBERT/fairseq/optim/fairseq_optimizer.py deleted file mode 100644 index 7e5411753a2ba94f3a7a68316131530b8b17d22a..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/optim/fairseq_optimizer.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch -from fairseq import utils -from fairseq.dataclass.utils import gen_parser_from_dataclass - - -class FairseqOptimizer(object): - def __init__(self, cfg): - super().__init__() - self.cfg = cfg - - @classmethod - def add_args(cls, parser): - """Add optimizer-specific arguments to the parser.""" - dc = getattr(cls, "__dataclass", None) - if dc is not None: - gen_parser_from_dataclass(parser, dc()) - - @property - def optimizer(self): - """Return a torch.optim.optimizer.Optimizer instance.""" - if not hasattr(self, "_optimizer"): - raise NotImplementedError - if not isinstance(self._optimizer, torch.optim.Optimizer): - raise ValueError("_optimizer must be an instance of torch.optim.Optimizer") - return self._optimizer - - @optimizer.setter - def optimizer(self, optimizer): - """Reset optimizer instance.""" - if not hasattr(self, "_optimizer"): - raise NotImplementedError - if not isinstance(self._optimizer, torch.optim.Optimizer): - raise ValueError("_optimizer must be an instance of torch.optim.Optimizer") - self._optimizer = optimizer - - @property - def optimizer_config(self): - """ - Return a kwarg dictionary that will be used to override optimizer - args stored in checkpoints. This allows us to load a checkpoint and - resume training using a different set of optimizer args, e.g., with a - different learning rate. - """ - raise NotImplementedError - - @property - def params(self): - """Return an iterable of the parameters held by the optimizer.""" - for param_group in self.param_groups: - for p in param_group["params"]: - yield p - - @property - def param_groups(self): - return self.optimizer.param_groups - - def __getstate__(self): - return self._optimizer.__getstate__() - - def get_lr(self): - """Return the current learning rate.""" - return self.param_groups[0]["lr"] - - def set_lr(self, lr): - """Set the learning rate.""" - for param_group in self.param_groups: - param_group["lr"] = lr - - def state_dict(self): - """Return the optimizer's state dict.""" - return self.optimizer.state_dict() - - def load_state_dict(self, state_dict, optimizer_overrides=None): - """Load an optimizer state dict. - - In general we should prefer the configuration of the existing optimizer - instance (e.g., learning rate) over that found in the state_dict. This - allows us to resume training from a checkpoint using a new set of - optimizer args. - """ - self.optimizer.load_state_dict(state_dict) - - if optimizer_overrides is not None and len(optimizer_overrides) > 0: - # override learning rate, momentum, etc. with latest values - for group in self.param_groups: - group.update(optimizer_overrides) - - def backward(self, loss): - """Computes the sum of gradients of the given tensor w.r.t. 
graph leaves.""" - loss.backward() - - def all_reduce_grads(self, module): - """Manually all-reduce gradients (if required).""" - if hasattr(module, "all_reduce_grads"): - module.all_reduce_grads() - - def multiply_grads(self, c): - """Multiplies grads by a constant *c*.""" - for p in self.params: - if p.grad is not None: - if torch.is_tensor(c): - c = c.to(p.grad.device) - p.grad.data.mul_(c) - - def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): - """Clips gradient norm.""" - return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn) - - def step(self, closure=None, scale=1.0, groups=None): - """Performs a single optimization step.""" - if self.supports_step_with_scale: - if self.supports_groups: - self.optimizer.step(closure, scale=scale, groups=groups) - else: - self.optimizer.step(closure, scale=scale) - else: - if scale != 1.0: - self.multiply_grads(1.0 / scale) - if self.supports_groups: - self.optimizer.step(closure, groups=groups) - else: - self.optimizer.step(closure) - - def zero_grad(self): - """Clears the gradients of all optimized parameters.""" - for p in self.params: - p.grad = None - self.optimizer.zero_grad() - - @property - def supports_memory_efficient_fp16(self): - if hasattr(self.optimizer, "supports_memory_efficient_fp16"): - return self.optimizer.supports_memory_efficient_fp16 - return False - - @property - def supports_step_with_scale(self): - if hasattr(self.optimizer, "supports_step_with_scale"): - return self.optimizer.supports_step_with_scale - return False - - @property - def supports_groups(self): - if hasattr(self.optimizer, "supports_groups"): - return self.optimizer.supports_groups - return False - - @property - def supports_flat_params(self): - """ - Whether the optimizer supports collapsing of the model - parameters/gradients into a single contiguous Tensor. - """ - if hasattr(self.optimizer, "supports_flat_params"): - return self.optimizer.supports_flat_params - return False - - def average_params(self): - pass - - def broadcast_global_state_dict(self, state_dict): - """ - Broadcasts a global state dict to all ranks. - Useful for optimizers that shard state between ranks. 
- """ - if hasattr(self.optimizer, "broadcast_global_state_dict"): - return self.optimizer.broadcast_global_state_dict(state_dict) - else: - return state_dict - - -class LegacyFairseqOptimizer(FairseqOptimizer): - def __init__(self, args): - self.args = args diff --git a/spaces/gradio/longformer/scripts/mem_profiler.py b/spaces/gradio/longformer/scripts/mem_profiler.py deleted file mode 100644 index 5d8e2f76645b93c8f3e27156a67bf9db00289ccb..0000000000000000000000000000000000000000 --- a/spaces/gradio/longformer/scripts/mem_profiler.py +++ /dev/null @@ -1,69 +0,0 @@ -from longformer.longformer_encoder_decoder import LongformerEncoderDecoderForConditionalGeneration -from longformer.longformer_encoder_decoder import LongformerEncoderDecoderConfig - -from longformer.longformer import LongformerForMaskedLM -from longformer.longformer import LongformerConfig - -import torch -from torch.utils.data import DataLoader, Dataset -from pytorch_lightning import Trainer -import pytorch_lightning as pl - -seqlen = 1024 * 2 -global_size = seqlen // 100 -attention_window = 256 # one sided - - -class CoolDataset(Dataset): - def __len__(self): - return 1024 # number of examples - - def __getitem__(self, idx): - tokne_ids = torch.tensor([5] * seqlen) - mask = torch.tensor([1] * seqlen) - mask[:global_size] = 2 - return tokne_ids, mask - - -class MemoryProfiler(pl.LightningModule): - - def __init__(self, hparams=None): - super().__init__() - self.hparams = hparams - - config = LongformerEncoderDecoderConfig.from_pretrained('bart-long-4096') - # config = LongformerConfig.from_pretrained('roberta-large') - config.max_position_embeddings = seqlen + 2 - config.gradient_checkpointing = True - config.attention_mode = 'sliding_chunks' - # config.attention_mode = 'n2' - config.attention_window = [attention_window] * config.num_hidden_layers - config.attention_dilation = [1] * config.num_hidden_layers - self.model = LongformerEncoderDecoderForConditionalGeneration(config) - # self.model = LongformerForMaskedLM(config) - - def forward(self, x, y): - print(seqlen, global_size, attention_window, torch.cuda.max_memory_allocated(x.device) / 1024 ** 3) - # import ipdb; ipdb.set_trace() - # return self.model(x, attention_mask=y, decoder_input_ids=x[:, :attention_window * 2], use_cache=False) - return self.model(x, attention_mask=y) - - def training_step(self, batch, batch_idx): - # import ipdb; ipdb.set_trace() - x, y = batch - y_hat = self(x, y) - loss = y_hat[0].sum() - # import ipdb; ipdb.set_trace() - return {'loss': loss} - - def configure_optimizers(self): - return torch.optim.Adam(self.parameters(), lr=0.001) - - def train_dataloader(self): - return DataLoader(CoolDataset(), batch_size=2, num_workers=0) - - -if __name__ == '__main__': - model = MemoryProfiler(hparams={}) - trainer = Trainer(gpus=[0], progress_bar_refresh_rate=1, max_epochs=1, amp_level='O2', use_amp=True) - trainer.fit(model) diff --git a/spaces/gradio/longformer/scripts/pretrain.py b/spaces/gradio/longformer/scripts/pretrain.py deleted file mode 100644 index 8de5bbde08e0e38f8f96d3e2b274f29d2f6fbec6..0000000000000000000000000000000000000000 --- a/spaces/gradio/longformer/scripts/pretrain.py +++ /dev/null @@ -1,461 +0,0 @@ -import argparse -import glob -import os -import random -import logging -import numpy as np -import math -from tqdm import tqdm -import time -import torch -from transformers import AutoTokenizer, AutoModelForMaskedLM -from transformers import DataCollatorForLanguageModeling -from transformers.optimization import AdamW, 
get_linear_schedule_with_warmup - -from torch.utils.data import Dataset, DataLoader -import pytorch_lightning as ptl -from pytorch_lightning.logging.test_tube import TestTubeLogger -from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateLogger - - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -# DONE: reproduce RoBERTa numbers on the Longformer corpus -# DONE: testing ddp single machine -# DONE: testing ddp multiple machines -# DONE: testing resume from checkpoint -# TODO: try on a TPU-pod -# TODO: run on beaker on ai2-server1/2 - - -try: - import torch_xla.core.xla_model as xm -except ImportError: - XLA_AVAILABLE = False -else: - XLA_AVAILABLE = True - - -class MMapTextDataset(Dataset): - def __init__(self, mmap_filename, chunk_size, bos_token_id, eos_token_id): - # `chunk_size - 2` to reserve space for and - self.num_instances = np.memmap(mmap_filename, mode='r', dtype=np.uint16).shape[0] // (chunk_size - 2) - # defer loading the token_ids memmap until after the first __getitem__ call. - # when spawning new processes for ddp, there is a hard limit in python < 3.8 that - # pickle files need to be < 4GB. By waiting until after the first __getitem__ we - # don't have to pickle the memmap - self.token_ids = None - self._mmap_filename = mmap_filename - self._chunk_size = chunk_size - self._bos_token_id = bos_token_id - self._eos_token_id = eos_token_id - - def __len__(self): - return self.num_instances - - def __getitem__(self, i): - if self.token_ids is None: - self.token_ids = np.memmap(self._mmap_filename, mode='r', dtype=np.uint16) - from_index = i * (self._chunk_size - 2) - to_index = (i + 1) * (self._chunk_size - 2) - data = np.concatenate(([self._bos_token_id], self.token_ids[from_index:to_index], [self._eos_token_id])) - return torch.tensor(data, dtype=torch.long) - - # ========================= preprocessing code ========================= # - @staticmethod - def _process_file(full_fname): - "Step 1: tokenize an input text file then save token ids into `np.memmap` shards of size `args.shard_size`" - fname = full_fname.split('/')[-1] - log_filename = f'{args.input_dir}/logs-{args.shard_size}/{fname}.log' - if os.path.isfile(log_filename): - logging.info(f'Skipping {full_fname} ...') - return # log file already exists. Skip current file. 
- - logging.info(f'Processing {full_fname} ...') - with open(full_fname, 'r') as fin: - token_list = [] - shard_count = 0 - tokens_count = 0 - - def _write_shard(): - if len(token_list) == 0: - return - if token_list[-1] != MMapTextDataset.tokenizer.sep_token_id: # handle a rare case - token_list.append(MMapTextDataset.tokenizer.sep_token_id) - shared_filename = f'{args.input_dir}/shards-{args.shard_size}/{fname}-{shard_count}.bin' - logging.info(f'Writing {len(token_list)} tokens to shared {shared_filename}') - fp = np.memmap(shared_filename, dtype=np.uint16, mode='w+', shape=len(token_list)) - fp[:] = token_list[:] - del fp # flush and close file - for line in tqdm(fin): - line = line.strip() - if line == '': # drop empty lines - continue - tokens = MMapTextDataset.tokenizer.encode(line, add_special_tokens=False) # `__getitem__` adds special tokens - token_list.extend(tokens) - if len(token_list) > args.shard_size: - _write_shard() - tokens_count += len(token_list) - token_list = [] - shard_count += 1 - else: - token_list.append(MMapTextDataset.tokenizer.sep_token_id) - _write_shard() - tokens_count += len(token_list) - with open(log_filename, 'w') as f: - f.write(f'Generated {tokens_count} tokens in {shard_count + 1} shards') - - @staticmethod - def _combine_shards(output_fname, shards_list): - "Step 2: combining memmap shards into one `train.bin` or `val.bin` file" - total_size = 0 - for filename in shards_list: - total_size += np.memmap(filename, mode='r', dtype=np.uint16).shape[0] - logging.info(f'Writing {total_size} tokens to {output_fname}') - all_token_ids = np.empty(total_size, dtype=np.uint16) - last_token_index = 0 - for filename in tqdm(shards_list): - shared = np.memmap(filename, mode='r', dtype=np.uint16) - all_token_ids[last_token_index:last_token_index+len(shared)] = shared[:] - last_token_index += len(shared) - fp = np.memmap(output_fname, dtype=np.uint16, mode='w+', shape=total_size) - fp[:] = all_token_ids[:] - del fp - - @staticmethod - def raw_text_to_mmap(args): - """This is the main preprocessing function. It processes all the text files in `args.input_dir` and - outputs two np.memmap files, one for training and one for validation with ratio `args.train_dev_split`. - Processing each input file involves tokenizing it, sharding it into shards of size `args.shard_size`, - then writing each shard as an np.memmap file. The stream of tokens in the memmap file represents documents - separated with `tokenizer.sep_token`. In `__getitem__`, the `tokenizer.bos_token` and `tokenizer.eos_token` - are added. The reason for not adding them at preprocessing time is to allow different sequence lengths - later on. Notice that this is the "FULL-SENTENCES" setting in the RoBERTa paper, Table2. - """ - MMapTextDataset.tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, use_fast=True) - assert len(MMapTextDataset.tokenizer) < 65535 # will use uint16 to store token ids - all_files = glob.glob(f'{args.input_dir}/*.txt') - - if os.path.exists(f'{args.input_dir}/cache/train.bin') and os.path.exists(f'{args.input_dir}/cache/val.bin'): - logger.info("Cache already exists. 
Remove the cache directory to regenerate") - return - try: - os.mkdir(f'{args.input_dir}/cache/') - except FileExistsError: - pass - try: - os.mkdir(f'{args.input_dir}/shards-{args.shard_size}/') - except FileExistsError: - pass - try: - os.mkdir(f'{args.input_dir}/logs-{args.shard_size}/') # log progrss to be able to resume - except FileExistsError: - pass - - # STEP1: tokenizing and saving to shards - if args.num_preprocessing_workers > 1: - from multiprocessing.pool import Pool - with Pool(args.num_preprocessing_workers) as p: - list(tqdm(p.imap(MMapTextDataset._process_file, all_files), total=len(all_files))) - else: - [MMapTextDataset._process_file(f) for f in tqdm(all_files)] - - # STEP2: shuffling shards and combining them into train.bin and val.bin files - all_shards = glob.glob(f'{args.input_dir}/shards-{args.shard_size}/*.bin') - random.shuffle(all_shards) # shuffling based on shards not individual lines - val_shards_count = int(args.train_dev_split * len(all_shards)) - val_shards = all_shards[:val_shards_count] - train_shards = all_shards[val_shards_count:] - # TODO: if MMapTextDataset._combining_shards is very slow for large files, it can be skipped but we nned to - # update the dataset to read from multiple shards directly - MMapTextDataset._combine_shards(f'{args.input_dir}/cache/val.bin', val_shards) - MMapTextDataset._combine_shards(f'{args.input_dir}/cache/train.bin', train_shards) - - del MMapTextDataset.tokenizer - # ========================= end preprocessing code ========================= # - - -class Pretrainer(ptl.LightningModule): - - def __init__(self, hparams): - super().__init__() - - self.args = hparams - self.hparams = self.args - - self.model = AutoModelForMaskedLM.from_pretrained(args.model) - self.config = self.model.config - tokenizer = AutoTokenizer.from_pretrained(args.tokenizer) - self.pad_token_id = tokenizer.pad_token_id - self.eos_token_id = tokenizer.eos_token_id - self.bos_token_id = tokenizer.bos_token_id - - logger.info(f'Creating dataset cache from dir {self.args.input_dir}. This could be slow the first time.') - MMapTextDataset.raw_text_to_mmap(args) - - # TODO: add support for other objective functions (whole word masking, BART objectives) - self.data_collator = DataCollatorForLanguageModeling( - tokenizer=tokenizer, mlm=True, mlm_probability=self.args.mlm_prob - ) - self.start_time = 0 - - def to(self, *args, **kwargs): - param_count_before_to = len(list(self.parameters())) - super().to(*args, **kwargs) - if self.trainer.use_tpu: - # need to re-tie the weights after moving to XLA! 
- self.model.tie_weights() - if 'roberta' in self.args.model: - self.model.lm_head.bias = self.model.lm_head.decoder.bias - param_count_after_to = len(list(self.parameters())) - assert param_count_before_to == param_count_after_to - - def forward(self, input_ids=None, labels=None): - # get the padding mask - 1 for NOT masked, 0 for MASKED/PAD - attention_mask = (input_ids != self.pad_token_id).int() - - # output is loss, prediction_scores, hidden_states - output = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels) - return output[0] # loss - - def training_step(self, batch, batch_nb): - loss = self(**batch) - input_ids = batch['input_ids'] - tensorboard_logs = { - 'input_size': input_ids.numel(), - 'mlm_loss': loss, - 'mlm_bpc': loss/math.log(2), - 'mlm_perplexity': torch.exp(loss), - 'token_per_step': input_ids.numel() * self.args.grad_accum * self.trainer.world_size, - } - if self.start_time != 0: - elapsed_time = time.time() - self.start_time - tensorboard_logs['second_per_batch'] = elapsed_time - self.start_time = time.time() - if self.on_gpu: - tensorboard_logs['memory'] = torch.cuda.memory_allocated(loss.device) / 1024 ** 3 - - return {'loss': loss, 'log': tensorboard_logs} - - def validation_step(self, batch, batch_nb): - # TODO: log how long evaluation takes - self.start_time = 0 # reset training_step timer - loss = self(**batch) - tensorboard_logs = { - 'val_mlm_loss': loss.detach(), - } - return {'val_loss': tensorboard_logs["val_mlm_loss"], 'log': tensorboard_logs} - - def validation_epoch_end(self, outputs): - avg_loss = torch.stack([x['log']['val_mlm_loss'] for x in outputs if 'val_mlm_loss' in x['log']]).mean() - if self.use_ddp: - # TODO: PTL is already doing this. Is it still needed here? - # https://github.com/PyTorchLightning/pytorch-lightning/blob/0.8.5/pytorch_lightning/metrics/converters.py#L251 - torch.distributed.all_reduce(avg_loss, op=torch.distributed.ReduceOp.SUM) - avg_loss /= torch.distributed.get_world_size() - elif self.use_tpu: - avg_loss = xm.all_reduce(xm.REDUCE_SUM, avg_loss) / xm.xrt_world_size() - - logs = {'val_mlm_loss': avg_loss} - return {'log': logs, 'progress_bar': logs, "val_loss": avg_loss} - - def configure_optimizers(self): - no_decay = ["bias", "LayerNorm.weight"] - - optimizer_grouped_parameters = [ - { - "params": [p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad], - "weight_decay": self.args.weight_decay, - }, - { - "params": [p for n, p in self.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad], - "weight_decay": 0.0, - }, - ] - optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.lr, eps=self.args.adam_epsilon) - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=self.args.train_steps - ) - return [optimizer], [{"scheduler": scheduler, "interval": "step"}] - - def _get_loader(self, fname, is_train): - dataset = MMapTextDataset(fname, chunk_size=self.args.seqlen, - bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id) - - # TODO: consider `replace_sampler_ddp=True` and removing the following if statement - if self.trainer.use_ddp: - sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=is_train) - shuffle = False - elif self.trainer.use_tpu: - sampler = torch.utils.data.distributed.DistributedSampler( - dataset, - num_replicas=xm.xrt_world_size(), - rank=xm.get_ordinal(), - shuffle=is_train, - ) - shuffle = False - else: - sampler = None 
- shuffle = is_train - - loader = DataLoader( - dataset, - batch_size=self.args.batch_size, - shuffle=shuffle, - sampler=sampler, - num_workers=self.args.num_workers, - collate_fn=self.data_collator, - drop_last=is_train, - ) - return loader - - def train_dataloader(self): - return self._get_loader(f'{self.args.input_dir}/cache/train.bin', True) - - def val_dataloader(self): - return self._get_loader(f'{self.args.input_dir}/cache/val.bin', False) - - def grad_norm(self, norm_type): - # Override PTL `grad_norm` function to only return `total_grad_norm` instead norms of individual params - # TODO: grad_norm reporting needs to take fp16 loss scale into account - parameters = [p for p in self.parameters() if p.grad is not None] - device = parameters[0].device - total_norm = torch.zeros([], device=device if parameters else None) - norm_type = float(norm_type) - for p in parameters: - param_norm = p.grad.data.pow(norm_type).sum() - total_norm.add_(param_norm) - total_norm = (total_norm ** (1.0 / norm_type)) - return {'total_grad_norm': total_norm} - - @staticmethod - def add_args(parser): - parser.add_argument("--seed", type=int, default=3) - - # Dataset. Some of these params are only useful when generating the dataset cache - parser.add_argument("--input_dir", type=str, default='/net/nfs.corp/s2-research/beltagy/longformer/data/') - # Used only at the preprocessing phase - parser.add_argument("--train_dev_split", type=float, default=0.05) - parser.add_argument("--shard_size", type=int, default=1024 ** 3 // 4) # 250MB - parser.add_argument("--num_preprocessing_workers", type=int, default=1) - # Used only at the training phase - parser.add_argument("--seqlen", type=int, default=512) - parser.add_argument("--mlm_prob", type=float, default=0.15) - - # HF model loading - parser.add_argument("--tokenizer", type=str, default='roberta-base') - parser.add_argument("--model", type=str, default='roberta-base') - - # Checkpointing and logging - parser.add_argument("--save_dir", type=str, default='/runs/') - parser.add_argument("--save_prefix", type=str, default='test', - help="path of output directory is --save_dir/--save_prefix") - parser.add_argument("--resume", type=str, default=None, # It is better to use a different output dir. - help="Path to a checkpoint to load model weights and training state. It overwrites args") - parser.add_argument("--resume_model_only", type=str, default=None, - help="Path to a checkpoint to load model weights but not training state") - parser.add_argument("--log_rate", type=int, default=10) - parser.add_argument("--disable_checkpointing", type=bool, default=False) - - # Training hyperparams - parser.add_argument("--lr", type=float, default=1e-5) - parser.add_argument("--train_steps", type=int, default=3000, help='# training grad. updates') - parser.add_argument("--warmup_steps", type=int, default=1000, help='# warmup grad. updates') - parser.add_argument("--val_every", type=int, default=1000, help='# training grad. updates between evaluations') - parser.add_argument("--val_batches", type=int, default=1000, help='# evaluation **batches**') - parser.add_argument("--weight_decay", type=float, default=0.01) - parser.add_argument("--adam_epsilon", type=float, default=1e-6) - parser.add_argument("--grad_clip", type=float, default=0) # TODO: test this with fp16. 
Likely not working - - # RoBERTa's tokens_per_step = 2^18 = 512(seqlen) x 1(gpu_count) x 32(batch_size) x 16(grad_accum) - parser.add_argument("--batch_size", type=int, default=32) - parser.add_argument("--grad_accum", type=int, default=1) - - # Compute resources - parser.add_argument("--fp16", type=bool, default=False) - parser.add_argument("--num_workers", type=int, default=0) - parser.add_argument("--gpu_count", type=int, default=1, # `--gpus` is reserved for internal use by PTL - help="Number of gpus. This respects `CUDA_VISIBLE_DEVICES`") - - # For multi-node training, use the PyTorch launch script. The script and instructions can be found here: - # https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py. - # To run PTL in a mode compatible with the launch script, two things are needed: - # - pass the argument `--use_env` to `torch.distributed.launch` - # - make sure `--nproc_per_node` matches `--gpu_count` and `--nnodes` matches `--node_count`. - # For example, to run on 2 nodes, 3 gpus each, the command line on node rank 1 would be like: - # >>>> python -m torch.distributed.launch \ - # --use_env --nnodes 2 --nproc_per_node 3 \ - # --node_rank 1 --master_addr s2-server4 --master_port 12343 \ - # scripts/pretrain.py \ - # --gpu_count 2 --node_count 2 \ - # --input_dir my_data_dir --save_prefix test_multinode - parser.add_argument("--node_count", type=int, default=1, - help="Number of nodes. It needs to match --nnodes of torch.distributed.launch") - parser.add_argument("--tpu_core_count", type=int, default=None) - - return parser - - -def main(args): - random.seed(args.seed * 10) - np.random.seed(args.seed * 100) - torch.manual_seed(args.seed * 1000) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(args.seed * 10000) - - if args.resume_model_only is not None: - pretrainer = Pretrainer.load_from_checkpoint(args.resume_model_only, args) - else: - pretrainer = Pretrainer(args) - - # logger here is a SummaryWritter for tensorboard - # it is used by the trainer, and certain return variables - # from the model are automatically logged - logger = TestTubeLogger( - save_dir=args.save_dir, - name=args.save_prefix, - version=0 # always use version=0 - ) - - checkpoint_callback = ModelCheckpoint( - # model saved to filepath/prefix_.... 
- filepath=os.path.join(args.save_dir, args.save_prefix, 'checkpoint'), - prefix='', - save_top_k=1, - save_last=True, - verbose=True, - monitor='val_loss', - mode='min', - period=-1, # to allow multiple checkpoints per epoch - ) - - args.val_every *= args.grad_accum # PTL is expecting number of batches_per_gpu - trainer = ptl.Trainer( - gpus=args.gpu_count, - num_nodes=args.node_count, - num_tpu_cores=args.tpu_core_count, - distributed_backend='ddp' if (args.gpu_count > 1 or args.node_count > 1) else None, - replace_sampler_ddp=False, - track_grad_norm=2, - max_epochs=10000, min_epochs=0, max_steps=args.train_steps, # run for many epochs, but stop after max_steps - val_check_interval=args.val_every, limit_val_batches=args.val_batches, - early_stop_callback=None, - row_log_interval=args.log_rate, - progress_bar_refresh_rate=args.log_rate, - logger=logger, - checkpoint_callback=checkpoint_callback if not args.disable_checkpointing else None, - accumulate_grad_batches=args.grad_accum, - resume_from_checkpoint=args.resume, - gradient_clip_val=args.grad_clip, - precision=16 if args.fp16 else 32, amp_level='O2', - num_sanity_val_steps=2, - callbacks=[LearningRateLogger()], - ) - trainer.fit(pretrainer) - - -if __name__ == "__main__": - parser = Pretrainer.add_args(argparse.ArgumentParser(description="pretrain")) - args = parser.parse_args() - main(args) diff --git a/spaces/gradio/monochrome/README.md b/spaces/gradio/monochrome/README.md deleted file mode 100644 index c98c00bdccb80738997773f65a5b4c0b0c230e95..0000000000000000000000000000000000000000 --- a/spaces/gradio/monochrome/README.md +++ /dev/null @@ -1,17 +0,0 @@ - ---- -tags: [gradio-theme] -title: Monochrome -colorFrom: orange -colorTo: purple -sdk: gradio -sdk_version: 3.22.1b1 -app_file: app.py -pinned: false -license: apache-2.0 ---- -# Monochrome -## Description -Add a description of this theme here! -## Contributions -Thanks to [@freddyaboulton](https://huggingface.co/freddyaboulton) for adding this gradio theme! diff --git a/spaces/greenlights/gitapp/README.md b/spaces/greenlights/gitapp/README.md deleted file mode 100644 index 6ffa4bf4a234e7223d54be739a986eeef04c5a98..0000000000000000000000000000000000000000 --- a/spaces/greenlights/gitapp/README.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Gitapp -emoji: 🐨 -colorFrom: yellow -colorTo: pink -sdk: streamlit -app_file: app.py -pinned: false ---- - -# flask-salary-predictor -This is project predicts the salary of the employee based on the experience. - -# Model -model.py trains and saves the model to the disk. -model.pkb the pickle model - -# App -app.py contains all the requiered for flask and to manage APIs. - - - -Procedure-- -Open command Prompt and go to given directory and then run python app.py diff --git a/spaces/gwang-kim/DATID-3D/eg3d/viz/latent_widget.py b/spaces/gwang-kim/DATID-3D/eg3d/viz/latent_widget.py deleted file mode 100644 index 30ce50c4dd37125934152d9db57d88e36c845f5b..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/eg3d/viz/latent_widget.py +++ /dev/null @@ -1,80 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. 
Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. - -import numpy as np -import imgui -import dnnlib -from gui_utils import imgui_utils - -#---------------------------------------------------------------------------- - -class LatentWidget: - def __init__(self, viz): - self.viz = viz - self.latent = dnnlib.EasyDict(x=1, y=0, anim=False, speed=0.25) - self.latent_def = dnnlib.EasyDict(self.latent) - self.step_y = 100 - - def drag(self, dx, dy): - viz = self.viz - self.latent.x += dx / viz.font_size * 4e-2 - self.latent.y += dy / viz.font_size * 4e-2 - - @imgui_utils.scoped_by_object_id - def __call__(self, show=True): - viz = self.viz - if show: - imgui.text('Latent') - imgui.same_line(viz.label_w) - seed = round(self.latent.x) + round(self.latent.y) * self.step_y - with imgui_utils.item_width(viz.font_size * 8): - changed, seed = imgui.input_int('##seed', seed, step=0) - if changed: - self.latent.x = seed - self.latent.y = 0 - imgui.same_line(viz.label_w + viz.font_size * 8 + viz.spacing) - frac_x = self.latent.x - round(self.latent.x) - frac_y = self.latent.y - round(self.latent.y) - with imgui_utils.item_width(viz.font_size * 5): - changed, (new_frac_x, new_frac_y) = imgui.input_float2('##frac', frac_x, frac_y, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) - if changed: - self.latent.x += new_frac_x - frac_x - self.latent.y += new_frac_y - frac_y - imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) - _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag', width=viz.button_w) - if dragging: - self.drag(dx, dy) - imgui.same_line(viz.label_w + viz.font_size * 13 + viz.button_w + viz.spacing * 3) - _clicked, self.latent.anim = imgui.checkbox('Anim', self.latent.anim) - imgui.same_line(round(viz.font_size * 28.7)) - with imgui_utils.item_width(-2 - viz.button_w * 2 - viz.spacing * 2), imgui_utils.grayed_out(not self.latent.anim): - changed, speed = imgui.slider_float('##speed', self.latent.speed, -5, 5, format='Speed %.3f', power=3) - if changed: - self.latent.speed = speed - imgui.same_line() - snapped = dnnlib.EasyDict(self.latent, x=round(self.latent.x), y=round(self.latent.y)) - if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.latent != snapped)): - self.latent = snapped - imgui.same_line() - if imgui_utils.button('Reset', width=-1, enabled=(self.latent != self.latent_def)): - self.latent = dnnlib.EasyDict(self.latent_def) - - if self.latent.anim: - self.latent.x += viz.frame_delta * self.latent.speed - viz.args.w0_seeds = [] # [[seed, weight], ...] - for ofs_x, ofs_y in [[0, 0], [1, 0], [0, 1], [1, 1]]: - seed_x = np.floor(self.latent.x) + ofs_x - seed_y = np.floor(self.latent.y) + ofs_y - seed = (int(seed_x) + int(seed_y) * self.step_y) & ((1 << 32) - 1) - weight = (1 - abs(self.latent.x - seed_x)) * (1 - abs(self.latent.y - seed_y)) - if weight > 0: - viz.args.w0_seeds.append([seed, weight]) - -#---------------------------------------------------------------------------- diff --git a/spaces/h2oai/wave-tour/examples/meta_inline_script.py b/spaces/h2oai/wave-tour/examples/meta_inline_script.py deleted file mode 100644 index 787f960bbd2897fd3cb9fa57a10a1ede5a778aef..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/meta_inline_script.py +++ /dev/null @@ -1,35 +0,0 @@ -# Meta / Inline Script -# Execute arbitrary Javascript. 
-# --- -from h2o_wave import site, ui - -# This example displays a clock using Javascript. - -page = site['/demo'] - -# Add a placeholder for the clock. -page['example'] = ui.markup_card( - box='1 1 2 1', - title='Time', - content='
', -) - -# Specify the Javascript code to display the clock. -clock_script = ''' -// Locate the placeholder 'div' element in our markup_card. -const clock = document.getElementById("clock"); -const displayTime = () => { clock.innerText = (new Date()).toLocaleString(); }; - -// Display the time every second (1000ms). -window.setInterval(displayTime, 1000); -''' - -# Add the script to the page. -page['meta'] = ui.meta_card(box='', script=ui.inline_script( - # The Javascript code for this script. - content=clock_script, - # Execute this script only if the 'clock' element is available. - targets=['clock'], -)) - -page.save() diff --git a/spaces/haakohu/deep_privacy2/dp2/data/transforms/__init__.py b/spaces/haakohu/deep_privacy2/dp2/data/transforms/__init__.py deleted file mode 100644 index 9ee4bcf4e825af435ffde4c2b6e3c74112f8438f..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2/dp2/data/transforms/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .transforms import RandomCrop, CreateCondition, CreateEmbedding, Resize, ToFloat, Normalize -from .stylegan2_transform import StyleGANAugmentPipe diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/docs/tutorials/builtin_datasets.md b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/docs/tutorials/builtin_datasets.md deleted file mode 100644 index 1a2633f95e6f6a5e54c8beca102a490036478587..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/docs/tutorials/builtin_datasets.md +++ /dev/null @@ -1,99 +0,0 @@ -# Setup Builtin Datasets - -Detectron2 has builtin support for a few datasets. -The datasets are assumed to exist in a directory specified by the environment variable -`DETECTRON2_DATASETS`. -Under this directory, detectron2 expects to find datasets in the structure described below. - -You can set the location for builtin datasets by `export DETECTRON2_DATASETS=/path/to/datasets`. -If left unset, the default is `./datasets` relative to your current working directory. - -The [model zoo](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md) -contains configs and models that use these builtin datasets. - -## Expected dataset structure for COCO instance/keypoint detection: - -``` -coco/ - annotations/ - instances_{train,val}2017.json - person_keypoints_{train,val}2017.json - {train,val}2017/ - # image files that are mentioned in the corresponding json -``` - -You can use the 2014 version of the dataset as well. - -Some of the builtin tests (`dev/run_*_tests.sh`) uses a tiny version of the COCO dataset, -which you can download with `./prepare_for_tests.sh`. - -## Expected dataset structure for PanopticFPN: - -``` -coco/ - annotations/ - panoptic_{train,val}2017.json - panoptic_{train,val}2017/ # png annotations - panoptic_stuff_{train,val}2017/ # generated by the script mentioned below -``` - -Install panopticapi by: -``` -pip install git+https://github.com/cocodataset/panopticapi.git -``` -Then, run `python prepare_panoptic_fpn.py`, to extract semantic annotations from panoptic annotations. 
- -## Expected dataset structure for LVIS instance segmentation: -``` -coco/ - {train,val,test}2017/ -lvis/ - lvis_v0.5_{train,val}.json - lvis_v0.5_image_info_test.json -``` - -Install lvis-api by: -``` -pip install git+https://github.com/lvis-dataset/lvis-api.git -``` - -Run `python prepare_cocofied_lvis.py` to prepare "cocofied" LVIS annotations for evaluation of models trained on the COCO dataset. - -## Expected dataset structure for cityscapes: -``` -cityscapes/ - gtFine/ - train/ - aachen/ - color.png, instanceIds.png, labelIds.png, polygons.json, - labelTrainIds.png - ... - val/ - test/ - leftImg8bit/ - train/ - val/ - test/ -``` -Install cityscapes scripts by: -``` -pip install git+https://github.com/mcordts/cityscapesScripts.git -``` - -Note: labelTrainIds.png are created using cityscapesescript with: -``` -CITYSCAPES_DATASET=$DETECTRON2_DATASETS/cityscapes python cityscapesscripts/preparation/createTrainIdLabelImgs.py -``` -They are not needed for instance segmentation. - -## Expected dataset structure for Pascal VOC: -``` -VOC20{07,12}/ - Annotations/ - ImageSets/ - Main/ - trainval.txt - test.txt - # train.txt or val.txt, if you use these splits - JPEGImages/ -``` diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/caffe2_mask_rcnn.cpp b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/caffe2_mask_rcnn.cpp deleted file mode 100644 index 44370b4c518408f1f46345c7e3ac07c7db63a485..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/caffe2_mask_rcnn.cpp +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -C10_DEFINE_string(predict_net, "", "path to model.pb"); -C10_DEFINE_string(init_net, "", "path to model_init.pb"); -C10_DEFINE_string(input, "", "path to input image"); - -using namespace std; -using namespace caffe2; - -int main(int argc, char** argv) { - caffe2::GlobalInit(&argc, &argv); - string predictNetPath = FLAGS_predict_net; - string initNetPath = FLAGS_init_net; - cv::Mat input = cv::imread(FLAGS_input, cv::IMREAD_COLOR); - - const int height = input.rows; - const int width = input.cols; - // FPN models require divisibility of 32 - assert(height % 32 == 0 && width % 32 == 0); - const int batch = 1; - const int channels = 3; - - // initialize Net and Workspace - caffe2::NetDef initNet_, predictNet_; - CAFFE_ENFORCE(ReadProtoFromFile(initNetPath, &initNet_)); - CAFFE_ENFORCE(ReadProtoFromFile(predictNetPath, &predictNet_)); - - Workspace workSpace; - for (auto& str : predictNet_.external_input()) { - workSpace.CreateBlob(str); - } - CAFFE_ENFORCE(workSpace.CreateNet(predictNet_)); - CAFFE_ENFORCE(workSpace.RunNetOnce(initNet_)); - - // setup inputs - auto data = BlobGetMutableTensor(workSpace.GetBlob("data"), caffe2::CPU); - data->Resize(batch, channels, height, width); - float* ptr = data->mutable_data(); - // HWC to CHW - for (int c = 0; c < 3; ++c) { - for (int i = 0; i < height * width; ++i) { - ptr[c * height * width + i] = static_cast(input.data[3 * i + c]); - } - } - - auto im_info = - BlobGetMutableTensor(workSpace.GetBlob("im_info"), caffe2::CPU); - im_info->Resize(batch, 3); - float* im_info_ptr = im_info->mutable_data(); - im_info_ptr[0] = height; - im_info_ptr[1] = width; - im_info_ptr[2] = 1.0; - - // run the network - CAFFE_ENFORCE(workSpace.RunNet(predictNet_.name())); - - // run 3 more times to benchmark - int N_benchmark = 3; - auto start_time = chrono::high_resolution_clock::now(); - for (int i = 0; i < N_benchmark; ++i) { - CAFFE_ENFORCE(workSpace.RunNet(predictNet_.name())); - } - auto end_time = chrono::high_resolution_clock::now(); - auto ms = chrono::duration_cast(end_time - start_time) - .count(); - cout << "Latency (should vary with different inputs): " - << ms * 1.0 / 1e6 / N_benchmark << " seconds" << endl; - - // parse Mask R-CNN outputs - caffe2::Tensor bbox( - workSpace.GetBlob("bbox_nms")->Get(), caffe2::CPU); - caffe2::Tensor scores( - workSpace.GetBlob("score_nms")->Get(), caffe2::CPU); - caffe2::Tensor labels( - workSpace.GetBlob("class_nms")->Get(), caffe2::CPU); - caffe2::Tensor mask_probs( - workSpace.GetBlob("mask_fcn_probs")->Get(), caffe2::CPU); - cout << "bbox:" << bbox.DebugString() << endl; - cout << "scores:" << scores.DebugString() << endl; - cout << "labels:" << labels.DebugString() << endl; - cout << "mask_probs: " << mask_probs.DebugString() << endl; - - int num_instances = bbox.sizes()[0]; - for (int i = 0; i < num_instances; ++i) { - float score = scores.data()[i]; - if (score < 0.6) - continue; // skip them - - const float* box = bbox.data() + i * 4; - int label = labels.data()[i]; - - cout << "Prediction " << i << ", xyxy=("; - cout << box[0] << ", " << box[1] << ", " << box[2] << ", " << box[3] - << "); score=" << score << "; label=" << label << endl; - - const float* mask = mask_probs.data() + - i * mask_probs.size_from_dim(1) + label * mask_probs.size_from_dim(2); - - // save the 28x28 mask - cv::Mat cv_mask(28, 28, CV_32FC1); - memcpy(cv_mask.data, mask, 28 * 28 * sizeof(float)); - 
cv::imwrite("mask" + std::to_string(i) + ".png", cv_mask * 255.); - } - return 0; -} diff --git a/spaces/hasibzunair/fifa-tryon-demo/rembg/bg.py b/spaces/hasibzunair/fifa-tryon-demo/rembg/bg.py deleted file mode 100644 index 57ac6b591b1af37b7ffb3959033409bc5a6e9ef3..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/rembg/bg.py +++ /dev/null @@ -1,176 +0,0 @@ -import io -from enum import Enum -from typing import List, Optional, Union - -import numpy as np -from cv2 import ( - BORDER_DEFAULT, - MORPH_ELLIPSE, - MORPH_OPEN, - GaussianBlur, - getStructuringElement, - morphologyEx, -) -from PIL import Image -from PIL.Image import Image as PILImage -from pymatting.alpha.estimate_alpha_cf import estimate_alpha_cf -from pymatting.foreground.estimate_foreground_ml import estimate_foreground_ml -from pymatting.util.util import stack_images -from scipy.ndimage.morphology import binary_erosion - -from .session_base import BaseSession -from .session_factory import new_session - -kernel = getStructuringElement(MORPH_ELLIPSE, (3, 3)) - - -class ReturnType(Enum): - BYTES = 0 - PILLOW = 1 - NDARRAY = 2 - - -def alpha_matting_cutout( - img: PILImage, - mask: PILImage, - foreground_threshold: int, - background_threshold: int, - erode_structure_size: int, -) -> PILImage: - - if img.mode == "RGBA" or img.mode == "CMYK": - img = img.convert("RGB") - - img = np.asarray(img) - mask = np.asarray(mask) - - is_foreground = mask > foreground_threshold - is_background = mask < background_threshold - - structure = None - if erode_structure_size > 0: - structure = np.ones( - (erode_structure_size, erode_structure_size), dtype=np.uint8 - ) - - is_foreground = binary_erosion(is_foreground, structure=structure) - is_background = binary_erosion(is_background, structure=structure, border_value=1) - - trimap = np.full(mask.shape, dtype=np.uint8, fill_value=128) - trimap[is_foreground] = 255 - trimap[is_background] = 0 - - img_normalized = img / 255.0 - trimap_normalized = trimap / 255.0 - - alpha = estimate_alpha_cf(img_normalized, trimap_normalized) - foreground = estimate_foreground_ml(img_normalized, alpha) - cutout = stack_images(foreground, alpha) - - cutout = np.clip(cutout * 255, 0, 255).astype(np.uint8) - cutout = Image.fromarray(cutout) - - return cutout - - -def naive_cutout(img: PILImage, mask: PILImage) -> PILImage: - empty = Image.new("RGBA", (img.size), 0) - cutout = Image.composite(img, empty, mask) - return cutout - - -def get_concat_v_multi(imgs: List[PILImage]) -> PILImage: - pivot = imgs.pop(0) - for im in imgs: - pivot = get_concat_v(pivot, im) - return pivot - - -def get_concat_v(img1: PILImage, img2: PILImage) -> PILImage: - dst = Image.new("RGBA", (img1.width, img1.height + img2.height)) - dst.paste(img1, (0, 0)) - dst.paste(img2, (0, img1.height)) - return dst - - -def post_process(mask: np.ndarray) -> np.ndarray: - """ - Post Process the mask for a smooth boundary by applying Morphological Operations - Research based on paper: https://www.sciencedirect.com/science/article/pii/S2352914821000757 - args: - mask: Binary Numpy Mask - """ - mask = morphologyEx(mask, MORPH_OPEN, kernel) - mask = GaussianBlur(mask, (5, 5), sigmaX=2, sigmaY=2, borderType=BORDER_DEFAULT) - mask = np.where(mask < 127, 0, 255).astype(np.uint8) # convert again to binary - return mask - - -def remove( - data: Union[bytes, PILImage, np.ndarray], - alpha_matting: bool = False, - alpha_matting_foreground_threshold: int = 240, - alpha_matting_background_threshold: int = 10, - alpha_matting_erode_size: 
int = 10, - session: Optional[BaseSession] = None, - only_mask: bool = False, - post_process_mask: bool = False, -) -> Union[bytes, PILImage, np.ndarray]: - - if isinstance(data, PILImage): - return_type = ReturnType.PILLOW - img = data - elif isinstance(data, bytes): - return_type = ReturnType.BYTES - img = Image.open(io.BytesIO(data)) - elif isinstance(data, np.ndarray): - return_type = ReturnType.NDARRAY - img = Image.fromarray(data) - else: - raise ValueError("Input type {} is not supported.".format(type(data))) - - if session is None: - session = new_session("u2net") - - masks = session.predict(img) - cutouts = [] - - for mask in masks: - if post_process_mask: - mask = Image.fromarray(post_process(np.array(mask))) - - if only_mask: - cutout = mask - - elif alpha_matting: - try: - cutout = alpha_matting_cutout( - img, - mask, - alpha_matting_foreground_threshold, - alpha_matting_background_threshold, - alpha_matting_erode_size, - ) - except ValueError: - cutout = naive_cutout(img, mask) - - else: - cutout = naive_cutout(img, mask) - - cutouts.append(cutout) - - cutout = img - if len(cutouts) > 0: - cutout = get_concat_v_multi(cutouts) - - if ReturnType.PILLOW == return_type: - return cutout - - if ReturnType.NDARRAY == return_type: - return np.asarray(cutout) - - bio = io.BytesIO() - cutout.save(bio, "PNG") - bio.seek(0) - - return bio.read() diff --git a/spaces/hi9/Core-4-with-QA-on-UC/README.md b/spaces/hi9/Core-4-with-QA-on-UC/README.md deleted file mode 100644 index 2373785debe44593a155d9a47316791e9cfdb7fc..0000000000000000000000000000000000000000 --- a/spaces/hi9/Core-4-with-QA-on-UC/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Core 4 With QA on UC -emoji: 👁 -colorFrom: indigo -colorTo: gray -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNetTrainerMTLlate.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNetTrainerMTLlate.py deleted file mode 100644 index 60e2f19cf3251881df5667a83895c15936f5fb24..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNetTrainerMTLlate.py +++ /dev/null @@ -1,470 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from collections import OrderedDict
-from typing import Tuple
-
-import numpy as np
-import torch
-from nnunet.training.data_augmentation.data_augmentation_moreDA import get_moreDA_augmentation
-from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
-from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
-from nnunet.network_architecture.generic_UNet_MTLlate import Generic_UNet_MTLlate
-from nnunet.network_architecture.initialization import InitWeights_He
-from nnunet.network_architecture.neural_network import SegmentationNetwork
-from nnunet.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, \
- get_patch_size, default_3D_augmentation_params
-from nnunet.training.dataloading.dataset_loading import unpack_dataset
-from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
-from nnunet.utilities.nd_softmax import softmax_helper
-from sklearn.model_selection import KFold
-from torch import nn
-from torch.cuda.amp import autocast
-from nnunet.training.learning_rate.poly_lr import poly_lr
-from batchgenerators.utilities.file_and_folder_operations import *
-import matplotlib.pyplot as plt
-
-
-class nnUNetTrainerMTLlate(nnUNetTrainer):
-
-
- def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
- unpack_data=True, deterministic=True, fp16=False):
- super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
- deterministic, fp16)
- self.max_num_epochs = 1
- self.num_batches_per_epoch = 25 # 250 default
- self.num_val_batches_per_epoch = 50 # 50 default
- self.initial_lr = 1e-2
- self.deep_supervision_scales = None
- self.ds_loss_weights = None
-
- self.pin_memory = True
-
- def initialize(self, training=True, force_load_plans=False, disable_deepsupervision=False):
- """
- - replaced get_default_augmentation with get_moreDA_augmentation
- - enforce to only run this code once
- - loss function wrapper for deep supervision
-
- :param training:
- :param force_load_plans:
- :return:
- """
- if not self.was_initialized:
- maybe_mkdir_p(self.output_folder)
-
- if force_load_plans or (self.plans is None):
- self.load_plans_file()
-
- self.process_plans(self.plans)
- print("NUM_pool_op_kernel_sizes")
- print(self.net_num_pool_op_kernel_sizes)
- self.setup_DA_params()
-
- ################# Here we wrap the loss for deep supervision ############
- # we need to know the number of outputs of the network
- net_numpool = len(self.net_num_pool_op_kernel_sizes)
-
- # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
- # this gives higher resolution outputs more weight in the loss
- weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
-
- # we don't use the lowest-resolution output. Normalize weights so that they sum to 1
- mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
- weights[~mask] = 0
- weights = weights / weights.sum()
- self.ds_loss_weights = weights
- # now wrap the loss
- self.loss_zone = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
-
- self.loss_front = MultipleOutputLoss2(self.loss, None)
- ################# END ###################
-
- self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
- "_stage%d" % self.stage)
- if training:
- # data gets loaded here
- self.dl_tr, self.dl_val = self.get_basic_generators()
- if self.unpack_data:
- print("unpacking dataset")
- unpack_dataset(self.folder_with_preprocessed_data)
- print("done")
- else:
- print(
- "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
- "will wait all winter for your model to finish!")
-
- self.tr_gen, self.val_gen = get_moreDA_augmentation(
- self.dl_tr, self.dl_val,
- self.data_aug_params[
- 'patch_size_for_spatialtransform'],
- self.data_aug_params,
- deep_supervision_scales=self.deep_supervision_scales,
- pin_memory=self.pin_memory,
- use_nondetMultiThreadedAugmenter=False
- )
- self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
- also_print_to_console=False)
- self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
- also_print_to_console=False)
- else:
- pass
-
- self.initialize_network()
- self.initialize_optimizer_and_scheduler()
-
- assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
- else:
- self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
- self.was_initialized = True
-
- def initialize_network(self):
- """
- - momentum 0.99
- - SGD instead of Adam
- - self.lr_scheduler = None because we do poly_lr
- - deep supervision = True
- - I am sure I forgot something here
-
- Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though
- :return:
- """
- if self.threeD:
- conv_op = nn.Conv3d
- dropout_op = nn.Dropout3d
- norm_op = nn.InstanceNorm3d
-
- else:
- conv_op = nn.Conv2d
- dropout_op = nn.Dropout2d
- norm_op = nn.InstanceNorm2d
-
- norm_op_kwargs = {'eps': 1e-5, 'affine': True}
- dropout_op_kwargs = {'p': 0, 'inplace': True}
- net_nonlin = nn.LeakyReLU
- net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
- self.network = Generic_UNet_MTLlate(self.num_input_channels, self.base_num_features, self.num_classes,
- len(self.net_num_pool_op_kernel_sizes),
- self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
- dropout_op_kwargs,
- net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
- self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
-
- print("number of parameters")
- print(sum(param.numel() for param in self.network.parameters()))
-
- if torch.cuda.is_available():
- self.network.cuda()
- self.network.inference_apply_nonlin = softmax_helper
-
- def initialize_optimizer_and_scheduler(self):
- assert self.network is not None, "self.initialize_network must be called first"
- self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
- momentum=0.99, nesterov=True)
- self.lr_scheduler = None
-
- def run_online_evaluation(self, output, target):
- """
- due to deep supervision the return value and the reference are now lists of tensors.
We only need the full - resolution output because this is what we are interested in in the end. The others are ignored - :param output: - :param target: - :return: - """ - target = target[0] - output = output[0] - return super().run_online_evaluation(output, target) - - def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, - step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, - validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, - segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True): - """ - We need to wrap this because we need to enforce self.network.do_ds = False for prediction - """ - ds = self.network.do_ds - self.network.do_ds = False - ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size, - save_softmax=save_softmax, use_gaussian=use_gaussian, - overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug, - all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs, - run_postprocessing_on_folds=run_postprocessing_on_folds) - - self.network.do_ds = ds - return ret - - def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True, - mirror_axes: Tuple[int] = None, - use_sliding_window: bool = True, step_size: float = 0.5, - use_gaussian: bool = True, pad_border_mode: str = 'constant', - pad_kwargs: dict = None, all_in_gpu: bool = False, - verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]: - """ - We need to wrap this because we need to enforce self.network.do_ds = False for prediction - """ - ds = self.network.do_ds - self.network.do_ds = False - ret = super().predict_preprocessed_data_return_seg_and_softmax(data[:1], - do_mirroring=do_mirroring, - mirror_axes=mirror_axes, - use_sliding_window=use_sliding_window, - step_size=step_size, use_gaussian=use_gaussian, - pad_border_mode=pad_border_mode, - pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, - verbose=verbose, - mixed_precision=mixed_precision) - self.network.do_ds = ds - return ret - - def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False): - """ - gradient clipping improves training stability - - :param data_generator: - :param do_backprop: - :param run_online_evaluation: - :return: - """ - data_dict = next(data_generator) - data = data_dict['data'] - target = data_dict['target'] - - data = maybe_to_torch(data) - target = maybe_to_torch(target) - - if torch.cuda.is_available(): - data = to_cuda(data) - target = to_cuda(target) - - self.optimizer.zero_grad() - - if self.fp16: - with autocast(): - - output = self.network(data) - del data - - target0 = [target[i][:, [0]].to(torch.float16) for i in range(len(target))] - output0 = [output[i][:, 0:2] for i in range(len(output))] - - target1 = [target[i][:, [1]].to(torch.float16) for i in range(len(target))] - output1 = [output[i][:, 2:6] for i in range(len(output))] - - l_front = self.loss_front(output0, target0) - - l_zone = self.loss_zone(output1, target1) - l = 0.5*l_front + 0.5*l_zone - - if do_backprop: - self.amp_grad_scaler.scale(l).backward() - self.amp_grad_scaler.unscale_(self.optimizer) - torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) - self.amp_grad_scaler.step(self.optimizer) - self.amp_grad_scaler.update() - else: - output = self.network(data) - del data - l = self.loss(output, target) - - if do_backprop: - l.backward() - 
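- # clip the global gradient norm to 12 before the optimizer step, matching the fp16 branch above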
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) - self.optimizer.step() - - if run_online_evaluation: - self.run_online_evaluation(output0, target0) - self.run_online_evaluation(output1, target1) - - del target - del target0 - del target1 - - del output - del output0 - del output1 - - - return l.detach().cpu().numpy() - - def do_split(self): - """ - The default split is a 5 fold CV on all available training cases. nnU-Net will create a split (it is seeded, - so always the same) and save it as splits_final.pkl file in the preprocessed data directory. - Sometimes you may want to create your own split for various reasons. For this you will need to create your own - splits_final.pkl file. If this file is present, nnU-Net is going to use it and whatever splits are defined in - it. You can create as many splits in this file as you want. Note that if you define only 4 splits (fold 0-3) - and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to - use a random 80:20 data split. - :return: - """ - if self.fold == "all": - # if fold==all then we use all images for training and validation - tr_keys = val_keys = list(self.dataset.keys()) - else: - splits_file = join(self.dataset_directory, "splits_final.pkl") - - # if the split file does not exist we need to create it - if not isfile(splits_file): - self.print_to_log_file("Creating new 5-fold cross-validation split...") - splits = [] - all_keys_sorted = np.sort(list(self.dataset.keys())) - kfold = KFold(n_splits=5, shuffle=True, random_state=12345) - for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)): - train_keys = np.array(all_keys_sorted)[train_idx] - test_keys = np.array(all_keys_sorted)[test_idx] - splits.append(OrderedDict()) - splits[-1]['train'] = train_keys - splits[-1]['val'] = test_keys - save_pickle(splits, splits_file) - - else: - self.print_to_log_file("Using splits from existing split file:", splits_file) - splits = load_pickle(splits_file) - self.print_to_log_file("The split file contains %d splits." % len(splits)) - - self.print_to_log_file("Desired fold for training: %d" % self.fold) - if self.fold < len(splits): - tr_keys = splits[self.fold]['train'] - val_keys = splits[self.fold]['val'] - self.print_to_log_file("This split has %d training and %d validation cases." - % (len(tr_keys), len(val_keys))) - else: - self.print_to_log_file("INFO: You requested fold %d for training but splits " - "contain only %d folds. I am now creating a " - "random (but seeded) 80:20 split!" % (self.fold, len(splits))) - # if we request a fold that is not in the split file, create a random 80:20 split - rnd = np.random.RandomState(seed=12345 + self.fold) - keys = np.sort(list(self.dataset.keys())) - idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False) - idx_val = [i for i in range(len(keys)) if i not in idx_tr] - tr_keys = [keys[i] for i in idx_tr] - val_keys = [keys[i] for i in idx_val] - self.print_to_log_file("This random 80:20 split has %d training and %d validation cases." 
- % (len(tr_keys), len(val_keys)))
-
- tr_keys.sort()
- val_keys.sort()
- self.dataset_tr = OrderedDict()
- for i in tr_keys:
- self.dataset_tr[i] = self.dataset[i]
- self.dataset_val = OrderedDict()
- for i in val_keys:
- self.dataset_val[i] = self.dataset[i]
-
- def setup_DA_params(self):
- """
- - we increase rotation angle from [-15, 15] to [-30, 30]
- - scale range is now (0.7, 1.4), was (0.85, 1.25)
- - we don't do elastic deformation anymore
-
- :return:
- """
-
- self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
- np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]
-
- if self.threeD:
- self.data_aug_params = default_3D_augmentation_params
- self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
- self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
- self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
- if self.do_dummy_2D_aug:
- self.data_aug_params["dummy_2D"] = True
- self.print_to_log_file("Using dummy2d data augmentation")
- self.data_aug_params["elastic_deform_alpha"] = \
- default_2D_augmentation_params["elastic_deform_alpha"]
- self.data_aug_params["elastic_deform_sigma"] = \
- default_2D_augmentation_params["elastic_deform_sigma"]
- self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
- else:
- self.do_dummy_2D_aug = False
- if max(self.patch_size) / min(self.patch_size) > 1.5:
- default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
- self.data_aug_params = default_2D_augmentation_params
- self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
-
- if self.do_dummy_2D_aug:
- self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
- self.data_aug_params['rotation_x'],
- self.data_aug_params['rotation_y'],
- self.data_aug_params['rotation_z'],
- self.data_aug_params['scale_range'])
- self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
- else:
- self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
- self.data_aug_params['rotation_y'],
- self.data_aug_params['rotation_z'],
- self.data_aug_params['scale_range'])
-
- self.data_aug_params["scale_range"] = (0.7, 1.4)
- self.data_aug_params["do_elastic"] = False
- self.data_aug_params['selected_seg_channels'] = [0, 1]
- self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size
-
- self.data_aug_params["num_cached_per_thread"] = 2
-
- def maybe_update_lr(self, epoch=None):
- """
- if epoch is not None we overwrite epoch. Else we use epoch = self.epoch + 1
-
- (maybe_update_lr is called in on_epoch_end which is called before epoch is incremented.
- Therefore we need to do +1 here)
-
- :param epoch:
- :return:
- """
- if epoch is None:
- ep = self.epoch + 1
- else:
- ep = epoch
- self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9)
- self.print_to_log_file("lr:", np.round(self.optimizer.param_groups[0]['lr'], decimals=6))
-
- def on_epoch_end(self):
- """
- overwrite patient-based early stopping. Always run to 1000 epochs
- :return:
- """
- super().on_epoch_end()
- continue_training = self.epoch < self.max_num_epochs
-
- # it can rarely happen that the momentum of nnUNetTrainerV2 is too high for some dataset. If at epoch 100 the
If at epoch 100 the - # estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95 - if self.epoch == 100: - if self.all_val_eval_metrics[-1] == 0: - self.optimizer.param_groups[0]["momentum"] = 0.95 - self.network.apply(InitWeights_He(1e-2)) - self.print_to_log_file("At epoch 100, the mean foreground Dice was 0. This can be caused by a too " - "high momentum. High momentum (0.99) is good for datasets where it works, but " - "sometimes causes issues such as this one. Momentum has now been reduced to " - "0.95 and network weights have been reinitialized") - return continue_training - - def run_training(self): - """ - if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first - continued epoch with self.initial_lr - - we also need to make sure deep supervision in the network is enabled for training, thus the wrapper - :return: - """ - self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we - # want at the start of the training - ds = self.network.do_ds - self.network.do_ds = True - ret = super().run_training() - self.network.do_ds = ds - return ret diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/utilities/folder_names.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/utilities/folder_names.py deleted file mode 100644 index 708b132ebc62e7fdf1ba5f8e3a2c8baec44d33fb..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/utilities/folder_names.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-
-from batchgenerators.utilities.file_and_folder_operations import *
-from nnunet.paths import network_training_output_dir
-
-
-def get_output_folder_name(model: str, task: str = None, trainer: str = None, plans: str = None, fold: int = None,
- overwrite_training_output_dir: str = None):
- """
- Retrieves the correct output directory for the nnU-Net model described by the input parameters
-
- :param model:
- :param task:
- :param trainer:
- :param plans:
- :param fold:
- :param overwrite_training_output_dir:
- :return:
- """
- assert model in ["2d", "3d_cascade_fullres", '3d_fullres', '3d_lowres']
-
- if overwrite_training_output_dir is not None:
- tr_dir = overwrite_training_output_dir
- else:
- tr_dir = network_training_output_dir
-
- current = join(tr_dir, model)
- if task is not None:
- current = join(current, task)
- if trainer is not None and plans is not None:
- current = join(current, trainer + "__" + plans)
- if fold is not None:
- current = join(current, "fold_%d" % fold)
- return current diff --git a/spaces/housexu123/bingo-2.0/src/components/ui/input.tsx b/spaces/housexu123/bingo-2.0/src/components/ui/input.tsx deleted file mode 100644 index 684a857f3d769b78818fb13de1abaebfb09ca79c..0000000000000000000000000000000000000000 --- a/spaces/housexu123/bingo-2.0/src/components/ui/input.tsx +++ /dev/null @@ -1,25 +0,0 @@ -import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-
-export interface InputProps
- extends React.InputHTMLAttributes<HTMLInputElement> {}
-
-const Input = React.forwardRef<HTMLInputElement, InputProps>(
- ({ className, type, ...props }, ref) => {
- return (
- <input
- type={type}
- className={cn(
- 'flex h-9 w-full rounded-md border border-input bg-transparent px-3 py-1 text-sm shadow-sm transition-colors file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:cursor-not-allowed disabled:opacity-50',
- className
- )}
- ref={ref}
- {...props}
- />
- )
- }
-)
-Input.displayName = 'Input'
-
-export { Input } diff --git a/spaces/huggingchat/chat-ui/src/lib/server/models.ts b/spaces/huggingchat/chat-ui/src/lib/server/models.ts deleted file mode 100644 index 599dbbc7ff12c3fdaf99150e5d4a4b7f7c519112..0000000000000000000000000000000000000000 --- a/spaces/huggingchat/chat-ui/src/lib/server/models.ts +++ /dev/null @@ -1,131 +0,0 @@ -import { HF_ACCESS_TOKEN, MODELS, OLD_MODELS, TASK_MODEL } from "$env/static/private";
-import type { ChatTemplateInput } from "$lib/types/Template";
-import { compileTemplate } from "$lib/utils/template";
-import { z } from "zod";
-
-type Optional<T, K extends keyof T> = Pick<Partial<T>, K> & Omit<T, K>;
-
-const sagemakerEndpoint = z.object({
- host: z.literal("sagemaker"),
- url: z.string().url(),
- accessKey: z.string().min(1),
- secretKey: z.string().min(1),
- sessionToken: z.string().optional(),
-});
-
-const tgiEndpoint = z.object({
- host: z.union([z.literal("tgi"), z.undefined()]),
- url: z.string().url(),
- authorization: z.string().min(1).default(`Bearer ${HF_ACCESS_TOKEN}`),
-});
-
-const commonEndpoint = z.object({
- weight: z.number().int().positive().default(1),
-});
-
-const endpoint = z.lazy(() =>
- z.union([sagemakerEndpoint.merge(commonEndpoint), tgiEndpoint.merge(commonEndpoint)])
-);
-
-const combinedEndpoint = endpoint.transform((data) => {
- if (data.host === "tgi" || data.host === undefined) {
- return tgiEndpoint.merge(commonEndpoint).parse(data);
- } else if (data.host === "sagemaker") {
- return sagemakerEndpoint.merge(commonEndpoint).parse(data);
- } else {
- throw new Error(`Invalid host: ${data.host}`);
- }
-});
-
-const modelConfig = z.object({
- /** Used as an identifier in DB */
- id: z.string().optional(),
- /** Used to link to the model page, and for inference */
- name: z.string().min(1),
- displayName: z.string().min(1).optional(),
- description: z.string().min(1).optional(),
- websiteUrl: z.string().url().optional(),
- modelUrl: z.string().url().optional(),
- datasetName: z.string().min(1).optional(),
- datasetUrl: z.string().url().optional(),
- userMessageToken: z.string().default(""),
- userMessageEndToken: z.string().default(""),
- assistantMessageToken: z.string().default(""),
- assistantMessageEndToken: z.string().default(""),
- messageEndToken: z.string().default(""),
- preprompt: z.string().default(""),
- prepromptUrl: z.string().url().optional(),
- chatPromptTemplate: z
- .string()
- .default(
- "{{preprompt}}" +
- "{{#each messages}}" +
- "{{#ifUser}}{{@root.userMessageToken}}{{content}}{{@root.userMessageEndToken}}{{/ifUser}}" +
- "{{#ifAssistant}}{{@root.assistantMessageToken}}{{content}}{{@root.assistantMessageEndToken}}{{/ifAssistant}}" +
- "{{/each}}" +
- "{{assistantMessageToken}}"
- ),
- promptExamples: z
- .array(
- z.object({
- title: z.string().min(1),
- prompt: z.string().min(1),
- })
- )
- .optional(),
- endpoints: z.array(combinedEndpoint).optional(),
- parameters: z
- .object({
- temperature: z.number().min(0).max(1),
- truncate: z.number().int().positive(),
- max_new_tokens: z.number().int().positive(),
- stop: z.array(z.string()).optional(),
- })
- .passthrough()
- .optional(),
-});
-
-const modelsRaw = z.array(modelConfig).parse(JSON.parse(MODELS));
-
-const processModel = async (m: z.infer<typeof modelConfig>) => ({
- ...m,
- userMessageEndToken: m?.userMessageEndToken || m?.messageEndToken,
- assistantMessageEndToken: m?.assistantMessageEndToken || m?.messageEndToken,
- chatPromptRender: compileTemplate<ChatTemplateInput>(m.chatPromptTemplate, m),
- id: m.id || m.name,
- displayName: m.displayName || m.name,
- preprompt: m.prepromptUrl ? await fetch(m.prepromptUrl).then((r) => r.text()) : m.preprompt,
- parameters: { ...m.parameters, stop_sequences: m.parameters?.stop },
-});
-
-export const models = await Promise.all(modelsRaw.map(processModel));
-
-// Models that have been deprecated
-export const oldModels = OLD_MODELS
- ? z
- .array(
- z.object({
- id: z.string().optional(),
- name: z.string().min(1),
- displayName: z.string().min(1).optional(),
- })
- )
- .parse(JSON.parse(OLD_MODELS))
- .map((m) => ({ ...m, id: m.id || m.name, displayName: m.displayName || m.name }))
- : [];
-
-export const defaultModel = models[0];
-
-export const validateModel = (_models: BackendModel[]) => {
- // Zod enum function requires 2 parameters
- return z.enum([_models[0].id, ..._models.slice(1).map((m) => m.id)]);
-};
-
-// if `TASK_MODEL` is the name of a model we use it, else we try to parse `TASK_MODEL` as a model config itself
-export const smallModel = TASK_MODEL
- ? models.find((m) => m.name === TASK_MODEL) ||
- (await processModel(modelConfig.parse(JSON.parse(TASK_MODEL))))
- : defaultModel;
-
-export type BackendModel = Optional<(typeof models)[0], "preprompt" | "parameters">;
-export type Endpoint = z.infer<typeof endpoint>; diff --git a/spaces/huggingface-projects/diffuse-the-rest/build/_app/immutable/chunks/2-6ab63caf.js b/spaces/huggingface-projects/diffuse-the-rest/build/_app/immutable/chunks/2-6ab63caf.js deleted file mode 100644 index 01435cd888e3d6b3cf8645c01e8df844d9a470c1..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/diffuse-the-rest/build/_app/immutable/chunks/2-6ab63caf.js +++ /dev/null @@ -1 +0,0 @@ -import{default as m}from"../components/pages/_page.svelte-1525ec40.js";import"./index-032ac624.js";export{m as component}; diff --git a/spaces/huggingface-projects/wordalle/frontend/src/app.html b/spaces/huggingface-projects/wordalle/frontend/src/app.html deleted file mode 100644 index 38a665e54f9ea09554982a53da42bfbe9208f0b4..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/wordalle/frontend/src/app.html +++ /dev/null @@ -1,13 +0,0 @@ -<!DOCTYPE html>
-<html lang="en">
-<head>
-<meta charset="utf-8" />
-<meta name="description" content="" />
-<link rel="icon" href="%sveltekit.assets%/favicon.png" />
-<meta name="viewport" content="width=device-width, initial-scale=1" />
-%sveltekit.head%
-</head>
-<body>
-
<div>%sveltekit.body%</div>
-</body>
-</html> diff --git a/spaces/huggingface/README/README.md b/spaces/huggingface/README/README.md deleted file mode 100644 index 67bca046f214959dce44c669ab1afe39e8c2366f..0000000000000000000000000000000000000000 --- a/spaces/huggingface/README/README.md +++ /dev/null @@ -1,21 +0,0 @@ ----
-title: README
-emoji: 💻
-colorFrom: blue
-colorTo: pink
-sdk: static
-pinned: false
----
-

- 👋 Hi!
-
- We are on a mission to democratize good machine learning, one commit at a time.
-
- If that sounds like something you should be doing, why don't you join us!
-
- For press enquiries, you can ✉️ contact our team here.
\ No newline at end of file diff --git a/spaces/hussain-shk/IndiSent/indic_nlp_library/indicnlp/cli/__init__.py b/spaces/hussain-shk/IndiSent/indic_nlp_library/indicnlp/cli/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/huy-ha/semabs-relevancy/CLIP/data/prompts.md b/spaces/huy-ha/semabs-relevancy/CLIP/data/prompts.md deleted file mode 100644 index 6d8aaf7b13f04031e7ea00d58a1c131b98bdfe20..0000000000000000000000000000000000000000 --- a/spaces/huy-ha/semabs-relevancy/CLIP/data/prompts.md +++ /dev/null @@ -1,3401 +0,0 @@ -# Prompts for Image Classification - -Below are the class names and templates that are used for collecting the zero-shot classification scores in the paper. Each dataset has two lists `classes` and `templates`, where the string `{}` in the template is to be replaced with the corresponding class names. For the Facial Emotion Recognition 2013 dataset specifically, we used multiple class names for certain classes. - -This file contains prompt data for 26 of the 27 datasets shown in Table 9 of the paper; the text prompts for ImageNet (as well as other [ImageNet Testbed](https://modestyachts.github.io/imagenet-testbed/) datasets in Figure 13) can be found in [this notebook](https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb), as well as how to ensemble predictions from multiple prompts using these templates. - -If you are viewing this document on GitHub, use the table of contents icon at the upper left to browse the datasets. - - -## Birdsnap - -```bash -classes = [ - 'Acadian Flycatcher', - 'Acorn Woodpecker', - 'Alder Flycatcher', - 'Allens Hummingbird', - 'Altamira Oriole', - 'American Avocet', - 'American Bittern', - 'American Black Duck', - 'American Coot', - 'American Crow', - 'American Dipper', - 'American Golden Plover', - 'American Goldfinch', - 'American Kestrel', - 'American Oystercatcher', - 'American Pipit', - 'American Redstart', - 'American Robin', - 'American Three toed Woodpecker', - 'American Tree Sparrow', - 'American White Pelican', - 'American Wigeon', - 'American Woodcock', - 'Anhinga', - 'Annas Hummingbird', - 'Arctic Tern', - 'Ash throated Flycatcher', - 'Audubons Oriole', - 'Bairds Sandpiper', - 'Bald Eagle', - 'Baltimore Oriole', - 'Band tailed Pigeon', - 'Barn Swallow', - 'Barred Owl', - 'Barrows Goldeneye', - 'Bay breasted Warbler', - 'Bells Vireo', - 'Belted Kingfisher', - 'Bewicks Wren', - 'Black Guillemot', - 'Black Oystercatcher', - 'Black Phoebe', - 'Black Rosy Finch', - 'Black Scoter', - 'Black Skimmer', - 'Black Tern', - 'Black Turnstone', - 'Black Vulture', - 'Black and white Warbler', - 'Black backed Woodpecker', - 'Black bellied Plover', - 'Black billed Cuckoo', - 'Black billed Magpie', - 'Black capped Chickadee', - 'Black chinned Hummingbird', - 'Black chinned Sparrow', - 'Black crested Titmouse', - 'Black crowned Night Heron', - 'Black headed Grosbeak', - 'Black legged Kittiwake', - 'Black necked Stilt', - 'Black throated Blue Warbler', - 'Black throated Gray Warbler', - 'Black throated Green Warbler', - 'Black throated Sparrow', - 'Blackburnian Warbler', - 'Blackpoll Warbler', - 'Blue Grosbeak', - 'Blue Jay', - 'Blue gray Gnatcatcher', - 'Blue headed Vireo', - 'Blue winged Teal', - 'Blue winged Warbler', - 'Boat tailed Grackle', - 'Bobolink', - 'Bohemian Waxwing', - 'Bonapartes Gull', - 'Boreal Chickadee', - 'Brandts Cormorant', - 'Brant', - 'Brewers Blackbird', - 'Brewers Sparrow', - 'Bridled Titmouse', 
- 'Broad billed Hummingbird', - 'Broad tailed Hummingbird', - 'Broad winged Hawk', - 'Bronzed Cowbird', - 'Brown Creeper', - 'Brown Pelican', - 'Brown Thrasher', - 'Brown capped Rosy Finch', - 'Brown crested Flycatcher', - 'Brown headed Cowbird', - 'Brown headed Nuthatch', - 'Bufflehead', - 'Bullocks Oriole', - 'Burrowing Owl', - 'Bushtit', - 'Cackling Goose', - 'Cactus Wren', - 'California Gull', - 'California Quail', - 'California Thrasher', - 'California Towhee', - 'Calliope Hummingbird', - 'Canada Goose', - 'Canada Warbler', - 'Canvasback', - 'Canyon Towhee', - 'Canyon Wren', - 'Cape May Warbler', - 'Carolina Chickadee', - 'Carolina Wren', - 'Caspian Tern', - 'Cassins Finch', - 'Cassins Kingbird', - 'Cassins Sparrow', - 'Cassins Vireo', - 'Cattle Egret', - 'Cave Swallow', - 'Cedar Waxwing', - 'Cerulean Warbler', - 'Chestnut backed Chickadee', - 'Chestnut collared Longspur', - 'Chestnut sided Warbler', - 'Chihuahuan Raven', - 'Chimney Swift', - 'Chipping Sparrow', - 'Cinnamon Teal', - 'Clapper Rail', - 'Clarks Grebe', - 'Clarks Nutcracker', - 'Clay colored Sparrow', - 'Cliff Swallow', - 'Common Black Hawk', - 'Common Eider', - 'Common Gallinule', - 'Common Goldeneye', - 'Common Grackle', - 'Common Ground Dove', - 'Common Loon', - 'Common Merganser', - 'Common Murre', - 'Common Nighthawk', - 'Common Raven', - 'Common Redpoll', - 'Common Tern', - 'Common Yellowthroat', - 'Connecticut Warbler', - 'Coopers Hawk', - 'Cordilleran Flycatcher', - 'Costas Hummingbird', - 'Couchs Kingbird', - 'Crested Caracara', - 'Curve billed Thrasher', - 'Dark eyed Junco', - 'Dickcissel', - 'Double crested Cormorant', - 'Downy Woodpecker', - 'Dunlin', - 'Dusky Flycatcher', - 'Dusky Grouse', - 'Eared Grebe', - 'Eastern Bluebird', - 'Eastern Kingbird', - 'Eastern Meadowlark', - 'Eastern Phoebe', - 'Eastern Screech Owl', - 'Eastern Towhee', - 'Eastern Wood Pewee', - 'Elegant Trogon', - 'Elf Owl', - 'Eurasian Collared Dove', - 'Eurasian Wigeon', - 'European Starling', - 'Evening Grosbeak', - 'Ferruginous Hawk', - 'Ferruginous Pygmy Owl', - 'Field Sparrow', - 'Fish Crow', - 'Florida Scrub Jay', - 'Forsters Tern', - 'Fox Sparrow', - 'Franklins Gull', - 'Fulvous Whistling Duck', - 'Gadwall', - 'Gambels Quail', - 'Gila Woodpecker', - 'Glaucous Gull', - 'Glaucous winged Gull', - 'Glossy Ibis', - 'Golden Eagle', - 'Golden crowned Kinglet', - 'Golden crowned Sparrow', - 'Golden fronted Woodpecker', - 'Golden winged Warbler', - 'Grasshopper Sparrow', - 'Gray Catbird', - 'Gray Flycatcher', - 'Gray Jay', - 'Gray Kingbird', - 'Gray cheeked Thrush', - 'Gray crowned Rosy Finch', - 'Great Black backed Gull', - 'Great Blue Heron', - 'Great Cormorant', - 'Great Crested Flycatcher', - 'Great Egret', - 'Great Gray Owl', - 'Great Horned Owl', - 'Great Kiskadee', - 'Great tailed Grackle', - 'Greater Prairie Chicken', - 'Greater Roadrunner', - 'Greater Sage Grouse', - 'Greater Scaup', - 'Greater White fronted Goose', - 'Greater Yellowlegs', - 'Green Jay', - 'Green tailed Towhee', - 'Green winged Teal', - 'Groove billed Ani', - 'Gull billed Tern', - 'Hairy Woodpecker', - 'Hammonds Flycatcher', - 'Harlequin Duck', - 'Harriss Hawk', - 'Harriss Sparrow', - 'Heermanns Gull', - 'Henslows Sparrow', - 'Hepatic Tanager', - 'Hermit Thrush', - 'Herring Gull', - 'Hoary Redpoll', - 'Hooded Merganser', - 'Hooded Oriole', - 'Hooded Warbler', - 'Horned Grebe', - 'Horned Lark', - 'House Finch', - 'House Sparrow', - 'House Wren', - 'Huttons Vireo', - 'Iceland Gull', - 'Inca Dove', - 'Indigo Bunting', - 'Killdeer', - 'King Rail', - 'Ladder backed 
Woodpecker', - 'Lapland Longspur', - 'Lark Bunting', - 'Lark Sparrow', - 'Laughing Gull', - 'Lazuli Bunting', - 'Le Contes Sparrow', - 'Least Bittern', - 'Least Flycatcher', - 'Least Grebe', - 'Least Sandpiper', - 'Least Tern', - 'Lesser Goldfinch', - 'Lesser Nighthawk', - 'Lesser Scaup', - 'Lesser Yellowlegs', - 'Lewiss Woodpecker', - 'Limpkin', - 'Lincolns Sparrow', - 'Little Blue Heron', - 'Loggerhead Shrike', - 'Long billed Curlew', - 'Long billed Dowitcher', - 'Long billed Thrasher', - 'Long eared Owl', - 'Long tailed Duck', - 'Louisiana Waterthrush', - 'Magnificent Frigatebird', - 'Magnolia Warbler', - 'Mallard', - 'Marbled Godwit', - 'Marsh Wren', - 'Merlin', - 'Mew Gull', - 'Mexican Jay', - 'Mississippi Kite', - 'Monk Parakeet', - 'Mottled Duck', - 'Mountain Bluebird', - 'Mountain Chickadee', - 'Mountain Plover', - 'Mourning Dove', - 'Mourning Warbler', - 'Muscovy Duck', - 'Mute Swan', - 'Nashville Warbler', - 'Nelsons Sparrow', - 'Neotropic Cormorant', - 'Northern Bobwhite', - 'Northern Cardinal', - 'Northern Flicker', - 'Northern Gannet', - 'Northern Goshawk', - 'Northern Harrier', - 'Northern Hawk Owl', - 'Northern Mockingbird', - 'Northern Parula', - 'Northern Pintail', - 'Northern Rough winged Swallow', - 'Northern Saw whet Owl', - 'Northern Shrike', - 'Northern Waterthrush', - 'Nuttalls Woodpecker', - 'Oak Titmouse', - 'Olive Sparrow', - 'Olive sided Flycatcher', - 'Orange crowned Warbler', - 'Orchard Oriole', - 'Osprey', - 'Ovenbird', - 'Pacific Golden Plover', - 'Pacific Loon', - 'Pacific Wren', - 'Pacific slope Flycatcher', - 'Painted Bunting', - 'Painted Redstart', - 'Palm Warbler', - 'Pectoral Sandpiper', - 'Peregrine Falcon', - 'Phainopepla', - 'Philadelphia Vireo', - 'Pied billed Grebe', - 'Pigeon Guillemot', - 'Pileated Woodpecker', - 'Pine Grosbeak', - 'Pine Siskin', - 'Pine Warbler', - 'Piping Plover', - 'Plumbeous Vireo', - 'Prairie Falcon', - 'Prairie Warbler', - 'Prothonotary Warbler', - 'Purple Finch', - 'Purple Gallinule', - 'Purple Martin', - 'Purple Sandpiper', - 'Pygmy Nuthatch', - 'Pyrrhuloxia', - 'Red Crossbill', - 'Red Knot', - 'Red Phalarope', - 'Red bellied Woodpecker', - 'Red breasted Merganser', - 'Red breasted Nuthatch', - 'Red breasted Sapsucker', - 'Red cockaded Woodpecker', - 'Red eyed Vireo', - 'Red headed Woodpecker', - 'Red naped Sapsucker', - 'Red necked Grebe', - 'Red necked Phalarope', - 'Red shouldered Hawk', - 'Red tailed Hawk', - 'Red throated Loon', - 'Red winged Blackbird', - 'Reddish Egret', - 'Redhead', - 'Ring billed Gull', - 'Ring necked Duck', - 'Ring necked Pheasant', - 'Rock Pigeon', - 'Rock Ptarmigan', - 'Rock Sandpiper', - 'Rock Wren', - 'Rose breasted Grosbeak', - 'Roseate Tern', - 'Rosss Goose', - 'Rough legged Hawk', - 'Royal Tern', - 'Ruby crowned Kinglet', - 'Ruby throated Hummingbird', - 'Ruddy Duck', - 'Ruddy Turnstone', - 'Ruffed Grouse', - 'Rufous Hummingbird', - 'Rufous crowned Sparrow', - 'Rusty Blackbird', - 'Sage Thrasher', - 'Saltmarsh Sparrow', - 'Sanderling', - 'Sandhill Crane', - 'Sandwich Tern', - 'Says Phoebe', - 'Scaled Quail', - 'Scarlet Tanager', - 'Scissor tailed Flycatcher', - 'Scotts Oriole', - 'Seaside Sparrow', - 'Sedge Wren', - 'Semipalmated Plover', - 'Semipalmated Sandpiper', - 'Sharp shinned Hawk', - 'Sharp tailed Grouse', - 'Short billed Dowitcher', - 'Short eared Owl', - 'Snail Kite', - 'Snow Bunting', - 'Snow Goose', - 'Snowy Egret', - 'Snowy Owl', - 'Snowy Plover', - 'Solitary Sandpiper', - 'Song Sparrow', - 'Sooty Grouse', - 'Sora', - 'Spotted Owl', - 'Spotted Sandpiper', - 'Spotted Towhee', 
- 'Spruce Grouse', - 'Stellers Jay', - 'Stilt Sandpiper', - 'Summer Tanager', - 'Surf Scoter', - 'Surfbird', - 'Swainsons Hawk', - 'Swainsons Thrush', - 'Swallow tailed Kite', - 'Swamp Sparrow', - 'Tennessee Warbler', - 'Thayers Gull', - 'Townsends Solitaire', - 'Townsends Warbler', - 'Tree Swallow', - 'Tricolored Heron', - 'Tropical Kingbird', - 'Trumpeter Swan', - 'Tufted Titmouse', - 'Tundra Swan', - 'Turkey Vulture', - 'Upland Sandpiper', - 'Varied Thrush', - 'Veery', - 'Verdin', - 'Vermilion Flycatcher', - 'Vesper Sparrow', - 'Violet green Swallow', - 'Virginia Rail', - 'Wandering Tattler', - 'Warbling Vireo', - 'Western Bluebird', - 'Western Grebe', - 'Western Gull', - 'Western Kingbird', - 'Western Meadowlark', - 'Western Sandpiper', - 'Western Screech Owl', - 'Western Scrub Jay', - 'Western Tanager', - 'Western Wood Pewee', - 'Whimbrel', - 'White Ibis', - 'White breasted Nuthatch', - 'White crowned Sparrow', - 'White eyed Vireo', - 'White faced Ibis', - 'White headed Woodpecker', - 'White rumped Sandpiper', - 'White tailed Hawk', - 'White tailed Kite', - 'White tailed Ptarmigan', - 'White throated Sparrow', - 'White throated Swift', - 'White winged Crossbill', - 'White winged Dove', - 'White winged Scoter', - 'Wild Turkey', - 'Willet', - 'Williamsons Sapsucker', - 'Willow Flycatcher', - 'Willow Ptarmigan', - 'Wilsons Phalarope', - 'Wilsons Plover', - 'Wilsons Snipe', - 'Wilsons Warbler', - 'Winter Wren', - 'Wood Stork', - 'Wood Thrush', - 'Worm eating Warbler', - 'Wrentit', - 'Yellow Warbler', - 'Yellow bellied Flycatcher', - 'Yellow bellied Sapsucker', - 'Yellow billed Cuckoo', - 'Yellow billed Magpie', - 'Yellow breasted Chat', - 'Yellow crowned Night Heron', - 'Yellow eyed Junco', - 'Yellow headed Blackbird', - 'Yellow rumped Warbler', - 'Yellow throated Vireo', - 'Yellow throated Warbler', - 'Zone tailed Hawk', -] - -templates = [ - 'a photo of a {}, a type of bird.', -] -``` - - - -## CIFAR10 - -```bash -classes = [ - 'airplane', - 'automobile', - 'bird', - 'cat', - 'deer', - 'dog', - 'frog', - 'horse', - 'ship', - 'truck', -] - -templates = [ - 'a photo of a {}.', - 'a blurry photo of a {}.', - 'a black and white photo of a {}.', - 'a low contrast photo of a {}.', - 'a high contrast photo of a {}.', - 'a bad photo of a {}.', - 'a good photo of a {}.', - 'a photo of a small {}.', - 'a photo of a big {}.', - 'a photo of the {}.', - 'a blurry photo of the {}.', - 'a black and white photo of the {}.', - 'a low contrast photo of the {}.', - 'a high contrast photo of the {}.', - 'a bad photo of the {}.', - 'a good photo of the {}.', - 'a photo of the small {}.', - 'a photo of the big {}.', -] -``` - - - -## CIFAR100 - -```bash -classes = [ - 'apple', - 'aquarium fish', - 'baby', - 'bear', - 'beaver', - 'bed', - 'bee', - 'beetle', - 'bicycle', - 'bottle', - 'bowl', - 'boy', - 'bridge', - 'bus', - 'butterfly', - 'camel', - 'can', - 'castle', - 'caterpillar', - 'cattle', - 'chair', - 'chimpanzee', - 'clock', - 'cloud', - 'cockroach', - 'couch', - 'crab', - 'crocodile', - 'cup', - 'dinosaur', - 'dolphin', - 'elephant', - 'flatfish', - 'forest', - 'fox', - 'girl', - 'hamster', - 'house', - 'kangaroo', - 'keyboard', - 'lamp', - 'lawn mower', - 'leopard', - 'lion', - 'lizard', - 'lobster', - 'man', - 'maple tree', - 'motorcycle', - 'mountain', - 'mouse', - 'mushroom', - 'oak tree', - 'orange', - 'orchid', - 'otter', - 'palm tree', - 'pear', - 'pickup truck', - 'pine tree', - 'plain', - 'plate', - 'poppy', - 'porcupine', - 'possum', - 'rabbit', - 'raccoon', - 'ray', - 'road', - 'rocket', - 
'rose', - 'sea', - 'seal', - 'shark', - 'shrew', - 'skunk', - 'skyscraper', - 'snail', - 'snake', - 'spider', - 'squirrel', - 'streetcar', - 'sunflower', - 'sweet pepper', - 'table', - 'tank', - 'telephone', - 'television', - 'tiger', - 'tractor', - 'train', - 'trout', - 'tulip', - 'turtle', - 'wardrobe', - 'whale', - 'willow tree', - 'wolf', - 'woman', - 'worm', -] - -templates = [ - 'a photo of a {}.', - 'a blurry photo of a {}.', - 'a black and white photo of a {}.', - 'a low contrast photo of a {}.', - 'a high contrast photo of a {}.', - 'a bad photo of a {}.', - 'a good photo of a {}.', - 'a photo of a small {}.', - 'a photo of a big {}.', - 'a photo of the {}.', - 'a blurry photo of the {}.', - 'a black and white photo of the {}.', - 'a low contrast photo of the {}.', - 'a high contrast photo of the {}.', - 'a bad photo of the {}.', - 'a good photo of the {}.', - 'a photo of the small {}.', - 'a photo of the big {}.', -] -``` - - - -## CLEVRCounts - -```bash -classes = [ - '10', - '3', - '4', - '5', - '6', - '7', - '8', - '9', -] - -templates = [ - 'a photo of {} objects.', -] -``` - - - -## Caltech101 - -```bash -classes = [ - 'background', - 'off-center face', - 'centered face', - 'leopard', - 'motorbike', - 'accordion', - 'airplane', - 'anchor', - 'ant', - 'barrel', - 'bass', - 'beaver', - 'binocular', - 'bonsai', - 'brain', - 'brontosaurus', - 'buddha', - 'butterfly', - 'camera', - 'cannon', - 'side of a car', - 'ceiling fan', - 'cellphone', - 'chair', - 'chandelier', - 'body of a cougar cat', - 'face of a cougar cat', - 'crab', - 'crayfish', - 'crocodile', - 'head of a crocodile', - 'cup', - 'dalmatian', - 'dollar bill', - 'dolphin', - 'dragonfly', - 'electric guitar', - 'elephant', - 'emu', - 'euphonium', - 'ewer', - 'ferry', - 'flamingo', - 'head of a flamingo', - 'garfield', - 'gerenuk', - 'gramophone', - 'grand piano', - 'hawksbill', - 'headphone', - 'hedgehog', - 'helicopter', - 'ibis', - 'inline skate', - 'joshua tree', - 'kangaroo', - 'ketch', - 'lamp', - 'laptop', - 'llama', - 'lobster', - 'lotus', - 'mandolin', - 'mayfly', - 'menorah', - 'metronome', - 'minaret', - 'nautilus', - 'octopus', - 'okapi', - 'pagoda', - 'panda', - 'pigeon', - 'pizza', - 'platypus', - 'pyramid', - 'revolver', - 'rhino', - 'rooster', - 'saxophone', - 'schooner', - 'scissors', - 'scorpion', - 'sea horse', - 'snoopy (cartoon beagle)', - 'soccer ball', - 'stapler', - 'starfish', - 'stegosaurus', - 'stop sign', - 'strawberry', - 'sunflower', - 'tick', - 'trilobite', - 'umbrella', - 'watch', - 'water lilly', - 'wheelchair', - 'wild cat', - 'windsor chair', - 'wrench', - 'yin and yang symbol', -] - -templates = [ - 'a photo of a {}.', - 'a painting of a {}.', - 'a plastic {}.', - 'a sculpture of a {}.', - 'a sketch of a {}.', - 'a tattoo of a {}.', - 'a toy {}.', - 'a rendition of a {}.', - 'a embroidered {}.', - 'a cartoon {}.', - 'a {} in a video game.', - 'a plushie {}.', - 'a origami {}.', - 'art of a {}.', - 'graffiti of a {}.', - 'a drawing of a {}.', - 'a doodle of a {}.', - 'a photo of the {}.', - 'a painting of the {}.', - 'the plastic {}.', - 'a sculpture of the {}.', - 'a sketch of the {}.', - 'a tattoo of the {}.', - 'the toy {}.', - 'a rendition of the {}.', - 'the embroidered {}.', - 'the cartoon {}.', - 'the {} in a video game.', - 'the plushie {}.', - 'the origami {}.', - 'art of the {}.', - 'graffiti of the {}.', - 'a drawing of the {}.', - 'a doodle of the {}.', -] -``` - - - -## Country211 - -```bash -classes = [ - 'Andorra', - 'United Arab Emirates', - 'Afghanistan', - 'Antigua 
and Barbuda', - 'Anguilla', - 'Albania', - 'Armenia', - 'Angola', - 'Antarctica', - 'Argentina', - 'Austria', - 'Australia', - 'Aruba', - 'Aland Islands', - 'Azerbaijan', - 'Bosnia and Herzegovina', - 'Barbados', - 'Bangladesh', - 'Belgium', - 'Burkina Faso', - 'Bulgaria', - 'Bahrain', - 'Benin', - 'Bermuda', - 'Brunei Darussalam', - 'Bolivia', - 'Bonaire, Saint Eustatius and Saba', - 'Brazil', - 'Bahamas', - 'Bhutan', - 'Botswana', - 'Belarus', - 'Belize', - 'Canada', - 'DR Congo', - 'Central African Republic', - 'Switzerland', - "Cote d'Ivoire", - 'Cook Islands', - 'Chile', - 'Cameroon', - 'China', - 'Colombia', - 'Costa Rica', - 'Cuba', - 'Cabo Verde', - 'Curacao', - 'Cyprus', - 'Czech Republic', - 'Germany', - 'Denmark', - 'Dominica', - 'Dominican Republic', - 'Algeria', - 'Ecuador', - 'Estonia', - 'Egypt', - 'Spain', - 'Ethiopia', - 'Finland', - 'Fiji', - 'Falkland Islands', - 'Faeroe Islands', - 'France', - 'Gabon', - 'United Kingdom', - 'Grenada', - 'Georgia', - 'French Guiana', - 'Guernsey', - 'Ghana', - 'Gibraltar', - 'Greenland', - 'Gambia', - 'Guadeloupe', - 'Greece', - 'South Georgia and South Sandwich Is.', - 'Guatemala', - 'Guam', - 'Guyana', - 'Hong Kong', - 'Honduras', - 'Croatia', - 'Haiti', - 'Hungary', - 'Indonesia', - 'Ireland', - 'Israel', - 'Isle of Man', - 'India', - 'Iraq', - 'Iran', - 'Iceland', - 'Italy', - 'Jersey', - 'Jamaica', - 'Jordan', - 'Japan', - 'Kenya', - 'Kyrgyz Republic', - 'Cambodia', - 'St. Kitts and Nevis', - 'North Korea', - 'South Korea', - 'Kuwait', - 'Cayman Islands', - 'Kazakhstan', - 'Laos', - 'Lebanon', - 'St. Lucia', - 'Liechtenstein', - 'Sri Lanka', - 'Liberia', - 'Lithuania', - 'Luxembourg', - 'Latvia', - 'Libya', - 'Morocco', - 'Monaco', - 'Moldova', - 'Montenegro', - 'Saint-Martin', - 'Madagascar', - 'Macedonia', - 'Mali', - 'Myanmar', - 'Mongolia', - 'Macau', - 'Martinique', - 'Mauritania', - 'Malta', - 'Mauritius', - 'Maldives', - 'Malawi', - 'Mexico', - 'Malaysia', - 'Mozambique', - 'Namibia', - 'New Caledonia', - 'Nigeria', - 'Nicaragua', - 'Netherlands', - 'Norway', - 'Nepal', - 'New Zealand', - 'Oman', - 'Panama', - 'Peru', - 'French Polynesia', - 'Papua New Guinea', - 'Philippines', - 'Pakistan', - 'Poland', - 'Puerto Rico', - 'Palestine', - 'Portugal', - 'Palau', - 'Paraguay', - 'Qatar', - 'Reunion', - 'Romania', - 'Serbia', - 'Russia', - 'Rwanda', - 'Saudi Arabia', - 'Solomon Islands', - 'Seychelles', - 'Sudan', - 'Sweden', - 'Singapore', - 'St. 
Helena', - 'Slovenia', - 'Svalbard and Jan Mayen Islands', - 'Slovakia', - 'Sierra Leone', - 'San Marino', - 'Senegal', - 'Somalia', - 'South Sudan', - 'El Salvador', - 'Sint Maarten', - 'Syria', - 'Eswatini', - 'Togo', - 'Thailand', - 'Tajikistan', - 'Timor-Leste', - 'Turkmenistan', - 'Tunisia', - 'Tonga', - 'Turkey', - 'Trinidad and Tobago', - 'Taiwan', - 'Tanzania', - 'Ukraine', - 'Uganda', - 'United States', - 'Uruguay', - 'Uzbekistan', - 'Vatican', - 'Venezuela', - 'British Virgin Islands', - 'United States Virgin Islands', - 'Vietnam', - 'Vanuatu', - 'Samoa', - 'Kosovo', - 'Yemen', - 'South Africa', - 'Zambia', - 'Zimbabwe', -] - -templates = [ - 'a photo i took in {}.', - 'a photo i took while visiting {}.', - 'a photo from my home country of {}.', - 'a photo from my visit to {}.', - 'a photo showing the country of {}.', -] -``` - - - -## DescribableTextures - -```bash -classes = [ - 'banded', - 'blotchy', - 'braided', - 'bubbly', - 'bumpy', - 'chequered', - 'cobwebbed', - 'cracked', - 'crosshatched', - 'crystalline', - 'dotted', - 'fibrous', - 'flecked', - 'freckled', - 'frilly', - 'gauzy', - 'grid', - 'grooved', - 'honeycombed', - 'interlaced', - 'knitted', - 'lacelike', - 'lined', - 'marbled', - 'matted', - 'meshed', - 'paisley', - 'perforated', - 'pitted', - 'pleated', - 'polka-dotted', - 'porous', - 'potholed', - 'scaly', - 'smeared', - 'spiralled', - 'sprinkled', - 'stained', - 'stratified', - 'striped', - 'studded', - 'swirly', - 'veined', - 'waffled', - 'woven', - 'wrinkled', - 'zigzagged', -] - -templates = [ - 'a photo of a {} texture.', - 'a photo of a {} pattern.', - 'a photo of a {} thing.', - 'a photo of a {} object.', - 'a photo of the {} texture.', - 'a photo of the {} pattern.', - 'a photo of the {} thing.', - 'a photo of the {} object.', -] -``` - - - -## EuroSAT - -```bash -classes = [ - 'forest', - 'permanent crop land', - 'residential buildings or homes or apartments', - 'river', - 'pasture land', - 'lake or sea', - 'brushland or shrubland', - 'annual crop land', - 'industrial buildings or commercial buildings', - 'highway or road', -] - -templates = [ - 'a centered satellite photo of {}.', - 'a centered satellite photo of a {}.', - 'a centered satellite photo of the {}.', -] -``` - - - -## FGVCAircraft - -```bash -classes = [ - '707-320', - '727-200', - '737-200', - '737-300', - '737-400', - '737-500', - '737-600', - '737-700', - '737-800', - '737-900', - '747-100', - '747-200', - '747-300', - '747-400', - '757-200', - '757-300', - '767-200', - '767-300', - '767-400', - '777-200', - '777-300', - 'A300B4', - 'A310', - 'A318', - 'A319', - 'A320', - 'A321', - 'A330-200', - 'A330-300', - 'A340-200', - 'A340-300', - 'A340-500', - 'A340-600', - 'A380', - 'ATR-42', - 'ATR-72', - 'An-12', - 'BAE 146-200', - 'BAE 146-300', - 'BAE-125', - 'Beechcraft 1900', - 'Boeing 717', - 'C-130', - 'C-47', - 'CRJ-200', - 'CRJ-700', - 'CRJ-900', - 'Cessna 172', - 'Cessna 208', - 'Cessna 525', - 'Cessna 560', - 'Challenger 600', - 'DC-10', - 'DC-3', - 'DC-6', - 'DC-8', - 'DC-9-30', - 'DH-82', - 'DHC-1', - 'DHC-6', - 'DHC-8-100', - 'DHC-8-300', - 'DR-400', - 'Dornier 328', - 'E-170', - 'E-190', - 'E-195', - 'EMB-120', - 'ERJ 135', - 'ERJ 145', - 'Embraer Legacy 600', - 'Eurofighter Typhoon', - 'F-16A/B', - 'F/A-18', - 'Falcon 2000', - 'Falcon 900', - 'Fokker 100', - 'Fokker 50', - 'Fokker 70', - 'Global Express', - 'Gulfstream IV', - 'Gulfstream V', - 'Hawk T1', - 'Il-76', - 'L-1011', - 'MD-11', - 'MD-80', - 'MD-87', - 'MD-90', - 'Metroliner', - 'Model B200', - 'PA-28', - 'SR-20', - 
'Saab 2000', - 'Saab 340', - 'Spitfire', - 'Tornado', - 'Tu-134', - 'Tu-154', - 'Yak-42', -] - -templates = [ - 'a photo of a {}, a type of aircraft.', - 'a photo of the {}, a type of aircraft.', -] -``` - - - -## FacialEmotionRecognition2013 - -```bash -classes = [ - ['angry'], - ['disgusted'], - ['fearful'], - ['happy', 'smiling'], - ['sad', 'depressed'], - ['surprised', 'shocked', 'spooked'], - ['neutral', 'bored'], -] - -templates = [ - 'a photo of a {} looking face.', - 'a photo of a face showing the emotion: {}.', - 'a photo of a face looking {}.', - 'a face that looks {}.', - 'they look {}.', - 'look at how {} they are.', -] -``` - - - -## Flowers102 - -```bash -classes = [ - 'pink primrose', - 'hard-leaved pocket orchid', - 'canterbury bells', - 'sweet pea', - 'english marigold', - 'tiger lily', - 'moon orchid', - 'bird of paradise', - 'monkshood', - 'globe thistle', - 'snapdragon', - "colt's foot", - 'king protea', - 'spear thistle', - 'yellow iris', - 'globe flower', - 'purple coneflower', - 'peruvian lily', - 'balloon flower', - 'giant white arum lily', - 'fire lily', - 'pincushion flower', - 'fritillary', - 'red ginger', - 'grape hyacinth', - 'corn poppy', - 'prince of wales feathers', - 'stemless gentian', - 'artichoke', - 'sweet william', - 'carnation', - 'garden phlox', - 'love in the mist', - 'mexican aster', - 'alpine sea holly', - 'ruby-lipped cattleya', - 'cape flower', - 'great masterwort', - 'siam tulip', - 'lenten rose', - 'barbeton daisy', - 'daffodil', - 'sword lily', - 'poinsettia', - 'bolero deep blue', - 'wallflower', - 'marigold', - 'buttercup', - 'oxeye daisy', - 'common dandelion', - 'petunia', - 'wild pansy', - 'primula', - 'sunflower', - 'pelargonium', - 'bishop of llandaff', - 'gaura', - 'geranium', - 'orange dahlia', - 'pink and yellow dahlia', - 'cautleya spicata', - 'japanese anemone', - 'black-eyed susan', - 'silverbush', - 'californian poppy', - 'osteospermum', - 'spring crocus', - 'bearded iris', - 'windflower', - 'tree poppy', - 'gazania', - 'azalea', - 'water lily', - 'rose', - 'thorn apple', - 'morning glory', - 'passion flower', - 'lotus', - 'toad lily', - 'anthurium', - 'frangipani', - 'clematis', - 'hibiscus', - 'columbine', - 'desert-rose', - 'tree mallow', - 'magnolia', - 'cyclamen', - 'watercress', - 'canna lily', - 'hippeastrum', - 'bee balm', - 'air plant', - 'foxglove', - 'bougainvillea', - 'camellia', - 'mallow', - 'mexican petunia', - 'bromelia', - 'blanket flower', - 'trumpet creeper', - 'blackberry lily', -] - -templates = [ - 'a photo of a {}, a type of flower.', -] -``` - - - -## Food101 - -```bash -classes = [ - 'apple pie', - 'baby back ribs', - 'baklava', - 'beef carpaccio', - 'beef tartare', - 'beet salad', - 'beignets', - 'bibimbap', - 'bread pudding', - 'breakfast burrito', - 'bruschetta', - 'caesar salad', - 'cannoli', - 'caprese salad', - 'carrot cake', - 'ceviche', - 'cheese plate', - 'cheesecake', - 'chicken curry', - 'chicken quesadilla', - 'chicken wings', - 'chocolate cake', - 'chocolate mousse', - 'churros', - 'clam chowder', - 'club sandwich', - 'crab cakes', - 'creme brulee', - 'croque madame', - 'cup cakes', - 'deviled eggs', - 'donuts', - 'dumplings', - 'edamame', - 'eggs benedict', - 'escargots', - 'falafel', - 'filet mignon', - 'fish and chips', - 'foie gras', - 'french fries', - 'french onion soup', - 'french toast', - 'fried calamari', - 'fried rice', - 'frozen yogurt', - 'garlic bread', - 'gnocchi', - 'greek salad', - 'grilled cheese sandwich', - 'grilled salmon', - 'guacamole', - 'gyoza', - 'hamburger', - 'hot 
and sour soup', - 'hot dog', - 'huevos rancheros', - 'hummus', - 'ice cream', - 'lasagna', - 'lobster bisque', - 'lobster roll sandwich', - 'macaroni and cheese', - 'macarons', - 'miso soup', - 'mussels', - 'nachos', - 'omelette', - 'onion rings', - 'oysters', - 'pad thai', - 'paella', - 'pancakes', - 'panna cotta', - 'peking duck', - 'pho', - 'pizza', - 'pork chop', - 'poutine', - 'prime rib', - 'pulled pork sandwich', - 'ramen', - 'ravioli', - 'red velvet cake', - 'risotto', - 'samosa', - 'sashimi', - 'scallops', - 'seaweed salad', - 'shrimp and grits', - 'spaghetti bolognese', - 'spaghetti carbonara', - 'spring rolls', - 'steak', - 'strawberry shortcake', - 'sushi', - 'tacos', - 'takoyaki', - 'tiramisu', - 'tuna tartare', - 'waffles', -] - -templates = [ - 'a photo of {}, a type of food.', -] -``` - - - -## GTSRB - -```bash -classes = [ - 'red and white circle 20 kph speed limit', - 'red and white circle 30 kph speed limit', - 'red and white circle 50 kph speed limit', - 'red and white circle 60 kph speed limit', - 'red and white circle 70 kph speed limit', - 'red and white circle 80 kph speed limit', - 'end / de-restriction of 80 kph speed limit', - 'red and white circle 100 kph speed limit', - 'red and white circle 120 kph speed limit', - 'red and white circle red car and black car no passing', - 'red and white circle red truck and black car no passing', - 'red and white triangle road intersection warning', - 'white and yellow diamond priority road', - 'red and white upside down triangle yield right-of-way', - 'stop', - 'empty red and white circle', - 'red and white circle no truck entry', - 'red circle with white horizonal stripe no entry', - 'red and white triangle with exclamation mark warning', - 'red and white triangle with black left curve approaching warning', - 'red and white triangle with black right curve approaching warning', - 'red and white triangle with black double curve approaching warning', - 'red and white triangle rough / bumpy road warning', - 'red and white triangle car skidding / slipping warning', - 'red and white triangle with merging / narrow lanes warning', - 'red and white triangle with person digging / construction / road work warning', - 'red and white triangle with traffic light approaching warning', - 'red and white triangle with person walking warning', - 'red and white triangle with child and person walking warning', - 'red and white triangle with bicyle warning', - 'red and white triangle with snowflake / ice warning', - 'red and white triangle with deer warning', - 'white circle with gray strike bar no speed limit', - 'blue circle with white right turn arrow mandatory', - 'blue circle with white left turn arrow mandatory', - 'blue circle with white forward arrow mandatory', - 'blue circle with white forward or right turn arrow mandatory', - 'blue circle with white forward or left turn arrow mandatory', - 'blue circle with white keep right arrow mandatory', - 'blue circle with white keep left arrow mandatory', - 'blue circle with white arrows indicating a traffic circle', - 'white circle with gray strike bar indicating no passing for cars has ended', - 'white circle with gray strike bar indicating no passing for trucks has ended', -] - -templates = [ - 'a zoomed in photo of a "{}" traffic sign.', - 'a centered photo of a "{}" traffic sign.', - 'a close up photo of a "{}" traffic sign.', -] -``` - - - -## HatefulMemes - -```bash -classes = [ - 'meme', - 'hatespeech meme', -] - -templates = [ - 'a {}.', -] -``` - - - -## KITTI - -```bash -classes = [ - 
'a photo i took of a car on my left or right side.', - 'a photo i took with a car nearby.', - 'a photo i took with a car in the distance.', - 'a photo i took with no car.', -] - -templates = [ - '{}', -] -``` - - - -## Kinetics700 - -```bash -classes = [ - 'abseiling', - 'acting in play', - 'adjusting glasses', - 'air drumming', - 'alligator wrestling', - 'answering questions', - 'applauding', - 'applying cream', - 'archaeological excavation', - 'archery', - 'arguing', - 'arm wrestling', - 'arranging flowers', - 'arresting', - 'assembling bicycle', - 'assembling computer', - 'attending conference', - 'auctioning', - 'baby waking up', - 'backflip (human)', - 'baking cookies', - 'bandaging', - 'barbequing', - 'bartending', - 'base jumping', - 'bathing dog', - 'battle rope training', - 'beatboxing', - 'bee keeping', - 'being excited', - 'being in zero gravity', - 'belly dancing', - 'bench pressing', - 'bending back', - 'bending metal', - 'biking through snow', - 'blasting sand', - 'blending fruit', - 'blowdrying hair', - 'blowing bubble gum', - 'blowing glass', - 'blowing leaves', - 'blowing nose', - 'blowing out candles', - 'bobsledding', - 'bodysurfing', - 'bookbinding', - 'bottling', - 'bouncing ball (not juggling)', - 'bouncing on bouncy castle', - 'bouncing on trampoline', - 'bowling', - 'braiding hair', - 'breading or breadcrumbing', - 'breakdancing', - 'breaking boards', - 'breaking glass', - 'breathing fire', - 'brush painting', - 'brushing floor', - 'brushing hair', - 'brushing teeth', - 'building cabinet', - 'building lego', - 'building sandcastle', - 'building shed', - 'bulldozing', - 'bungee jumping', - 'burping', - 'busking', - 'calculating', - 'calligraphy', - 'canoeing or kayaking', - 'capoeira', - 'capsizing', - 'card stacking', - 'card throwing', - 'carrying baby', - 'carrying weight', - 'cartwheeling', - 'carving ice', - 'carving marble', - 'carving pumpkin', - 'carving wood with a knife', - 'casting fishing line', - 'catching fish', - 'catching or throwing baseball', - 'catching or throwing frisbee', - 'catching or throwing softball', - 'celebrating', - 'changing gear in car', - 'changing oil', - 'changing wheel (not on bike)', - 'chasing', - 'checking tires', - 'checking watch', - 'cheerleading', - 'chewing gum', - 'chiseling stone', - 'chiseling wood', - 'chopping meat', - 'chopping wood', - 'clam digging', - 'clapping', - 'clay pottery making', - 'clean and jerk', - 'cleaning gutters', - 'cleaning pool', - 'cleaning shoes', - 'cleaning toilet', - 'cleaning windows', - 'climbing a rope', - 'climbing ladder', - 'climbing tree', - 'closing door', - 'coloring in', - 'combing hair', - 'contact juggling', - 'contorting', - 'cooking chicken', - 'cooking egg', - 'cooking on campfire', - 'cooking sausages (not on barbeque)', - 'cooking scallops', - 'cosplaying', - 'coughing', - 'counting money', - 'country line dancing', - 'cracking back', - 'cracking knuckles', - 'cracking neck', - 'crawling baby', - 'crocheting', - 'crossing eyes', - 'crossing river', - 'crying', - 'cumbia', - 'curling (sport)', - 'curling eyelashes', - 'curling hair', - 'cutting apple', - 'cutting cake', - 'cutting nails', - 'cutting orange', - 'cutting pineapple', - 'cutting watermelon', - 'dancing ballet', - 'dancing charleston', - 'dancing gangnam style', - 'dancing macarena', - 'deadlifting', - 'dealing cards', - 'decorating the christmas tree', - 'decoupage', - 'delivering mail', - 'digging', - 'dining', - 'directing traffic', - 'disc golfing', - 'diving cliff', - 'docking boat', - 'dodgeball', - 'doing 
aerobics', - 'doing jigsaw puzzle', - 'doing laundry', - 'doing nails', - 'doing sudoku', - 'drawing', - 'dribbling basketball', - 'drinking shots', - 'driving car', - 'driving tractor', - 'drooling', - 'drop kicking', - 'drumming fingers', - 'dumpster diving', - 'dunking basketball', - 'dyeing eyebrows', - 'dyeing hair', - 'eating burger', - 'eating cake', - 'eating carrots', - 'eating chips', - 'eating doughnuts', - 'eating hotdog', - 'eating ice cream', - 'eating nachos', - 'eating spaghetti', - 'eating watermelon', - 'egg hunting', - 'embroidering', - 'entering church', - 'exercising arm', - 'exercising with an exercise ball', - 'extinguishing fire', - 'faceplanting', - 'falling off bike', - 'falling off chair', - 'feeding birds', - 'feeding fish', - 'feeding goats', - 'fencing (sport)', - 'fidgeting', - 'filling cake', - 'filling eyebrows', - 'finger snapping', - 'fixing bicycle', - 'fixing hair', - 'flint knapping', - 'flipping bottle', - 'flipping pancake', - 'fly tying', - 'flying kite', - 'folding clothes', - 'folding napkins', - 'folding paper', - 'front raises', - 'frying vegetables', - 'gargling', - 'geocaching', - 'getting a haircut', - 'getting a piercing', - 'getting a tattoo', - 'giving or receiving award', - 'gold panning', - 'golf chipping', - 'golf driving', - 'golf putting', - 'gospel singing in church', - 'grinding meat', - 'grooming cat', - 'grooming dog', - 'grooming horse', - 'gymnastics tumbling', - 'hammer throw', - 'hand washing clothes', - 'head stand', - 'headbanging', - 'headbutting', - 'helmet diving', - 'herding cattle', - 'high fiving', - 'high jump', - 'high kick', - 'historical reenactment', - 'hitting baseball', - 'hockey stop', - 'holding snake', - 'home roasting coffee', - 'hopscotch', - 'hoverboarding', - 'huddling', - 'hugging (not baby)', - 'hugging baby', - 'hula hooping', - 'hurdling', - 'hurling (sport)', - 'ice climbing', - 'ice fishing', - 'ice skating', - 'ice swimming', - 'inflating balloons', - 'installing carpet', - 'ironing', - 'ironing hair', - 'javelin throw', - 'jaywalking', - 'jetskiing', - 'jogging', - 'juggling balls', - 'juggling fire', - 'juggling soccer ball', - 'jumping bicycle', - 'jumping into pool', - 'jumping jacks', - 'jumping sofa', - 'jumpstyle dancing', - 'karaoke', - 'kicking field goal', - 'kicking soccer ball', - 'kissing', - 'kitesurfing', - 'knitting', - 'krumping', - 'land sailing', - 'laughing', - 'lawn mower racing', - 'laying bricks', - 'laying concrete', - 'laying decking', - 'laying stone', - 'laying tiles', - 'leatherworking', - 'letting go of balloon', - 'licking', - 'lifting hat', - 'lighting candle', - 'lighting fire', - 'listening with headphones', - 'lock picking', - 'long jump', - 'longboarding', - 'looking at phone', - 'looking in mirror', - 'luge', - 'lunge', - 'making a cake', - 'making a sandwich', - 'making balloon shapes', - 'making bubbles', - 'making cheese', - 'making horseshoes', - 'making jewelry', - 'making latte art', - 'making paper aeroplanes', - 'making pizza', - 'making slime', - 'making snowman', - 'making sushi', - 'making tea', - 'making the bed', - 'marching', - 'marriage proposal', - 'massaging back', - 'massaging feet', - 'massaging legs', - 'massaging neck', - "massaging person's head", - 'metal detecting', - 'milking cow', - 'milking goat', - 'mixing colours', - 'moon walking', - 'mopping floor', - 'mosh pit dancing', - 'motorcycling', - 'mountain climber (exercise)', - 'moving baby', - 'moving child', - 'moving furniture', - 'mowing lawn', - 'mushroom foraging', - 'needle 
felting', - 'news anchoring', - 'opening bottle (not wine)', - 'opening coconuts', - 'opening door', - 'opening present', - 'opening refrigerator', - 'opening wine bottle', - 'packing', - 'paragliding', - 'parasailing', - 'parkour', - 'passing American football (in game)', - 'passing American football (not in game)', - 'passing soccer ball', - 'peeling apples', - 'peeling banana', - 'peeling potatoes', - 'person collecting garbage', - 'petting animal (not cat)', - 'petting cat', - 'petting horse', - 'photobombing', - 'photocopying', - 'picking apples', - 'picking blueberries', - 'pillow fight', - 'pinching', - 'pirouetting', - 'planing wood', - 'planting trees', - 'plastering', - 'playing accordion', - 'playing american football', - 'playing badminton', - 'playing bagpipes', - 'playing basketball', - 'playing bass guitar', - 'playing beer pong', - 'playing billiards', - 'playing blackjack', - 'playing cards', - 'playing cello', - 'playing checkers', - 'playing chess', - 'playing clarinet', - 'playing controller', - 'playing cricket', - 'playing cymbals', - 'playing darts', - 'playing didgeridoo', - 'playing dominoes', - 'playing drums', - 'playing field hockey', - 'playing flute', - 'playing gong', - 'playing guitar', - 'playing hand clapping games', - 'playing harmonica', - 'playing harp', - 'playing ice hockey', - 'playing keyboard', - 'playing kickball', - 'playing laser tag', - 'playing lute', - 'playing mahjong', - 'playing maracas', - 'playing marbles', - 'playing monopoly', - 'playing netball', - 'playing nose flute', - 'playing oboe', - 'playing ocarina', - 'playing organ', - 'playing paintball', - 'playing pan pipes', - 'playing piano', - 'playing piccolo', - 'playing pinball', - 'playing ping pong', - 'playing poker', - 'playing polo', - 'playing recorder', - 'playing road hockey', - 'playing rounders', - 'playing rubiks cube', - 'playing saxophone', - 'playing scrabble', - 'playing shuffleboard', - 'playing slot machine', - 'playing squash or racquetball', - 'playing tennis', - 'playing trombone', - 'playing trumpet', - 'playing ukulele', - 'playing violin', - 'playing volleyball', - 'playing with trains', - 'playing xylophone', - 'poaching eggs', - 'poking bellybutton', - 'pole vault', - 'polishing furniture', - 'polishing metal', - 'popping balloons', - 'pouring beer', - 'pouring milk', - 'pouring wine', - 'preparing salad', - 'presenting weather forecast', - 'pretending to be a statue', - 'pull ups', - 'pulling espresso shot', - 'pulling rope (game)', - 'pumping fist', - 'pumping gas', - 'punching bag', - 'punching person (boxing)', - 'push up', - 'pushing car', - 'pushing cart', - 'pushing wheelbarrow', - 'pushing wheelchair', - 'putting in contact lenses', - 'putting on eyeliner', - 'putting on foundation', - 'putting on lipstick', - 'putting on mascara', - 'putting on sari', - 'putting on shoes', - 'putting wallpaper on wall', - 'raising eyebrows', - 'reading book', - 'reading newspaper', - 'recording music', - 'repairing puncture', - 'riding a bike', - 'riding camel', - 'riding elephant', - 'riding mechanical bull', - 'riding mule', - 'riding or walking with horse', - 'riding scooter', - 'riding snow blower', - 'riding unicycle', - 'ripping paper', - 'roasting marshmallows', - 'roasting pig', - 'robot dancing', - 'rock climbing', - 'rock scissors paper', - 'roller skating', - 'rolling eyes', - 'rolling pastry', - 'rope pushdown', - 'running on treadmill', - 'sailing', - 'salsa dancing', - 'saluting', - 'sanding floor', - 'sanding wood', - 'sausage making', - 'sawing 
wood', - 'scrambling eggs', - 'scrapbooking', - 'scrubbing face', - 'scuba diving', - 'seasoning food', - 'separating eggs', - 'setting table', - 'sewing', - 'shaking hands', - 'shaking head', - 'shaping bread dough', - 'sharpening knives', - 'sharpening pencil', - 'shaving head', - 'shaving legs', - 'shearing sheep', - 'shining flashlight', - 'shining shoes', - 'shoot dance', - 'shooting basketball', - 'shooting goal (soccer)', - 'shooting off fireworks', - 'shopping', - 'shot put', - 'shouting', - 'shoveling snow', - 'shredding paper', - 'shucking oysters', - 'shuffling cards', - 'shuffling feet', - 'side kick', - 'sieving', - 'sign language interpreting', - 'silent disco', - 'singing', - 'sipping cup', - 'situp', - 'skateboarding', - 'ski ballet', - 'ski jumping', - 'skiing crosscountry', - 'skiing mono', - 'skiing slalom', - 'skipping rope', - 'skipping stone', - 'skydiving', - 'slacklining', - 'slapping', - 'sled dog racing', - 'sleeping', - 'slicing onion', - 'smashing', - 'smelling feet', - 'smoking', - 'smoking hookah', - 'smoking pipe', - 'snatch weight lifting', - 'sneezing', - 'snorkeling', - 'snowboarding', - 'snowkiting', - 'snowmobiling', - 'somersaulting', - 'spelunking', - 'spinning plates', - 'spinning poi', - 'splashing water', - 'spray painting', - 'spraying', - 'springboard diving', - 'square dancing', - 'squat', - 'squeezing orange', - 'stacking cups', - 'stacking dice', - 'standing on hands', - 'staring', - 'steer roping', - 'steering car', - 'sticking tongue out', - 'stomping grapes', - 'stretching arm', - 'stretching leg', - 'sucking lolly', - 'surfing crowd', - 'surfing water', - 'surveying', - 'sweeping floor', - 'swimming backstroke', - 'swimming breast stroke', - 'swimming butterfly stroke', - 'swimming front crawl', - 'swimming with dolphins', - 'swimming with sharks', - 'swing dancing', - 'swinging baseball bat', - 'swinging on something', - 'sword fighting', - 'sword swallowing', - 'tackling', - 'tagging graffiti', - 'tai chi', - 'taking photo', - 'talking on cell phone', - 'tango dancing', - 'tap dancing', - 'tapping guitar', - 'tapping pen', - 'tasting beer', - 'tasting food', - 'tasting wine', - 'testifying', - 'texting', - 'threading needle', - 'throwing axe', - 'throwing ball (not baseball or American football)', - 'throwing discus', - 'throwing knife', - 'throwing snowballs', - 'throwing tantrum', - 'throwing water balloon', - 'tickling', - 'tie dying', - 'tightrope walking', - 'tiptoeing', - 'tobogganing', - 'tossing coin', - 'tossing salad', - 'training dog', - 'trapezing', - 'treating wood', - 'trimming or shaving beard', - 'trimming shrubs', - 'trimming trees', - 'triple jump', - 'twiddling fingers', - 'tying bow tie', - 'tying knot (not on a tie)', - 'tying necktie', - 'tying shoe laces', - 'unboxing', - 'uncorking champagne', - 'unloading truck', - 'using a microscope', - 'using a paint roller', - 'using a power drill', - 'using a sledge hammer', - 'using a wrench', - 'using atm', - 'using bagging machine', - 'using circular saw', - 'using inhaler', - 'using megaphone', - 'using puppets', - 'using remote controller (not gaming)', - 'using segway', - 'vacuuming car', - 'vacuuming floor', - 'visiting the zoo', - 'wading through mud', - 'wading through water', - 'waiting in line', - 'waking up', - 'walking on stilts', - 'walking the dog', - 'walking through snow', - 'walking with crutches', - 'washing dishes', - 'washing feet', - 'washing hair', - 'washing hands', - 'watching tv', - 'water skiing', - 'water sliding', - 'watering plants', - 'waving 
hand', - 'waxing armpits', - 'waxing back', - 'waxing chest', - 'waxing eyebrows', - 'waxing legs', - 'weaving basket', - 'weaving fabric', - 'welding', - 'whistling', - 'windsurfing', - 'winking', - 'wood burning (art)', - 'wrapping present', - 'wrestling', - 'writing', - 'yarn spinning', - 'yawning', - 'yoga', - 'zumba' -] - -templates = [ - 'a photo of {}.', - 'a photo of a person {}.', - 'a photo of a person using {}.', - 'a photo of a person doing {}.', - 'a photo of a person during {}.', - 'a photo of a person performing {}.', - 'a photo of a person practicing {}.', - 'a video of {}.', - 'a video of a person {}.', - 'a video of a person using {}.', - 'a video of a person doing {}.', - 'a video of a person during {}.', - 'a video of a person performing {}.', - 'a video of a person practicing {}.', - 'a example of {}.', - 'a example of a person {}.', - 'a example of a person using {}.', - 'a example of a person doing {}.', - 'a example of a person during {}.', - 'a example of a person performing {}.', - 'a example of a person practicing {}.', - 'a demonstration of {}.', - 'a demonstration of a person {}.', - 'a demonstration of a person using {}.', - 'a demonstration of a person doing {}.', - 'a demonstration of a person during {}.', - 'a demonstration of a person performing {}.', - 'a demonstration of a person practicing {}.', -] -``` - - - -## MNIST - -```bash -classes = [ - '0', - '1', - '2', - '3', - '4', - '5', - '6', - '7', - '8', - '9', -] - -templates = [ - 'a photo of the number: "{}".', -] -``` - - - -## OxfordPets - -```bash -classes = [ - 'Abyssinian', - 'Bengal', - 'Birman', - 'Bombay', - 'British Shorthair', - 'Egyptian Mau', - 'Maine Coon', - 'Persian', - 'Ragdoll', - 'Russian Blue', - 'Siamese', - 'Sphynx', - 'american bulldog', - 'american pit bull terrier', - 'basset hound', - 'beagle', - 'boxer', - 'chihuahua', - 'english cocker spaniel', - 'english setter', - 'german shorthaired', - 'great pyrenees', - 'havanese', - 'japanese chin', - 'keeshond', - 'leonberger', - 'miniature pinscher', - 'newfoundland', - 'pomeranian', - 'pug', - 'saint bernard', - 'samoyed', - 'scottish terrier', - 'shiba inu', - 'staffordshire bull terrier', - 'wheaten terrier', - 'yorkshire terrier', -] - -templates = [ - 'a photo of a {}, a type of pet.', -] -``` - - - -## PascalVOC2007 - -```bash -classes = [ - 'aeroplane', - 'bicycle', - 'bird', - 'boat', - 'bottle', - 'bus', - 'car', - 'cat', - 'chair', - 'cow', - 'dog', - 'horse', - 'motorbike', - 'person', - 'sheep', - 'sofa', - 'diningtable', - 'pottedplant', - 'train', - 'tvmonitor', -] - -templates = [ - 'a photo of a {}.', -] -``` - - - -## PatchCamelyon - -```bash -classes = [ - 'lymph node', - 'lymph node containing metastatic tumor tissue', -] - -templates = [ - 'this is a photo of {}', -] -``` - - - -## RESISC45 - -```bash -classes = [ - 'airplane', - 'airport', - 'baseball diamond', - 'basketball court', - 'beach', - 'bridge', - 'chaparral', - 'church', - 'circular farmland', - 'cloud', - 'commercial area', - 'dense residential', - 'desert', - 'forest', - 'freeway', - 'golf course', - 'ground track field', - 'harbor', - 'industrial area', - 'intersection', - 'island', - 'lake', - 'meadow', - 'medium residential', - 'mobile home park', - 'mountain', - 'overpass', - 'palace', - 'parking lot', - 'railway', - 'railway station', - 'rectangular farmland', - 'river', - 'roundabout', - 'runway', - 'sea ice', - 'ship', - 'snowberg', - 'sparse residential', - 'stadium', - 'storage tank', - 'tennis court', - 'terrace', - 'thermal power 
station', - 'wetland', -] - -templates = [ - 'satellite imagery of {}.', - 'aerial imagery of {}.', - 'satellite photo of {}.', - 'aerial photo of {}.', - 'satellite view of {}.', - 'aerial view of {}.', - 'satellite imagery of a {}.', - 'aerial imagery of a {}.', - 'satellite photo of a {}.', - 'aerial photo of a {}.', - 'satellite view of a {}.', - 'aerial view of a {}.', - 'satellite imagery of the {}.', - 'aerial imagery of the {}.', - 'satellite photo of the {}.', - 'aerial photo of the {}.', - 'satellite view of the {}.', - 'aerial view of the {}.', -] -``` - - - -## SST2 - -```bash -classes = [ - 'negative', - 'positive', -] - -templates = [ - 'a {} review of a movie.', -] -``` - - - -## STL10 - -```bash -classes = [ - 'airplane', - 'bird', - 'car', - 'cat', - 'deer', - 'dog', - 'horse', - 'monkey', - 'ship', - 'truck', -] - -templates = [ - 'a photo of a {}.', - 'a photo of the {}.', -] -``` - - - -## SUN397 - -```bash -classes = [ - 'abbey', - 'airplane cabin', - 'airport terminal', - 'alley', - 'amphitheater', - 'amusement arcade', - 'amusement park', - 'anechoic chamber', - 'apartment building outdoor', - 'apse indoor', - 'aquarium', - 'aqueduct', - 'arch', - 'archive', - 'arrival gate outdoor', - 'art gallery', - 'art school', - 'art studio', - 'assembly line', - 'athletic field outdoor', - 'atrium public', - 'attic', - 'auditorium', - 'auto factory', - 'badlands', - 'badminton court indoor', - 'baggage claim', - 'bakery shop', - 'balcony exterior', - 'balcony interior', - 'ball pit', - 'ballroom', - 'bamboo forest', - 'banquet hall', - 'bar', - 'barn', - 'barndoor', - 'baseball field', - 'basement', - 'basilica', - 'basketball court outdoor', - 'bathroom', - 'batters box', - 'bayou', - 'bazaar indoor', - 'bazaar outdoor', - 'beach', - 'beauty salon', - 'bedroom', - 'berth', - 'biology laboratory', - 'bistro indoor', - 'boardwalk', - 'boat deck', - 'boathouse', - 'bookstore', - 'booth indoor', - 'botanical garden', - 'bow window indoor', - 'bow window outdoor', - 'bowling alley', - 'boxing ring', - 'brewery indoor', - 'bridge', - 'building facade', - 'bullring', - 'burial chamber', - 'bus interior', - 'butchers shop', - 'butte', - 'cabin outdoor', - 'cafeteria', - 'campsite', - 'campus', - 'canal natural', - 'canal urban', - 'candy store', - 'canyon', - 'car interior backseat', - 'car interior frontseat', - 'carrousel', - 'casino indoor', - 'castle', - 'catacomb', - 'cathedral indoor', - 'cathedral outdoor', - 'cavern indoor', - 'cemetery', - 'chalet', - 'cheese factory', - 'chemistry lab', - 'chicken coop indoor', - 'chicken coop outdoor', - 'childs room', - 'church indoor', - 'church outdoor', - 'classroom', - 'clean room', - 'cliff', - 'cloister indoor', - 'closet', - 'clothing store', - 'coast', - 'cockpit', - 'coffee shop', - 'computer room', - 'conference center', - 'conference room', - 'construction site', - 'control room', - 'control tower outdoor', - 'corn field', - 'corral', - 'corridor', - 'cottage garden', - 'courthouse', - 'courtroom', - 'courtyard', - 'covered bridge exterior', - 'creek', - 'crevasse', - 'crosswalk', - 'cubicle office', - 'dam', - 'delicatessen', - 'dentists office', - 'desert sand', - 'desert vegetation', - 'diner indoor', - 'diner outdoor', - 'dinette home', - 'dinette vehicle', - 'dining car', - 'dining room', - 'discotheque', - 'dock', - 'doorway outdoor', - 'dorm room', - 'driveway', - 'driving range outdoor', - 'drugstore', - 'electrical substation', - 'elevator door', - 'elevator interior', - 'elevator shaft', - 'engine room', - 'escalator 
indoor', - 'excavation', - 'factory indoor', - 'fairway', - 'fastfood restaurant', - 'field cultivated', - 'field wild', - 'fire escape', - 'fire station', - 'firing range indoor', - 'fishpond', - 'florist shop indoor', - 'food court', - 'forest broadleaf', - 'forest needleleaf', - 'forest path', - 'forest road', - 'formal garden', - 'fountain', - 'galley', - 'game room', - 'garage indoor', - 'garbage dump', - 'gas station', - 'gazebo exterior', - 'general store indoor', - 'general store outdoor', - 'gift shop', - 'golf course', - 'greenhouse indoor', - 'greenhouse outdoor', - 'gymnasium indoor', - 'hangar indoor', - 'hangar outdoor', - 'harbor', - 'hayfield', - 'heliport', - 'herb garden', - 'highway', - 'hill', - 'home office', - 'hospital', - 'hospital room', - 'hot spring', - 'hot tub outdoor', - 'hotel outdoor', - 'hotel room', - 'house', - 'hunting lodge outdoor', - 'ice cream parlor', - 'ice floe', - 'ice shelf', - 'ice skating rink indoor', - 'ice skating rink outdoor', - 'iceberg', - 'igloo', - 'industrial area', - 'inn outdoor', - 'islet', - 'jacuzzi indoor', - 'jail cell', - 'jail indoor', - 'jewelry shop', - 'kasbah', - 'kennel indoor', - 'kennel outdoor', - 'kindergarden classroom', - 'kitchen', - 'kitchenette', - 'labyrinth outdoor', - 'lake natural', - 'landfill', - 'landing deck', - 'laundromat', - 'lecture room', - 'library indoor', - 'library outdoor', - 'lido deck outdoor', - 'lift bridge', - 'lighthouse', - 'limousine interior', - 'living room', - 'lobby', - 'lock chamber', - 'locker room', - 'mansion', - 'manufactured home', - 'market indoor', - 'market outdoor', - 'marsh', - 'martial arts gym', - 'mausoleum', - 'medina', - 'moat water', - 'monastery outdoor', - 'mosque indoor', - 'mosque outdoor', - 'motel', - 'mountain', - 'mountain snowy', - 'movie theater indoor', - 'museum indoor', - 'music store', - 'music studio', - 'nuclear power plant outdoor', - 'nursery', - 'oast house', - 'observatory outdoor', - 'ocean', - 'office', - 'office building', - 'oil refinery outdoor', - 'oilrig', - 'operating room', - 'orchard', - 'outhouse outdoor', - 'pagoda', - 'palace', - 'pantry', - 'park', - 'parking garage indoor', - 'parking garage outdoor', - 'parking lot', - 'parlor', - 'pasture', - 'patio', - 'pavilion', - 'pharmacy', - 'phone booth', - 'physics laboratory', - 'picnic area', - 'pilothouse indoor', - 'planetarium outdoor', - 'playground', - 'playroom', - 'plaza', - 'podium indoor', - 'podium outdoor', - 'pond', - 'poolroom establishment', - 'poolroom home', - 'power plant outdoor', - 'promenade deck', - 'pub indoor', - 'pulpit', - 'putting green', - 'racecourse', - 'raceway', - 'raft', - 'railroad track', - 'rainforest', - 'reception', - 'recreation room', - 'residential neighborhood', - 'restaurant', - 'restaurant kitchen', - 'restaurant patio', - 'rice paddy', - 'riding arena', - 'river', - 'rock arch', - 'rope bridge', - 'ruin', - 'runway', - 'sandbar', - 'sandbox', - 'sauna', - 'schoolhouse', - 'sea cliff', - 'server room', - 'shed', - 'shoe shop', - 'shopfront', - 'shopping mall indoor', - 'shower', - 'skatepark', - 'ski lodge', - 'ski resort', - 'ski slope', - 'sky', - 'skyscraper', - 'slum', - 'snowfield', - 'squash court', - 'stable', - 'stadium baseball', - 'stadium football', - 'stage indoor', - 'staircase', - 'street', - 'subway interior', - 'subway station platform', - 'supermarket', - 'sushi bar', - 'swamp', - 'swimming pool indoor', - 'swimming pool outdoor', - 'synagogue indoor', - 'synagogue outdoor', - 'television studio', - 'temple east asia', - 
'temple south asia', - 'tennis court indoor', - 'tennis court outdoor', - 'tent outdoor', - 'theater indoor procenium', - 'theater indoor seats', - 'thriftshop', - 'throne room', - 'ticket booth', - 'toll plaza', - 'topiary garden', - 'tower', - 'toyshop', - 'track outdoor', - 'train railway', - 'train station platform', - 'tree farm', - 'tree house', - 'trench', - 'underwater coral reef', - 'utility room', - 'valley', - 'van interior', - 'vegetable garden', - 'veranda', - 'veterinarians office', - 'viaduct', - 'videostore', - 'village', - 'vineyard', - 'volcano', - 'volleyball court indoor', - 'volleyball court outdoor', - 'waiting room', - 'warehouse indoor', - 'water tower', - 'waterfall block', - 'waterfall fan', - 'waterfall plunge', - 'watering hole', - 'wave', - 'wet bar', - 'wheat field', - 'wind farm', - 'windmill', - 'wine cellar barrel storage', - 'wine cellar bottle storage', - 'wrestling ring indoor', - 'yard', - 'youth hostel', -] - -templates = [ - 'a photo of a {}.', - 'a photo of the {}.', -] -``` - - - -## StanfordCars - -```bash -classes = [ - 'AM General Hummer SUV 2000', - 'Acura RL Sedan 2012', - 'Acura TL Sedan 2012', - 'Acura TL Type-S 2008', - 'Acura TSX Sedan 2012', - 'Acura Integra Type R 2001', - 'Acura ZDX Hatchback 2012', - 'Aston Martin V8 Vantage Convertible 2012', - 'Aston Martin V8 Vantage Coupe 2012', - 'Aston Martin Virage Convertible 2012', - 'Aston Martin Virage Coupe 2012', - 'Audi RS 4 Convertible 2008', - 'Audi A5 Coupe 2012', - 'Audi TTS Coupe 2012', - 'Audi R8 Coupe 2012', - 'Audi V8 Sedan 1994', - 'Audi 100 Sedan 1994', - 'Audi 100 Wagon 1994', - 'Audi TT Hatchback 2011', - 'Audi S6 Sedan 2011', - 'Audi S5 Convertible 2012', - 'Audi S5 Coupe 2012', - 'Audi S4 Sedan 2012', - 'Audi S4 Sedan 2007', - 'Audi TT RS Coupe 2012', - 'BMW ActiveHybrid 5 Sedan 2012', - 'BMW 1 Series Convertible 2012', - 'BMW 1 Series Coupe 2012', - 'BMW 3 Series Sedan 2012', - 'BMW 3 Series Wagon 2012', - 'BMW 6 Series Convertible 2007', - 'BMW X5 SUV 2007', - 'BMW X6 SUV 2012', - 'BMW M3 Coupe 2012', - 'BMW M5 Sedan 2010', - 'BMW M6 Convertible 2010', - 'BMW X3 SUV 2012', - 'BMW Z4 Convertible 2012', - 'Bentley Continental Supersports Conv. 
Convertible 2012', - 'Bentley Arnage Sedan 2009', - 'Bentley Mulsanne Sedan 2011', - 'Bentley Continental GT Coupe 2012', - 'Bentley Continental GT Coupe 2007', - 'Bentley Continental Flying Spur Sedan 2007', - 'Bugatti Veyron 16.4 Convertible 2009', - 'Bugatti Veyron 16.4 Coupe 2009', - 'Buick Regal GS 2012', - 'Buick Rainier SUV 2007', - 'Buick Verano Sedan 2012', - 'Buick Enclave SUV 2012', - 'Cadillac CTS-V Sedan 2012', - 'Cadillac SRX SUV 2012', - 'Cadillac Escalade EXT Crew Cab 2007', - 'Chevrolet Silverado 1500 Hybrid Crew Cab 2012', - 'Chevrolet Corvette Convertible 2012', - 'Chevrolet Corvette ZR1 2012', - 'Chevrolet Corvette Ron Fellows Edition Z06 2007', - 'Chevrolet Traverse SUV 2012', - 'Chevrolet Camaro Convertible 2012', - 'Chevrolet HHR SS 2010', - 'Chevrolet Impala Sedan 2007', - 'Chevrolet Tahoe Hybrid SUV 2012', - 'Chevrolet Sonic Sedan 2012', - 'Chevrolet Express Cargo Van 2007', - 'Chevrolet Avalanche Crew Cab 2012', - 'Chevrolet Cobalt SS 2010', - 'Chevrolet Malibu Hybrid Sedan 2010', - 'Chevrolet TrailBlazer SS 2009', - 'Chevrolet Silverado 2500HD Regular Cab 2012', - 'Chevrolet Silverado 1500 Classic Extended Cab 2007', - 'Chevrolet Express Van 2007', - 'Chevrolet Monte Carlo Coupe 2007', - 'Chevrolet Malibu Sedan 2007', - 'Chevrolet Silverado 1500 Extended Cab 2012', - 'Chevrolet Silverado 1500 Regular Cab 2012', - 'Chrysler Aspen SUV 2009', - 'Chrysler Sebring Convertible 2010', - 'Chrysler Town and Country Minivan 2012', - 'Chrysler 300 SRT-8 2010', - 'Chrysler Crossfire Convertible 2008', - 'Chrysler PT Cruiser Convertible 2008', - 'Daewoo Nubira Wagon 2002', - 'Dodge Caliber Wagon 2012', - 'Dodge Caliber Wagon 2007', - 'Dodge Caravan Minivan 1997', - 'Dodge Ram Pickup 3500 Crew Cab 2010', - 'Dodge Ram Pickup 3500 Quad Cab 2009', - 'Dodge Sprinter Cargo Van 2009', - 'Dodge Journey SUV 2012', - 'Dodge Dakota Crew Cab 2010', - 'Dodge Dakota Club Cab 2007', - 'Dodge Magnum Wagon 2008', - 'Dodge Challenger SRT8 2011', - 'Dodge Durango SUV 2012', - 'Dodge Durango SUV 2007', - 'Dodge Charger Sedan 2012', - 'Dodge Charger SRT-8 2009', - 'Eagle Talon Hatchback 1998', - 'FIAT 500 Abarth 2012', - 'FIAT 500 Convertible 2012', - 'Ferrari FF Coupe 2012', - 'Ferrari California Convertible 2012', - 'Ferrari 458 Italia Convertible 2012', - 'Ferrari 458 Italia Coupe 2012', - 'Fisker Karma Sedan 2012', - 'Ford F-450 Super Duty Crew Cab 2012', - 'Ford Mustang Convertible 2007', - 'Ford Freestar Minivan 2007', - 'Ford Expedition EL SUV 2009', - 'Ford Edge SUV 2012', - 'Ford Ranger SuperCab 2011', - 'Ford GT Coupe 2006', - 'Ford F-150 Regular Cab 2012', - 'Ford F-150 Regular Cab 2007', - 'Ford Focus Sedan 2007', - 'Ford E-Series Wagon Van 2012', - 'Ford Fiesta Sedan 2012', - 'GMC Terrain SUV 2012', - 'GMC Savana Van 2012', - 'GMC Yukon Hybrid SUV 2012', - 'GMC Acadia SUV 2012', - 'GMC Canyon Extended Cab 2012', - 'Geo Metro Convertible 1993', - 'HUMMER H3T Crew Cab 2010', - 'HUMMER H2 SUT Crew Cab 2009', - 'Honda Odyssey Minivan 2012', - 'Honda Odyssey Minivan 2007', - 'Honda Accord Coupe 2012', - 'Honda Accord Sedan 2012', - 'Hyundai Veloster Hatchback 2012', - 'Hyundai Santa Fe SUV 2012', - 'Hyundai Tucson SUV 2012', - 'Hyundai Veracruz SUV 2012', - 'Hyundai Sonata Hybrid Sedan 2012', - 'Hyundai Elantra Sedan 2007', - 'Hyundai Accent Sedan 2012', - 'Hyundai Genesis Sedan 2012', - 'Hyundai Sonata Sedan 2012', - 'Hyundai Elantra Touring Hatchback 2012', - 'Hyundai Azera Sedan 2012', - 'Infiniti G Coupe IPL 2012', - 'Infiniti QX56 SUV 2011', - 'Isuzu Ascender SUV 2008', - 'Jaguar XK 
XKR 2012', - 'Jeep Patriot SUV 2012', - 'Jeep Wrangler SUV 2012', - 'Jeep Liberty SUV 2012', - 'Jeep Grand Cherokee SUV 2012', - 'Jeep Compass SUV 2012', - 'Lamborghini Reventon Coupe 2008', - 'Lamborghini Aventador Coupe 2012', - 'Lamborghini Gallardo LP 570-4 Superleggera 2012', - 'Lamborghini Diablo Coupe 2001', - 'Land Rover Range Rover SUV 2012', - 'Land Rover LR2 SUV 2012', - 'Lincoln Town Car Sedan 2011', - 'MINI Cooper Roadster Convertible 2012', - 'Maybach Landaulet Convertible 2012', - 'Mazda Tribute SUV 2011', - 'McLaren MP4-12C Coupe 2012', - 'Mercedes-Benz 300-Class Convertible 1993', - 'Mercedes-Benz C-Class Sedan 2012', - 'Mercedes-Benz SL-Class Coupe 2009', - 'Mercedes-Benz E-Class Sedan 2012', - 'Mercedes-Benz S-Class Sedan 2012', - 'Mercedes-Benz Sprinter Van 2012', - 'Mitsubishi Lancer Sedan 2012', - 'Nissan Leaf Hatchback 2012', - 'Nissan NV Passenger Van 2012', - 'Nissan Juke Hatchback 2012', - 'Nissan 240SX Coupe 1998', - 'Plymouth Neon Coupe 1999', - 'Porsche Panamera Sedan 2012', - 'Ram C/V Cargo Van Minivan 2012', - 'Rolls-Royce Phantom Drophead Coupe Convertible 2012', - 'Rolls-Royce Ghost Sedan 2012', - 'Rolls-Royce Phantom Sedan 2012', - 'Scion xD Hatchback 2012', - 'Spyker C8 Convertible 2009', - 'Spyker C8 Coupe 2009', - 'Suzuki Aerio Sedan 2007', - 'Suzuki Kizashi Sedan 2012', - 'Suzuki SX4 Hatchback 2012', - 'Suzuki SX4 Sedan 2012', - 'Tesla Model S Sedan 2012', - 'Toyota Sequoia SUV 2012', - 'Toyota Camry Sedan 2012', - 'Toyota Corolla Sedan 2012', - 'Toyota 4Runner SUV 2012', - 'Volkswagen Golf Hatchback 2012', - 'Volkswagen Golf Hatchback 1991', - 'Volkswagen Beetle Hatchback 2012', - 'Volvo C30 Hatchback 2012', - 'Volvo 240 Sedan 1993', - 'Volvo XC90 SUV 2007', - 'smart fortwo Convertible 2012', -] - -templates = [ - 'a photo of a {}.', - 'a photo of the {}.', - 'a photo of my {}.', - 'i love my {}!', - 'a photo of my dirty {}.', - 'a photo of my clean {}.', - 'a photo of my new {}.', - 'a photo of my old {}.', -] -``` - - - -## UCF101 - -```bash -classes = [ - 'Apply Eye Makeup', - 'Apply Lipstick', - 'Archery', - 'Baby Crawling', - 'Balance Beam', - 'Band Marching', - 'Baseball Pitch', - 'Basketball', - 'Basketball Dunk', - 'Bench Press', - 'Biking', - 'Billiards', - 'Blow Dry Hair', - 'Blowing Candles', - 'Body Weight Squats', - 'Bowling', - 'Boxing Punching Bag', - 'Boxing Speed Bag', - 'Breast Stroke', - 'Brushing Teeth', - 'Clean And Jerk', - 'Cliff Diving', - 'Cricket Bowling', - 'Cricket Shot', - 'Cutting In Kitchen', - 'Diving', - 'Drumming', - 'Fencing', - 'Field Hockey Penalty', - 'Floor Gymnastics', - 'Frisbee Catch', - 'Front Crawl', - 'Golf Swing', - 'Haircut', - 'Hammer Throw', - 'Hammering', - 'Hand Stand Pushups', - 'Handstand Walking', - 'Head Massage', - 'High Jump', - 'Horse Race', - 'Horse Riding', - 'Hula Hoop', - 'Ice Dancing', - 'Javelin Throw', - 'Juggling Balls', - 'Jump Rope', - 'Jumping Jack', - 'Kayaking', - 'Knitting', - 'Long Jump', - 'Lunges', - 'Military Parade', - 'Mixing', - 'Mopping Floor', - 'Nunchucks', - 'Parallel Bars', - 'Pizza Tossing', - 'Playing Cello', - 'Playing Daf', - 'Playing Dhol', - 'Playing Flute', - 'Playing Guitar', - 'Playing Piano', - 'Playing Sitar', - 'Playing Tabla', - 'Playing Violin', - 'Pole Vault', - 'Pommel Horse', - 'Pull Ups', - 'Punch', - 'Push Ups', - 'Rafting', - 'Rock Climbing Indoor', - 'Rope Climbing', - 'Rowing', - 'Salsa Spin', - 'Shaving Beard', - 'Shotput', - 'Skate Boarding', - 'Skiing', - 'Skijet', - 'Sky Diving', - 'Soccer Juggling', - 'Soccer Penalty', - 'Still Rings', - 
'Sumo Wrestling', - 'Surfing', - 'Swing', - 'Table Tennis Shot', - 'Tai Chi', - 'Tennis Swing', - 'Throw Discus', - 'Trampoline Jumping', - 'Typing', - 'Uneven Bars', - 'Volleyball Spiking', - 'Walking With Dog', - 'Wall Pushups', - 'Writing On Board', - 'Yo Yo', -] - -templates = [ - 'a photo of a person {}.', - 'a video of a person {}.', - 'a example of a person {}.', - 'a demonstration of a person {}.', - 'a photo of the person {}.', - 'a video of the person {}.', - 'a example of the person {}.', - 'a demonstration of the person {}.', - 'a photo of a person using {}.', - 'a video of a person using {}.', - 'a example of a person using {}.', - 'a demonstration of a person using {}.', - 'a photo of the person using {}.', - 'a video of the person using {}.', - 'a example of the person using {}.', - 'a demonstration of the person using {}.', - 'a photo of a person doing {}.', - 'a video of a person doing {}.', - 'a example of a person doing {}.', - 'a demonstration of a person doing {}.', - 'a photo of the person doing {}.', - 'a video of the person doing {}.', - 'a example of the person doing {}.', - 'a demonstration of the person doing {}.', - 'a photo of a person during {}.', - 'a video of a person during {}.', - 'a example of a person during {}.', - 'a demonstration of a person during {}.', - 'a photo of the person during {}.', - 'a video of the person during {}.', - 'a example of the person during {}.', - 'a demonstration of the person during {}.', - 'a photo of a person performing {}.', - 'a video of a person performing {}.', - 'a example of a person performing {}.', - 'a demonstration of a person performing {}.', - 'a photo of the person performing {}.', - 'a video of the person performing {}.', - 'a example of the person performing {}.', - 'a demonstration of the person performing {}.', - 'a photo of a person practicing {}.', - 'a video of a person practicing {}.', - 'a example of a person practicing {}.', - 'a demonstration of a person practicing {}.', - 'a photo of the person practicing {}.', - 'a video of the person practicing {}.', - 'a example of the person practicing {}.', - 'a demonstration of the person practicing {}.', -] -``` - - diff --git a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet2060.py b/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet2060.py deleted file mode 100644 index 21d1122144d207637d2444cba1f68fe630c89f31..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet2060.py +++ /dev/null @@ -1,176 +0,0 @@ -import torch -from torch import nn - -assert torch.__version__ >= "1.8.1" -from torch.utils.checkpoint import checkpoint_sequential - -__all__ = ['iresnet2060'] - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - groups=groups, - bias=False, - dilation=dilation) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=1, - stride=stride, - bias=False) - - -class IBasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, - groups=1, base_width=64, dilation=1): - super(IBasicBlock, self).__init__() - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise 
NotImplementedError("Dilation > 1 not supported in BasicBlock") - self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05, ) - self.conv1 = conv3x3(inplanes, planes) - self.bn2 = nn.BatchNorm2d(planes, eps=1e-05, ) - self.prelu = nn.PReLU(planes) - self.conv2 = conv3x3(planes, planes, stride) - self.bn3 = nn.BatchNorm2d(planes, eps=1e-05, ) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - out = self.bn1(x) - out = self.conv1(out) - out = self.bn2(out) - out = self.prelu(out) - out = self.conv2(out) - out = self.bn3(out) - if self.downsample is not None: - identity = self.downsample(x) - out += identity - return out - - -class IResNet(nn.Module): - fc_scale = 7 * 7 - - def __init__(self, - block, layers, dropout=0, num_features=512, zero_init_residual=False, - groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False): - super(IResNet, self).__init__() - self.fp16 = fp16 - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05) - self.prelu = nn.PReLU(self.inplanes) - self.layer1 = self._make_layer(block, 64, layers[0], stride=2) - self.layer2 = self._make_layer(block, - 128, - layers[1], - stride=2, - dilate=replace_stride_with_dilation[0]) - self.layer3 = self._make_layer(block, - 256, - layers[2], - stride=2, - dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, - 512, - layers[3], - stride=2, - dilate=replace_stride_with_dilation[2]) - self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05, ) - self.dropout = nn.Dropout(p=dropout, inplace=True) - self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features) - self.features = nn.BatchNorm1d(num_features, eps=1e-05) - nn.init.constant_(self.features.weight, 1.0) - self.features.weight.requires_grad = False - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.normal_(m.weight, 0, 0.1) - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - if zero_init_residual: - for m in self.modules(): - if isinstance(m, IBasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, dilate=False): - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ), - ) - layers = [] - layers.append( - block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block(self.inplanes, - planes, - groups=self.groups, - base_width=self.base_width, - dilation=self.dilation)) - - return nn.Sequential(*layers) - - def checkpoint(self, func, num_seg, x): - if self.training: - return checkpoint_sequential(func, num_seg, x) - else: - return func(x) - - def forward(self, x): - with 
torch.cuda.amp.autocast(self.fp16): - x = self.conv1(x) - x = self.bn1(x) - x = self.prelu(x) - x = self.layer1(x) - x = self.checkpoint(self.layer2, 20, x) - x = self.checkpoint(self.layer3, 100, x) - x = self.layer4(x) - x = self.bn2(x) - x = torch.flatten(x, 1) - x = self.dropout(x) - x = self.fc(x.float() if self.fp16 else x) - x = self.features(x) - return x - - -def _iresnet(arch, block, layers, pretrained, progress, **kwargs): - model = IResNet(block, layers, **kwargs) - if pretrained: - raise ValueError() - return model - - -def iresnet2060(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet2060', IBasicBlock, [3, 128, 1024 - 128, 3], pretrained, progress, **kwargs) diff --git a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/run.sh b/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/run.sh deleted file mode 100644 index 61af4b4950eb11334e55362e3e3c5e2796979a01..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/run.sh +++ /dev/null @@ -1,2 +0,0 @@ -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/ms1mv3_r50 -ps -ef | grep "train" | grep -v grep | awk '{print "kill -9 "$2}' | sh diff --git a/spaces/idsedykh/codebleu2/app.py b/spaces/idsedykh/codebleu2/app.py deleted file mode 100644 index fae2bd7b30e3f86bc95438df72c19cafc84594d0..0000000000000000000000000000000000000000 --- a/spaces/idsedykh/codebleu2/app.py +++ /dev/null @@ -1,6 +0,0 @@ -import evaluate -from evaluate.utils import launch_gradio_widget - - -module = evaluate.load("idsedykh/codebleu2") -launch_gradio_widget(module) \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/Adobe Type Manager For Win 7 64 Bit.15 A Free Font Utility for Viewing and Printing PostScript Fonts.md b/spaces/inamXcontru/PoeticTTS/Adobe Type Manager For Win 7 64 Bit.15 A Free Font Utility for Viewing and Printing PostScript Fonts.md deleted file mode 100644 index cfd0948da85f1ed2594848f5dcfc5ca5de6c51e8..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Adobe Type Manager For Win 7 64 Bit.15 A Free Font Utility for Viewing and Printing PostScript Fonts.md +++ /dev/null @@ -1,6 +0,0 @@ -

Aadu Oru Bheekara Jeeviyanu Full Movie Download HD


Download File: https://gohhs.com/2uz2Mh



-
-
-
-

diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Boom 3D 1.1.6 Crack MacOS MacOSX.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Boom 3D 1.1.6 Crack MacOS MacOSX.md deleted file mode 100644 index 5067df40bb9c78c1df62b6e5ca12c2f50da782e6..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Boom 3D 1.1.6 Crack MacOS MacOSX.md +++ /dev/null @@ -1,9 +0,0 @@ - -

The best ebooks application, giving you the most energizing experience ever. This application is a first-rate alternative to its counterparts. With Boom 3D Crack you can download an ebook just as you download music and video. The interface and highlights are more appealing and easier to use than ever. It is an application that raises the quality of your iPad to the limit, and software with which you can download your applications and music with the greatest ease. Boom 3D Crack Mac Download Full Version is the best application for iPad and iPhone, and you can also download its patch for Windows users. It also provides a utility to set a passcode for your device. This version is an advancement over the previous one, with the best charged-up sound and a more reasonable interface. You can get a lot of applications for iPad from this software.

-

You can now download Boom 3D 1.1.6 Crack for macOS from any location.

-

Boom 3D 1.1.6 Crack for macOS


Download File: https://urlin.us/2uExZA



-

To achieve a win-win situation, you may move multiple files using this software. The application has an effective volume-management system with a user-friendly interface. You can download the full version of Boom 3D 1.1.6 Crack for macOS from the link given below.

-

Boom 3D for Mac OS X is easy to use and user-friendly. Its interface is very simple and attractive. It works the same way as it does on any other computer; the difference is that this build is software for your Mac.

-

Boom 3D Crack for Mac OS X is an app in the audio category. You can easily and effectively listen to all your favorite songs. With its unique interface, the app is very easy to use. Music is a very important source of pleasure, and if you are a music lover with a good collection of your favorite music, you will love this app. You can download Boom 3D Crack for macOS from the links below.

-
-
\ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (Amada Ap100 Software Crack Download).md b/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (Amada Ap100 Software Crack Download).md deleted file mode 100644 index 00b1444c5bed1c2a3f7b268b5bc5a9e2b05b4661..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (Amada Ap100 Software Crack Download).md +++ /dev/null @@ -1,74 +0,0 @@ - -

HD Online Player (Amada Ap100 Software Crack Download)

- -

If you are looking for a way to watch movies online in HD quality and without any hassle, you might be interested in HD Online Player (Amada Ap100 Software Crack Download). It is a program that lets you download and play any movie file from the internet with just a few clicks. You can also use it to install Amada Ap100 Software Crack, which is a powerful CAD/CAM package for sheet metal fabrication.

- -

What is HD Online Player?

- -

HD Online Player is a simple and easy-to-use program that enables the web browser to display movies from HTML pages. It supports various formats such as MP4, AVI, MKV, FLV, WMV, and MOV. You can also adjust the volume, brightness, contrast, and other settings of the video. HD Online Player works with any web browser and does not require any installation or registration.

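For a concrete picture of what those playback controls involve, here is a minimal, hypothetical sketch of a desktop movie player in Python using OpenCV. This is not HD Online Player's actual implementation (its code does not appear in this document); the file name movie.mp4 and the key bindings are illustrative assumptions.

```python
# Hypothetical stand-in for the player described above: it opens a local
# movie file and applies user-adjustable brightness/contrast to each frame.
# Assumes opencv-python is installed and "movie.mp4" exists (illustrative name).
import cv2

VIDEO_PATH = "movie.mp4"  # hypothetical input file


def play(path: str) -> None:
    cap = cv2.VideoCapture(path)
    contrast, brightness = 1.0, 0  # alpha (gain) and beta (bias)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break  # end of file or read error
        # output pixel = clip(contrast * input pixel + brightness)
        adjusted = cv2.convertScaleAbs(frame, alpha=contrast, beta=brightness)
        cv2.imshow("player", adjusted)
        key = cv2.waitKey(25) & 0xFF  # roughly 40 fps pacing
        if key == ord("q"):
            break  # quit
        elif key == ord("+"):
            brightness += 10  # brighter
        elif key == ord("-"):
            brightness -= 10  # darker
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    play(VIDEO_PATH)
```

The point of the sketch is the per-frame gain/bias pass: a single linear transform is enough to implement the brightness and contrast controls that players of this kind expose.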
-

Download Zip: https://urlin.us/2uEwuJ



- -

What is Amada Ap100 Software Crack?

- -

Amada Ap100 Software Crack is a cracked version of Amada Ap100 Software, one of the most widely used CAD/CAM packages for sheet metal fabrication. It is compatible with all types of punching and cutting machines, such as laser, waterjet, plasma, and combination punch/cut. It has a built-in 2D CAD system with sheet-metal-specific logic and direct integration with 3D solid modeling packages. It also supports and interfaces with all industry-standard file formats.

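To make "industry-standard file formats" concrete: DXF is one of the usual interchange formats in sheet-metal CAD/CAM. The short sketch below reads a DXF drawing and lists its entities with the open-source ezdxf library. It is an illustration only, not Amada's own API, and the file name part.dxf is a hypothetical assumption.

```python
# Illustrative only: enumerate the entities in a DXF drawing, the kind of
# industry-standard interchange file a sheet-metal CAD/CAM package imports.
# Assumes the ezdxf package is installed and "part.dxf" exists (hypothetical name).
import ezdxf

doc = ezdxf.readfile("part.dxf")  # parse the DXF document
msp = doc.modelspace()            # drawing geometry lives in the modelspace

for entity in msp:
    # dxftype() returns strings such as "LINE", "CIRCLE", or "LWPOLYLINE",
    # the primitives a CAM nesting/tooling stage would work from.
    print(entity.dxftype(), "on layer", entity.dxf.layer)
```

Listing entity types and layers is typically the first step before a CAM package assigns punching or cutting tools to the geometry.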
- -

How to Download HD Online Player (Amada Ap100 Software Crack Download)?

- -

To download HD Online Player (Amada Ap100 Software Crack Download), you need to follow these steps:

- -
1. Go to the official website of HD Online Player (Amada Ap100 Software Crack Download) and click on the download link.
2. Choose the file that suits your operating system (Windows or Mac) and save it on your computer.
3. Open the downloaded file and follow the instructions to install HD Online Player on your web browser.
4. Go to the website where you want to watch movies online and click on the HD Online Player icon on the top right corner of your browser.
5. Select the movie file that you want to download and play.
6. To install Amada Ap100 Software Crack, you need to open the folder that contains the crack files and copy them to the installation directory of Amada Ap100 Software.
7. Run Amada Ap100 Software and enjoy its full features.
- -

Why Choose HD Online Player (Amada Ap100 Software Crack Download)?

- -

There are many reasons why you should choose HD Online Player (Amada Ap100 Software Crack Download) over other similar software. Some of them are:

- -
- It is free and easy to use.
- It does not require any installation or registration.
- It supports various video formats and quality settings.
- It allows you to download and play any movie file from the internet.
- It also enables you to install Amada Ap100 Software Crack, which is a powerful CAD/CAM package for sheet metal fabrication.
- -

In conclusion, HD Online Player (Amada Ap100 Software Crack Download) is software that can help you watch movies online in HD quality and without any hassle. You can also use it to install Amada Ap100 Software Crack, a CAD/CAM package for sheet metal fabrication. If you are interested in this software, you can download it from the official website and enjoy its benefits.

-

How to Use HD Online Player (Amada Ap100 Software Crack Download)?

- -

Once you have downloaded and installed HD Online Player (Amada Ap100 Software Crack Download), you can use it to watch movies online with HD quality and without any hassle. Here are some tips on how to use it:

- -
  • To watch movies online, go to the website where the movie file is hosted and click the HD Online Player icon in the top right corner of your browser.
  • A pop-up window will appear where you can select the movie file that you want to download and play. You can also choose the quality setting and the download location of the file.
  • After selecting the file, click the download button and wait for the download to finish. You can follow its progress in the bottom right corner of your browser.
  • Once the file is downloaded, click the play button and enjoy the movie in HD quality. You can also adjust the volume, brightness, contrast, and other settings of the video.
  • To install Amada Ap100 Software Crack, open the folder that contains the crack files and copy them to the installation directory of Amada Ap100 Software.
  • Run Amada Ap100 Software and enjoy its full features.
- -

What are the Benefits of HD Online Player (Amada Ap100 Software Crack Download)?

- -

HD Online Player (Amada Ap100 Software Crack Download) is software that offers many benefits, including:

-

- -
  • You can watch movies online in HD quality and without any hassle.
  • You can download and play any movie file from the internet with just a few clicks.
  • You can save money and time by not having to buy or rent DVDs or Blu-rays.
  • You can also install Amada Ap100 Software Crack, a powerful CAD/CAM package for sheet metal fabrication.
  • You can design and program sheet metal parts with ease and accuracy.
  • You can improve your productivity and efficiency by using its advanced features and tools.
- -

In summary, HD Online Player (Amada Ap100 Software Crack Download) is software that can help you watch movies online in HD quality and without any hassle. You can also use it to install Amada Ap100 Software Crack, a CAD/CAM package for sheet metal fabrication. If you are interested in this software, you can download it from the official website and enjoy its benefits.

-

Conclusion

- -

HD Online Player (Amada Ap100 Software Crack Download) is software that helps you watch movies online in HD quality and without any hassle, and it can also install Amada Ap100 Software Crack, a CAD/CAM package for sheet metal fabrication. Its benefits include saving money and time, improving productivity and efficiency, and supporting various formats and quality settings. If that sounds useful, you can download it from the official website.

-
-
\ No newline at end of file diff --git a/spaces/itsmohsinali/anpr1/README.md b/spaces/itsmohsinali/anpr1/README.md deleted file mode 100644 index 182b813a93686e310a9e0a29218245d5cafe5d13..0000000000000000000000000000000000000000 --- a/spaces/itsmohsinali/anpr1/README.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Automatic Number-Plate Recognition -emoji: 🚘 -colorFrom: red -colorTo: gray -sdk: gradio -app_file: app.py -pinned: false ---- - -# Automatic Number Plate Recognition - -> AI to detect and recognize number plates on vehicles. - -## Table of contents - -- [General information](#general-information) -- [Dataset](#dataset) -- [How does it work](#how-does-it-work) - -## [Live demo](https://huggingface.co/spaces/itsyoboieltr/anpr) - -## General information - -This is an AI that was trained on images of number plates to carry out number plate detection and recognition. It works for both images and videos. Video detection also includes object tracking. - - - -## Dataset - -For this project, I created the [ANPR dataset](https://archive.org/details/anpr-dataset), a dataset of approx. 30k handpicked images of number plates. - -Annotations are in YOLO format. - - - -## How does it work - -Technologies used: - -- [YOLOv8](https://github.com/ultralytics/ultralytics): Object detection model to detect the number plate -- [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR): OCR to read the number plate -- [Deep SORT](https://github.com/levan92/deep_sort_realtime): Object tracking algorithm for video detection - -The YOLOv8 Model was fine-tuned using the ANPR dataset to detect number plates. When a number plate is detected, PaddleOCR is used to read the number plate. For video detection, Deep SORT is used to handle object tracking. diff --git a/spaces/jackli888/stable-diffusion-webui/modules/sd_disable_initialization.py b/spaces/jackli888/stable-diffusion-webui/modules/sd_disable_initialization.py deleted file mode 100644 index 50e4c180fc74988ec697e4cef2773bd2a785bccf..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/modules/sd_disable_initialization.py +++ /dev/null @@ -1,93 +0,0 @@ -import ldm.modules.encoders.modules -import open_clip -import torch -import transformers.utils.hub - - -class DisableInitialization: - """ - When an object of this class enters a `with` block, it starts: - - preventing torch's layer initialization functions from working - - changes CLIP and OpenCLIP to not download model weights - - changes CLIP to not make requests to check if there is a new version of a file you already have - - When it leaves the block, it reverts everything to how it was before. 
- - Use it like this: - ``` - with DisableInitialization(): - do_things() - ``` - """ - - def __init__(self, disable_clip=True): - self.replaced = [] - self.disable_clip = disable_clip - - def replace(self, obj, field, func): - original = getattr(obj, field, None) - if original is None: - return None - - self.replaced.append((obj, field, original)) - setattr(obj, field, func) - - return original - - def __enter__(self): - def do_nothing(*args, **kwargs): - pass - - def create_model_and_transforms_without_pretrained(*args, pretrained=None, **kwargs): - return self.create_model_and_transforms(*args, pretrained=None, **kwargs) - - def CLIPTextModel_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs): - res = self.CLIPTextModel_from_pretrained(None, *model_args, config=pretrained_model_name_or_path, state_dict={}, **kwargs) - res.name_or_path = pretrained_model_name_or_path - return res - - def transformers_modeling_utils_load_pretrained_model(*args, **kwargs): - args = args[0:3] + ('/', ) + args[4:] # resolved_archive_file; must set it to something to prevent what seems to be a bug - return self.transformers_modeling_utils_load_pretrained_model(*args, **kwargs) - - def transformers_utils_hub_get_file_from_cache(original, url, *args, **kwargs): - - # this file is always 404, prevent making request - if url == 'https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/added_tokens.json' or url == 'openai/clip-vit-large-patch14' and args[0] == 'added_tokens.json': - return None - - try: - res = original(url, *args, local_files_only=True, **kwargs) - if res is None: - res = original(url, *args, local_files_only=False, **kwargs) - return res - except Exception as e: - return original(url, *args, local_files_only=False, **kwargs) - - def transformers_utils_hub_get_from_cache(url, *args, local_files_only=False, **kwargs): - return transformers_utils_hub_get_file_from_cache(self.transformers_utils_hub_get_from_cache, url, *args, **kwargs) - - def transformers_tokenization_utils_base_cached_file(url, *args, local_files_only=False, **kwargs): - return transformers_utils_hub_get_file_from_cache(self.transformers_tokenization_utils_base_cached_file, url, *args, **kwargs) - - def transformers_configuration_utils_cached_file(url, *args, local_files_only=False, **kwargs): - return transformers_utils_hub_get_file_from_cache(self.transformers_configuration_utils_cached_file, url, *args, **kwargs) - - self.replace(torch.nn.init, 'kaiming_uniform_', do_nothing) - self.replace(torch.nn.init, '_no_grad_normal_', do_nothing) - self.replace(torch.nn.init, '_no_grad_uniform_', do_nothing) - - if self.disable_clip: - self.create_model_and_transforms = self.replace(open_clip, 'create_model_and_transforms', create_model_and_transforms_without_pretrained) - self.CLIPTextModel_from_pretrained = self.replace(ldm.modules.encoders.modules.CLIPTextModel, 'from_pretrained', CLIPTextModel_from_pretrained) - self.transformers_modeling_utils_load_pretrained_model = self.replace(transformers.modeling_utils.PreTrainedModel, '_load_pretrained_model', transformers_modeling_utils_load_pretrained_model) - self.transformers_tokenization_utils_base_cached_file = self.replace(transformers.tokenization_utils_base, 'cached_file', transformers_tokenization_utils_base_cached_file) - self.transformers_configuration_utils_cached_file = self.replace(transformers.configuration_utils, 'cached_file', transformers_configuration_utils_cached_file) - self.transformers_utils_hub_get_from_cache = 
self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache) - - def __exit__(self, exc_type, exc_val, exc_tb): - for obj, field, original in self.replaced: - setattr(obj, field, original) - - self.replaced.clear() - diff --git a/spaces/jacobduncan00/Hosioka-AniReal/app.py b/spaces/jacobduncan00/Hosioka-AniReal/app.py deleted file mode 100644 index 64ef3890b5898557891038dd6d2bbfd633dc1048..0000000000000000000000000000000000000000 --- a/spaces/jacobduncan00/Hosioka-AniReal/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Hosioka/AniReal").launch() \ No newline at end of file diff --git a/spaces/jbetker/tortoise/models/xtransformers.py b/spaces/jbetker/tortoise/models/xtransformers.py deleted file mode 100644 index 70e8e63d3c7069306536331e0ae1421ed6ab89cd..0000000000000000000000000000000000000000 --- a/spaces/jbetker/tortoise/models/xtransformers.py +++ /dev/null @@ -1,1253 +0,0 @@ -import functools -import math -import torch -from torch import nn, einsum -import torch.nn.functional as F -from functools import partial -from inspect import isfunction -from collections import namedtuple - -from einops import rearrange, repeat, reduce -from einops.layers.torch import Rearrange - -from entmax import entmax15 -from torch.utils.checkpoint import checkpoint - -DEFAULT_DIM_HEAD = 64 - -Intermediates = namedtuple('Intermediates', [ - 'pre_softmax_attn', - 'post_softmax_attn' -]) - -LayerIntermediates = namedtuple('Intermediates', [ - 'hiddens', - 'attn_intermediates', - 'past_key_values', -]) - - -# helpers - -def exists(val): - return val is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def cast_tuple(val, depth): - return val if isinstance(val, tuple) else (val,) * depth - - -class always(): - def __init__(self, val): - self.val = val - - def __call__(self, *args, **kwargs): - return self.val - - -class not_equals(): - def __init__(self, val): - self.val = val - - def __call__(self, x, *args, **kwargs): - return x != self.val - - -class equals(): - def __init__(self, val): - self.val = val - - def __call__(self, x, *args, **kwargs): - return x == self.val - - -def max_neg_value(tensor): - return -torch.finfo(tensor.dtype).max - - -def l2norm(t): - return F.normalize(t, p=2, dim=-1) - - -# init helpers - -def init_zero_(layer): - nn.init.constant_(layer.weight, 0.) - if exists(layer.bias): - nn.init.constant_(layer.bias, 0.) 
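- # init_zero_ is what the `zero_init_output` options below rely on: zeroing the
- # final projection of an attention or feedforward block makes the branch start
- # as a no-op, so the residual path carries the signal early in training.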
- - -# keyword argument helpers - -def pick_and_pop(keys, d): - values = list(map(lambda key: d.pop(key), keys)) - return dict(zip(keys, values)) - - -def group_dict_by_key(cond, d): - return_val = [dict(), dict()] - for key in d.keys(): - match = bool(cond(key)) - ind = int(not match) - return_val[ind][key] = d[key] - return (*return_val,) - - -def string_begins_with(prefix, str): - return str.startswith(prefix) - - -def group_by_key_prefix(prefix, d): - return group_dict_by_key(partial(string_begins_with, prefix), d) - - -def groupby_prefix_and_trim(prefix, d): - kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) - kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) - return kwargs_without_prefix, kwargs - - -# activations - -class ReluSquared(nn.Module): - def forward(self, x): - return F.relu(x) ** 2 - - -# positional embeddings - -class AbsolutePositionalEmbedding(nn.Module): - def __init__(self, dim, max_seq_len): - super().__init__() - self.scale = dim ** -0.5 - self.emb = nn.Embedding(max_seq_len, dim) - - def forward(self, x): - n = torch.arange(x.shape[1], device=x.device) - pos_emb = self.emb(n) - pos_emb = rearrange(pos_emb, 'n d -> () n d') - return pos_emb * self.scale - - -class FixedPositionalEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, x, seq_dim=1, offset=0): - t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset - sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) - return rearrange(emb, 'n d -> () n d') - - -class RelativePositionBias(nn.Module): - def __init__(self, scale, causal=False, num_buckets=32, max_distance=128, heads=8): - super().__init__() - self.scale = scale - self.causal = causal - self.num_buckets = num_buckets - self.max_distance = max_distance - self.relative_attention_bias = nn.Embedding(num_buckets, heads) - - @staticmethod - def _relative_position_bucket(relative_position, causal=True, num_buckets=32, max_distance=128): - ret = 0 - n = -relative_position - if not causal: - num_buckets //= 2 - ret += (n < 0).long() * num_buckets - n = torch.abs(n) - else: - n = torch.max(n, torch.zeros_like(n)) - - max_exact = num_buckets // 2 - is_small = n < max_exact - - val_if_large = max_exact + ( - torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) - ).long() - val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) - - ret += torch.where(is_small, n, val_if_large) - return ret - - def forward(self, qk_dots): - i, j, device = *qk_dots.shape[-2:], qk_dots.device - q_pos = torch.arange(i, dtype=torch.long, device=device) - k_pos = torch.arange(j, dtype=torch.long, device=device) - rel_pos = k_pos[None, :] - q_pos[:, None] - rp_bucket = self._relative_position_bucket(rel_pos, causal=self.causal, num_buckets=self.num_buckets, - max_distance=self.max_distance) - values = self.relative_attention_bias(rp_bucket) - bias = rearrange(values, 'i j h -> () h i j') - return qk_dots + (bias * self.scale) - - -class AlibiPositionalBias(nn.Module): - def __init__(self, heads, **kwargs): - super().__init__() - self.heads = heads - slopes = torch.Tensor(self._get_slopes(heads)) - slopes = rearrange(slopes, 'h -> () h () ()') - 
self.register_buffer('slopes', slopes, persistent=False) - self.register_buffer('bias', None, persistent=False) - - @staticmethod - def _get_slopes(heads): - def get_slopes_power_of_2(n): - start = (2 ** (-2 ** -(math.log2(n) - 3))) - ratio = start - return [start * ratio ** i for i in range(n)] - - if math.log2(heads).is_integer(): - return get_slopes_power_of_2(heads) - - closest_power_of_2 = 2 ** math.floor(math.log2(heads)) - return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][ - :heads - closest_power_of_2] - - def forward(self, qk_dots): - h, i, j, device = *qk_dots.shape[-3:], qk_dots.device - - if exists(self.bias) and self.bias.shape[-1] >= j: - return qk_dots + self.bias[..., :j] - - bias = torch.arange(j, device=device) - bias = rearrange(bias, 'j -> () () () j') - bias = bias * self.slopes - - num_heads_unalibied = h - bias.shape[1] - bias = F.pad(bias, (0, 0, 0, 0, 0, num_heads_unalibied)) - - self.register_buffer('bias', bias, persistent=False) - return qk_dots + self.bias - - -class LearnedAlibiPositionalBias(AlibiPositionalBias): - def __init__(self, heads, bidirectional=False): - super().__init__(heads) - los_slopes = torch.log(self.slopes) - self.learned_logslopes = nn.Parameter(los_slopes) - - self.bidirectional = bidirectional - if self.bidirectional: - self.learned_logslopes_future = nn.Parameter(los_slopes) - - def forward(self, qk_dots): - h, i, j, device = *qk_dots.shape[-3:], qk_dots.device - - def get_slopes(param): - return F.pad(param.exp(), (0, 0, 0, 0, 0, h - param.shape[1])) - - if exists(self.bias) and self.bias.shape[-1] >= j: - bias = self.bias[..., :i, :j] - else: - i_arange = torch.arange(i, device=device) - j_arange = torch.arange(j, device=device) - bias = rearrange(j_arange, 'j -> 1 1 1 j') - rearrange(i_arange, 'i -> 1 1 i 1') - self.register_buffer('bias', bias, persistent=False) - - if self.bidirectional: - past_slopes = get_slopes(self.learned_logslopes) - future_slopes = get_slopes(self.learned_logslopes_future) - bias = torch.tril(bias * past_slopes) + torch.triu(bias * future_slopes) - else: - slopes = get_slopes(self.learned_logslopes) - bias = bias * slopes - - return qk_dots + bias - - -class RotaryEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, max_seq_len, device): - t = torch.arange(max_seq_len, device=device).type_as(self.inv_freq) - freqs = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((freqs, freqs), dim=-1) - return rearrange(emb, 'n d -> () () n d') - - -def rotate_half(x): - x = rearrange(x, '... (j d) -> ... 
j d', j=2) - x1, x2 = x.unbind(dim=-2) - return torch.cat((-x2, x1), dim=-1) - - -def apply_rotary_pos_emb(t, freqs): - seq_len = t.shape[-2] - freqs = freqs[:, :, -seq_len:] - return (t * freqs.cos()) + (rotate_half(t) * freqs.sin()) - - -# norms - -class Scale(nn.Module): - def __init__(self, value, fn): - super().__init__() - self.value = value - self.fn = fn - - def forward(self, x, **kwargs): - out = self.fn(x, **kwargs) - scale_fn = lambda t: t * self.value - - if not isinstance(out, tuple): - return scale_fn(out) - - return (scale_fn(out[0]), *out[1:]) - - -class Rezero(nn.Module): - def __init__(self, fn): - super().__init__() - self.fn = fn - self.g = nn.Parameter(torch.zeros(1)) - - def forward(self, x, **kwargs): - out = self.fn(x, **kwargs) - rezero_fn = lambda t: t * self.g - - if not isinstance(out, tuple): - return rezero_fn(out) - - return (rezero_fn(out[0]), *out[1:]) - - -class ScaleNorm(nn.Module): - def __init__(self, dim, eps=1e-5): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(1)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSScaleShiftNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - self.scale_shift_process = nn.Linear(dim * 2, dim * 2) - - def forward(self, x, norm_scale_shift_inp): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - norm = x / norm.clamp(min=self.eps) * self.g - - ss_emb = self.scale_shift_process(norm_scale_shift_inp) - scale, shift = torch.chunk(ss_emb, 2, dim=1) - h = norm * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) - return h - - -# residual and residual gates - -class Residual(nn.Module): - def __init__(self, dim, scale_residual=False): - super().__init__() - self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None - - def forward(self, x, residual): - if exists(self.residual_scale): - residual = residual * self.residual_scale - - return x + residual - - -class GRUGating(nn.Module): - def __init__(self, dim, scale_residual=False): - super().__init__() - self.gru = nn.GRUCell(dim, dim) - self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None - - def forward(self, x, residual): - if exists(self.residual_scale): - residual = residual * self.residual_scale - - gated_output = self.gru( - rearrange(x, 'b n d -> (b n) d'), - rearrange(residual, 'b n d -> (b n) d') - ) - - return gated_output.reshape_as(x) - - -# token shifting - -def shift(t, amount, mask=None): - if amount == 0: - return t - - if exists(mask): - t = t.masked_fill(~mask[..., None], 0.) - - return F.pad(t, (0, 0, amount, -amount), value=0.) 
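- # shift(t, 1) pads one step of zeros at the front of the sequence axis and
- # drops the last step: a (b, n, d) tensor keeps its shape, but position i now
- # holds the features from position i-1. ShiftTokens below applies different
- # shift amounts to separate chunks of the feature dimension (token shifting).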
- - -class ShiftTokens(nn.Module): - def __init__(self, shifts, fn): - super().__init__() - self.fn = fn - self.shifts = tuple(shifts) - - def forward(self, x, **kwargs): - mask = kwargs.get('mask', None) - shifts = self.shifts - segments = len(shifts) - feats_per_shift = x.shape[-1] // segments - splitted = x.split(feats_per_shift, dim=-1) - segments_to_shift, rest = splitted[:segments], splitted[segments:] - segments_to_shift = list(map(lambda args: shift(*args, mask=mask), zip(segments_to_shift, shifts))) - x = torch.cat((*segments_to_shift, *rest), dim=-1) - return self.fn(x, **kwargs) - - -# feedforward - -class GLU(nn.Module): - def __init__(self, dim_in, dim_out, activation): - super().__init__() - self.act = activation - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * self.act(gate) - - -class FeedForward(nn.Module): - def __init__( - self, - dim, - dim_out=None, - mult=4, - glu=False, - relu_squared=False, - post_act_ln=False, - dropout=0., - zero_init_output=False - ): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - activation = ReluSquared() if relu_squared else nn.GELU() - - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - activation - ) if not glu else GLU(dim, inner_dim, activation) - - self.net = nn.Sequential( - project_in, - nn.LayerNorm(inner_dim) if post_act_ln else nn.Identity(), - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - # init last linear layer to 0 - if zero_init_output: - init_zero_(self.net[-1]) - - def forward(self, x): - return self.net(x) - - -# attention. - -class Attention(nn.Module): - def __init__( - self, - dim, - dim_head=DEFAULT_DIM_HEAD, - heads=8, - causal=False, - talking_heads=False, - head_scale=False, - collab_heads=False, - collab_compression=.3, - sparse_topk=None, - use_entmax15=False, - num_mem_kv=0, - dropout=0., - on_attn=False, - gate_values=False, - zero_init_output=False, - max_attend_past=None, - qk_norm=False, - scale_init_value=None, - rel_pos_bias=False, - rel_pos_num_buckets=32, - rel_pos_max_distance=128, - ): - super().__init__() - self.scale = dim_head ** -0.5 - - self.heads = heads - self.causal = causal - self.max_attend_past = max_attend_past - - qk_dim = v_dim = dim_head * heads - - # collaborative heads - self.collab_heads = collab_heads - if self.collab_heads: - qk_dim = int(collab_compression * qk_dim) - self.collab_mixing = nn.Parameter(torch.randn(heads, qk_dim)) - - self.to_q = nn.Linear(dim, qk_dim, bias=False) - self.to_k = nn.Linear(dim, qk_dim, bias=False) - self.to_v = nn.Linear(dim, v_dim, bias=False) - - self.dropout = nn.Dropout(dropout) - - # add GLU gating for aggregated values, from alphafold2 - self.to_v_gate = None - if gate_values: - self.to_v_gate = nn.Linear(dim, v_dim) - nn.init.constant_(self.to_v_gate.weight, 0) - nn.init.constant_(self.to_v_gate.bias, 1) - - # cosine sim attention - self.qk_norm = qk_norm - if qk_norm: - scale_init_value = default(scale_init_value, - -3) # if not provided, initialize as though it were sequence length of 1024 - self.scale = nn.Parameter(torch.ones(1, heads, 1, 1) * scale_init_value) - - # talking heads - self.talking_heads = talking_heads - if talking_heads: - self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - - # head scaling - self.head_scale = head_scale - if head_scale: - self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1)) - - # 
explicit topk sparse attention - self.sparse_topk = sparse_topk - - # entmax - self.attn_fn = entmax15 if use_entmax15 else F.softmax - - # add memory key / values - self.num_mem_kv = num_mem_kv - if num_mem_kv > 0: - self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - - # attention on attention - self.attn_on_attn = on_attn - self.to_out = nn.Sequential(nn.Linear(v_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(v_dim, dim) - - self.rel_pos_bias = rel_pos_bias - if rel_pos_bias: - assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' - self.rel_pos = RelativePositionBias(scale=dim_head ** 0.5, causal=causal, heads=heads, - num_buckets=rel_pos_num_buckets, max_distance=rel_pos_max_distance) - - # init output projection 0 - if zero_init_output: - init_zero_(self.to_out) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - attn_mask=None, - sinusoidal_emb=None, - rotary_pos_emb=None, - prev_attn=None, - mem=None, - layer_past=None, - ): - b, n, _, h, talking_heads, collab_heads, head_scale, scale, device, has_context = *x.shape, self.heads, self.talking_heads, self.collab_heads, self.head_scale, self.scale, x.device, exists( - context) - kv_input = default(context, x) - - q_input = x - k_input = kv_input - v_input = kv_input - - if exists(mem): - k_input = torch.cat((mem, k_input), dim=-2) - v_input = torch.cat((mem, v_input), dim=-2) - - if exists(sinusoidal_emb): - # in shortformer, the query would start at a position offset depending on the past cached memory - offset = k_input.shape[-2] - q_input.shape[-2] - q_input = q_input + sinusoidal_emb(q_input, offset=offset) - k_input = k_input + sinusoidal_emb(k_input) - - q = self.to_q(q_input) - k = self.to_k(k_input) - v = self.to_v(v_input) - - if not collab_heads: - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) - else: - q = einsum('b i d, h d -> b h i d', q, self.collab_mixing) - k = rearrange(k, 'b n d -> b () n d') - v = rearrange(v, 'b n (h d) -> b h n d', h=h) - - if layer_past is not None: - past_key, past_value = layer_past - k = torch.cat([past_key, k], dim=-2) - v = torch.cat([past_value, v], dim=-2) - k_cache = k - v_cache = v - - if exists(rotary_pos_emb) and not has_context: - l = rotary_pos_emb.shape[-1] - (ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v)) - ql, kl, vl = map(lambda t: apply_rotary_pos_emb(t, rotary_pos_emb), (ql, kl, vl)) - q, k, v = map(lambda t: torch.cat(t, dim=-1), ((ql, qr), (kl, kr), (vl, vr))) - - input_mask = None - if any(map(exists, (mask, context_mask))): - q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) - k_mask = q_mask if not exists(context) else context_mask - k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) - q_mask = rearrange(q_mask, 'b i -> b () i ()') - k_mask = rearrange(k_mask, 'b j -> b () () j') - input_mask = q_mask * k_mask - - if self.num_mem_kv > 0: - mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) - k = torch.cat((mem_k, k), dim=-2) - v = torch.cat((mem_v, v), dim=-2) - if exists(input_mask): - input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) - - if collab_heads: - k = k.expand(-1, h, -1, -1) - - if self.qk_norm: - q, k = map(l2norm, (q, k)) - scale = 1 / (self.scale.exp().clamp(min=1e-2)) - - dots 
= einsum('b h i d, b h j d -> b h i j', q, k) * scale - mask_value = max_neg_value(dots) - - if exists(prev_attn): - dots = dots + prev_attn - - pre_softmax_attn = dots.clone() - - if talking_heads: - dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() - - if self.rel_pos_bias: - dots = self.rel_pos(dots) - - if exists(input_mask): - dots.masked_fill_(~input_mask, mask_value) - del input_mask - - if exists(attn_mask): - assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4' - if attn_mask.ndim == 2: - attn_mask = rearrange(attn_mask, 'i j -> () () i j') - elif attn_mask.ndim == 3: - attn_mask = rearrange(attn_mask, 'h i j -> () h i j') - dots.masked_fill_(~attn_mask, mask_value) - - if exists(self.max_attend_past): - i, j = dots.shape[-2:] - range_q = torch.arange(j - i, j, device=device) - range_k = torch.arange(j, device=device) - dist = rearrange(range_q, 'i -> () () i ()') - rearrange(range_k, 'j -> () () () j') - mask = dist > self.max_attend_past - dots.masked_fill_(mask, mask_value) - del mask - - if self.causal: - i, j = dots.shape[-2:] - r = torch.arange(i, device=device) - mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') - mask = F.pad(mask, (j - i, 0), value=False) - dots.masked_fill_(mask, mask_value) - del mask - - if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: - top, _ = dots.topk(self.sparse_topk, dim=-1) - vk = top[..., -1].unsqueeze(-1).expand_as(dots) - mask = dots < vk - dots.masked_fill_(mask, mask_value) - del mask - - attn = self.attn_fn(dots, dim=-1) - post_softmax_attn = attn.clone() - - attn = self.dropout(attn) - - if talking_heads: - attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() - - out = einsum('b h i j, b h j d -> b h i d', attn, v) - - if head_scale: - out = out * self.head_scale_params - - out = rearrange(out, 'b h n d -> b n (h d)') - - if exists(self.to_v_gate): - gates = self.to_v_gate(x) - out = out * gates.sigmoid() - - intermediates = Intermediates( - pre_softmax_attn=pre_softmax_attn, - post_softmax_attn=post_softmax_attn - ) - - return self.to_out(out), intermediates, k_cache, v_cache - - -class AttentionLayers(nn.Module): - def __init__( - self, - dim, - depth, - heads=8, - causal=False, - cross_attend=False, - only_cross=False, - use_scalenorm=False, - use_rms_scaleshift_norm=False, - use_rmsnorm=False, - use_rezero=False, - alibi_pos_bias=False, - alibi_num_heads=None, - alibi_learned=False, - position_infused_attn=False, - rotary_pos_emb=False, - rotary_emb_dim=None, - custom_layers=None, - sandwich_coef=None, - par_ratio=None, - residual_attn=False, - cross_residual_attn=False, - macaron=False, - pre_norm=True, - gate_residual=False, - scale_residual=False, - shift_tokens=0, - sandwich_norm=False, - use_qk_norm_attn=False, - qk_norm_attn_seq_len=None, - zero_init_branch_output=False, - **kwargs - ): - super().__init__() - ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) - attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) - - dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) - - self.dim = dim - self.depth = depth - self.layers = nn.ModuleList([]) - self.causal = causal - - rel_pos_bias = 'rel_pos_bias' in attn_kwargs - self.has_pos_emb = position_infused_attn or rel_pos_bias or rotary_pos_emb - self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None - - rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32) - 
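- # partial rotary: Attention.forward only rotates the first rotary_emb_dim
- # channels of q/k/v (the (ql, qr) split above), so this dimension defaults to
- # half a head and is floored at 32.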
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim) if rotary_pos_emb else None - - assert not ( - alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both' - - if alibi_pos_bias: - alibi_num_heads = default(alibi_num_heads, heads) - assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads' - alibi_pos_klass = LearnedAlibiPositionalBias if alibi_learned or not causal else AlibiPositionalBias - self.rel_pos = alibi_pos_klass(heads=alibi_num_heads, bidirectional=not causal) - else: - self.rel_pos = None - - assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm' - self.pre_norm = pre_norm - self.sandwich_norm = sandwich_norm - - self.residual_attn = residual_attn - self.cross_residual_attn = cross_residual_attn - self.cross_attend = cross_attend - - norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm - norm_class = RMSNorm if use_rmsnorm else norm_class - norm_class = RMSScaleShiftNorm if use_rms_scaleshift_norm else norm_class - norm_fn = partial(norm_class, dim) - - norm_fn = nn.Identity if use_rezero else norm_fn - branch_fn = Rezero if use_rezero else None - - if cross_attend and not only_cross: - default_block = ('a', 'c', 'f') - elif cross_attend and only_cross: - default_block = ('c', 'f') - else: - default_block = ('a', 'f') - - if macaron: - default_block = ('f',) + default_block - - # qk normalization - - if use_qk_norm_attn: - attn_scale_init_value = -math.log(math.log2(qk_norm_attn_seq_len ** 2 - qk_norm_attn_seq_len)) if exists( - qk_norm_attn_seq_len) else None - attn_kwargs = {**attn_kwargs, 'qk_norm': True, 'scale_init_value': attn_scale_init_value} - - # zero init - - if zero_init_branch_output: - attn_kwargs = {**attn_kwargs, 'zero_init_output': True} - ff_kwargs = {**ff_kwargs, 'zero_init_output': True} - - # calculate layer block order - - if exists(custom_layers): - layer_types = custom_layers - elif exists(par_ratio): - par_depth = depth * len(default_block) - assert 1 < par_ratio <= par_depth, 'par ratio out of range' - default_block = tuple(filter(not_equals('f'), default_block)) - par_attn = par_depth // par_ratio - depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper - par_width = (depth_cut + depth_cut // par_attn) // par_attn - assert len(default_block) <= par_width, 'default block is too large for par_ratio' - par_block = default_block + ('f',) * (par_width - len(default_block)) - par_head = par_block * par_attn - layer_types = par_head + ('f',) * (par_depth - len(par_head)) - elif exists(sandwich_coef): - assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' - layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef - else: - layer_types = default_block * depth - - self.layer_types = layer_types - self.num_attn_layers = len(list(filter(equals('a'), layer_types))) - - # calculate token shifting - - shift_tokens = cast_tuple(shift_tokens, len(layer_types)) - - # iterate and construct layers - - for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)): - is_last_layer = ind == (len(self.layer_types) - 1) - - if layer_type == 'a': - layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) - elif layer_type == 'c': - layer = Attention(dim, heads=heads, **attn_kwargs) - elif layer_type == 'f': - layer = FeedForward(dim, **ff_kwargs) - layer = 
layer if not macaron else Scale(0.5, layer) - else: - raise Exception(f'invalid layer type {layer_type}') - - if layer_shift_tokens > 0: - shift_range_upper = layer_shift_tokens + 1 - shift_range_lower = -layer_shift_tokens if not causal else 0 - layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer) - - if exists(branch_fn): - layer = branch_fn(layer) - - residual_fn = GRUGating if gate_residual else Residual - residual = residual_fn(dim, scale_residual=scale_residual) - - layer_uses_qk_norm = use_qk_norm_attn and layer_type in ('a', 'c') - - pre_branch_norm = norm_fn() if pre_norm and not layer_uses_qk_norm else None - post_branch_norm = norm_fn() if sandwich_norm or layer_uses_qk_norm else None - post_main_norm = norm_fn() if not pre_norm and not is_last_layer else None - - norms = nn.ModuleList([ - pre_branch_norm, - post_branch_norm, - post_main_norm - ]) - - self.layers.append(nn.ModuleList([ - norms, - layer, - residual - ])) - - def forward( - self, - x, - context=None, - full_context=None, # for passing a list of hidden states from an encoder - mask=None, - context_mask=None, - attn_mask=None, - mems=None, - return_hiddens=False, - norm_scale_shift_inp=None, - past_key_values=None, - expected_seq_len=None, - ): - - assert not (self.cross_attend ^ (exists(context) or exists( - full_context))), 'context must be passed in if cross_attend is set to True' - assert context is None or full_context is None, 'only one of full_context or context can be provided' - - hiddens = [] - intermediates = [] - prev_attn = None - prev_cross_attn = None - - mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers - norm_args = {} - if exists(norm_scale_shift_inp): - norm_args['norm_scale_shift_inp'] = norm_scale_shift_inp - - rotary_pos_emb = None - if exists(self.rotary_pos_emb): - if not self.training and self.causal: - assert expected_seq_len is not None, "To decode a transformer with rotary embeddings, you must specify an `expected_seq_len`" - elif expected_seq_len is None: - expected_seq_len = 0 - seq_len = x.shape[1] - if past_key_values is not None: - seq_len += past_key_values[0][0].shape[-2] - max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + seq_len, mems)) + [expected_seq_len]) - rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device) - - present_key_values = [] - cross_attn_count = 0 - for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): - if layer_type == 'a': - layer_mem = mems.pop(0) if mems else None - - residual = x - - pre_branch_norm, post_branch_norm, post_main_norm = norm - - if exists(pre_branch_norm): - x = pre_branch_norm(x, **norm_args) - - if layer_type == 'a' or layer_type == 'c': - if past_key_values is not None: - layer_kv = past_key_values.pop(0) - layer_past = tuple(s.to(x.device) for s in layer_kv) - else: - layer_past = None - - if layer_type == 'a': - out, inter, k, v = checkpoint(block, x, None, mask, None, attn_mask, self.pia_pos_emb, rotary_pos_emb, - prev_attn, layer_mem, layer_past) - elif layer_type == 'c': - if exists(full_context): - out, inter, k, v = checkpoint(block, x, full_context[cross_attn_count], mask, context_mask, None, None, - None, prev_attn, None, layer_past) - else: - out, inter, k, v = checkpoint(block, x, context, mask, context_mask, None, None, None, prev_attn, None, layer_past) - elif layer_type == 'f': - out = checkpoint(block, x) - - if layer_type == 'a' or layer_type == 'c' and present_key_values is not None: - 
present_key_values.append((k.detach(), v.detach())) - - if exists(post_branch_norm): - out = post_branch_norm(out, **norm_args) - - x = residual_fn(out, residual) - - if layer_type in ('a', 'c'): - intermediates.append(inter) - - if layer_type == 'a' and self.residual_attn: - prev_attn = inter.pre_softmax_attn - elif layer_type == 'c' and self.cross_residual_attn: - prev_cross_attn = inter.pre_softmax_attn - - if exists(post_main_norm): - x = post_main_norm(x, **norm_args) - - if layer_type == 'c': - cross_attn_count += 1 - - if layer_type == 'f': - hiddens.append(x) - - if return_hiddens: - intermediates = LayerIntermediates( - hiddens=hiddens, - attn_intermediates=intermediates, - past_key_values=present_key_values - ) - - return x, intermediates - - return x - - -class Encoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on encoder' - super().__init__(causal=False, **kwargs) - - -class Decoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on decoder' - super().__init__(causal=True, **kwargs) - - -class CrossAttender(AttentionLayers): - def __init__(self, **kwargs): - super().__init__(cross_attend=True, only_cross=True, **kwargs) - - -class ViTransformerWrapper(nn.Module): - def __init__( - self, - *, - image_size, - patch_size, - attn_layers, - num_classes=None, - dropout=0., - emb_dropout=0. - ): - super().__init__() - assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder' - assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size' - dim = attn_layers.dim - num_patches = (image_size // patch_size) ** 2 - patch_dim = 3 * patch_size ** 2 - - self.patch_size = patch_size - - self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) - self.patch_to_embedding = nn.Linear(patch_dim, dim) - self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) - self.dropout = nn.Dropout(emb_dropout) - - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - self.mlp_head = FeedForward(dim, dim_out=num_classes, dropout=dropout) if exists(num_classes) else None - - def forward( - self, - img, - return_embeddings=False - ): - p = self.patch_size - - x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p) - x = self.patch_to_embedding(x) - b, n, _ = x.shape - - cls_tokens = repeat(self.cls_token, '() n d -> b n d', b=b) - x = torch.cat((cls_tokens, x), dim=1) - x = x + self.pos_embedding[:, :(n + 1)] - x = self.dropout(x) - - x = self.attn_layers(x) - x = self.norm(x) - - if not exists(self.mlp_head) or return_embeddings: - return x - - return self.mlp_head(x[:, 0]) - - -class TransformerWrapper(nn.Module): - def __init__( - self, - *, - num_tokens, - max_seq_len, - attn_layers, - emb_dim=None, - max_mem_len=0., - shift_mem_down=0, - emb_dropout=0., - num_memory_tokens=None, - tie_embedding=False, - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - emb_dim = default(emb_dim, dim) - - self.max_seq_len = max_seq_len - self.max_mem_len = max_mem_len - self.shift_mem_down = shift_mem_down - - self.token_emb = nn.Embedding(num_tokens, emb_dim) - self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != 
dim else nn.Identity() - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.init_() - - self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() - - # memory tokens (like [cls]) from Memory Transformers paper - num_memory_tokens = default(num_memory_tokens, 0) - self.num_memory_tokens = num_memory_tokens - if num_memory_tokens > 0: - self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) - - def init_(self): - nn.init.kaiming_normal_(self.token_emb.weight) - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_hiddens=False, - return_attn=False, - mems=None, - use_cache=False, - **kwargs - ): - b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens - x = self.token_emb(x) - x = x + self.pos_emb(x) - x = self.emb_dropout(x) - - x = self.project_emb(x) - - if num_mem > 0: - mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) - x = torch.cat((mem, x), dim=1) - - # auto-handle masking after appending memory tokens - if exists(mask): - mask = F.pad(mask, (num_mem, 0), value=True) - - if self.shift_mem_down and exists(mems): - mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:] - mems = [*mems_r, *mems_l] - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - mem, x = x[:, :num_mem], x[:, num_mem:] - - out = self.to_logits(x) if not return_embeddings else x - - if return_hiddens: - hiddens = intermediates.hiddens - return out, hiddens - - res = [out] - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - res.append(attn_maps) - if use_cache: - res.append(intermediates.past_key_values) - - if len(res) > 1: - return tuple(res) - return res[0] - - -class ContinuousTransformerWrapper(nn.Module): - def __init__( - self, - *, - max_seq_len, - attn_layers, - dim_in=None, - dim_out=None, - emb_dim=None, - emb_dropout=0., - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - - self.max_seq_len = max_seq_len - - self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_in = nn.Linear(dim_in, dim) if exists(dim_in) else nn.Identity() - - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.project_out = nn.Linear(dim, dim_out) if exists(dim_out) else nn.Identity() - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_attn=False, - mems=None, - use_cache=False, - **kwargs - ): - b, n, _, device = *x.shape, x.device - - x = self.project_in(x) - x = x + self.pos_emb(x) - x = self.emb_dropout(x) - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - out = self.project_out(x) if not return_embeddings else x - - res = [out] - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - res.append(attn_maps) - if use_cache: - res.append(intermediates.past_key_values) - - if len(res) > 1: - return tuple(res) - return res[0] - diff --git a/spaces/jbilcke-hf/ai-comic-factory/src/components/icons/hugging-clap.tsx b/spaces/jbilcke-hf/ai-comic-factory/src/components/icons/hugging-clap.tsx deleted file mode 100644 index 
ffb37ae6183cd8ce7fe7c212e383a6510eba2485..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-comic-factory/src/components/icons/hugging-clap.tsx +++ /dev/null @@ -1,8 +0,0 @@ -export function HuggingClap() { - return ( - - ) -} \ No newline at end of file diff --git a/spaces/jbyun/music-separation/app.py b/spaces/jbyun/music-separation/app.py deleted file mode 100644 index 43ddf2d955852cb924675d5c86b5fb5195affdb5..0000000000000000000000000000000000000000 --- a/spaces/jbyun/music-separation/app.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import gradio as gr -from scipy.io.wavfile import write - - -def inference(audio): - os.makedirs("out", exist_ok=True) - write('test.wav', audio[0], audio[1]) - os.system("python3 -m demucs.separate -n htdemucs --two-stems=vocals test.wav -o out") - return "./out/htdemucs/test/vocals.wav","./out/htdemucs/test/no_vocals.wav" - -title = "Demucs Music Source Separation (v4)" -article = "

Music Source Separation in the Waveform Domain | Github Repo | //THAFX

" - -examples=[['test.mp3']] -gr.Interface( - inference, - gr.Audio(type="numpy", label="Input"), - [gr.Audio(type="filepath", label="Vocals"),gr.Audio(type="filepath", label="No Vocals / Instrumental")], - title=title, - article=article, - examples=examples - ).launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/jgurzoni/image_background_swapper/saicinpainting/training/modules/squeeze_excitation.py b/spaces/jgurzoni/image_background_swapper/saicinpainting/training/modules/squeeze_excitation.py deleted file mode 100644 index d1d902bb30c071acbc0fa919a134c80fed86bd6c..0000000000000000000000000000000000000000 --- a/spaces/jgurzoni/image_background_swapper/saicinpainting/training/modules/squeeze_excitation.py +++ /dev/null @@ -1,20 +0,0 @@ -import torch.nn as nn - - -class SELayer(nn.Module): - def __init__(self, channel, reduction=16): - super(SELayer, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.fc = nn.Sequential( - nn.Linear(channel, channel // reduction, bias=False), - nn.ReLU(inplace=True), - nn.Linear(channel // reduction, channel, bias=False), - nn.Sigmoid() - ) - - def forward(self, x): - b, c, _, _ = x.size() - y = self.avg_pool(x).view(b, c) - y = self.fc(y).view(b, c, 1, 1) - res = x * y.expand_as(x) - return res diff --git a/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/analysis/SuppleFig4a_Hyperparameter_radius.py b/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/analysis/SuppleFig4a_Hyperparameter_radius.py deleted file mode 100644 index cbb71a29d470f57a2614fba16196523bf8b2d513..0000000000000000000000000000000000000000 --- a/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/analysis/SuppleFig4a_Hyperparameter_radius.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -# Author: LE YUAN -# Date: 2020-11-06 -# https://blog.csdn.net/roguesir/article/details/77839721 - -import matplotlib.pyplot as plt -from matplotlib import rc - - -with open('../../Data/output_hyper/MAEs--all--radius0--ngram1--dim10--layer_gnn3--window11--layer_cnn3--layer_output3--lr1e-3--lr_decay0.5--decay_interval10--weight_decay1e-6--iteration50.txt', 'r') as infile1 : - lines1 = infile1.readlines()[1:] - -with open('../../Data/output_hyper/MAEs--all--radius1--ngram2--dim10--layer_gnn3--window11--layer_cnn3--layer_output3--lr1e-3--lr_decay0.5--decay_interval10--weight_decay1e-6--iteration50.txt', 'r') as infile2 : - lines2 = infile2.readlines()[1:] - -with open('../../Data/output_hyper/MAEs--all--radius2--ngram3--dim10--layer_gnn3--window11--layer_cnn3--layer_output3--lr1e-3--lr_decay0.5--decay_interval10--weight_decay1e-6--iteration50.txt', 'r') as infile3 : - lines3 = infile3.readlines()[1:] - -epoch_1 = list() -R2_1 = list() -for line in lines1[:30] : - data = line.strip().split('\t') - # print(data) - epoch_line = int(data[0]) - R2_line = float(data[-2]) - if epoch_line%2 == 0 or epoch_line in [1,30] : - epoch_1.append(epoch_line) - R2_1.append(R2_line) - -epoch_2 = list() -R2_2 = list() -for line in lines2[:30] : - data = line.strip().split('\t') - # print(data) - epoch_line = int(data[0]) - R2_line = float(data[-2]) - if epoch_line%2 == 0 or epoch_line in [1,30] : - epoch_2.append(epoch_line) - R2_2.append(R2_line) - -epoch_3 = list() -R2_3 = list() -for line in lines3[:30] : - data = line.strip().split('\t') - # print(data) - epoch_line = int(data[0]) - R2_line = float(data[-2]) - if epoch_line%2 == 0 or epoch_line in [1,30] : - epoch_3.append(epoch_line) - R2_3.append(R2_line) - -plt.figure(figsize=(1.5,1.5)) - -# To solve the 
'Helvetica' font cannot be used in PDF file -# https://stackoverflow.com/questions/59845568/the-pdf-backend-does-not-currently-support-the-selected-font -rc('font',**{'family':'serif','serif':['Helvetica']}) -plt.rcParams['pdf.fonttype'] = 42 - -plt.axes([0.12,0.12,0.83,0.83]) - -# plt.rcParams['xtick.direction'] = 'in' -# plt.rcParams['ytick.direction'] = 'in' - -plt.tick_params(direction='in') -plt.tick_params(which='major',length=1.5) -plt.tick_params(which='major',width=0.4) - -plt.plot(epoch_1,R2_1,color='#FC9E05',linestyle='dashed',linewidth=0.75,marker='s',markerfacecolor='#FC9E05', markersize=1,label='0-radius & 1-gram') -plt.plot(epoch_2,R2_2,color='#2166ac',linestyle='dashed',linewidth=0.75,marker='^',markerfacecolor='#2166ac', markersize=1,label='1-radius & 2-gram') -plt.plot(epoch_3,R2_3,color='#b2182b',linestyle='dashed',linewidth=0.75,marker='o',markerfacecolor='#b2182b', markersize=1,label='2-radius & 3-gram') - -plt.rcParams['font.family'] = 'Helvetica' - -plt.xticks([0,5,10,15,20,25,30]) -plt.yticks([0,0.1,0.2,0.3,0.4,0.5,0.6,0.7]) -# plt.yticks([0,0.2,0.4,0.6,0.8]) - -plt.xlabel('Epoch', fontsize=7) -# plt.ylabel('R2', fontsize=7) -plt.ylabel('R$^2$', fontsize=7) -plt.xticks(fontsize=6) -plt.yticks(fontsize=6) -plt.legend(frameon=False, prop={"size":6}) - -ax = plt.gca() -ax.spines['bottom'].set_linewidth(0.5) -ax.spines['left'].set_linewidth(0.5) -ax.spines['top'].set_linewidth(0.5) -ax.spines['right'].set_linewidth(0.5) - -plt.savefig("../../Results/figures/SuppleFig4a.pdf", dpi=400, bbox_inches='tight') - - diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/PublicKey/ElGamal.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/PublicKey/ElGamal.py deleted file mode 100644 index 3b1084056b612768775d735d6f6bc5c7eece1d63..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/PublicKey/ElGamal.py +++ /dev/null @@ -1,286 +0,0 @@ -# -# ElGamal.py : ElGamal encryption/decryption and signatures -# -# Part of the Python Cryptography Toolkit -# -# Originally written by: A.M. Kuchling -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# =================================================================== - -__all__ = ['generate', 'construct', 'ElGamalKey'] - -from Crypto import Random -from Crypto.Math.Primality import ( generate_probable_safe_prime, - test_probable_prime, COMPOSITE ) -from Crypto.Math.Numbers import Integer - -# Generate an ElGamal key with N bits -def generate(bits, randfunc): - """Randomly generate a fresh, new ElGamal key. 
- - The key will be safe for use for both encryption and signature - (although it should be used for **only one** purpose). - - Args: - bits (int): - Key length, or size (in bits) of the modulus *p*. - The recommended value is 2048. - randfunc (callable): - Random number generation function; it should accept - a single integer *N* and return a string of random - *N* random bytes. - - Return: - an :class:`ElGamalKey` object - """ - - obj=ElGamalKey() - - # Generate a safe prime p - # See Algorithm 4.86 in Handbook of Applied Cryptography - obj.p = generate_probable_safe_prime(exact_bits=bits, randfunc=randfunc) - q = (obj.p - 1) >> 1 - - # Generate generator g - while 1: - # Choose a square residue; it will generate a cyclic group of order q. - obj.g = pow(Integer.random_range(min_inclusive=2, - max_exclusive=obj.p, - randfunc=randfunc), 2, obj.p) - - # We must avoid g=2 because of Bleichenbacher's attack described - # in "Generating ElGamal signatures without knowning the secret key", - # 1996 - if obj.g in (1, 2): - continue - - # Discard g if it divides p-1 because of the attack described - # in Note 11.67 (iii) in HAC - if (obj.p - 1) % obj.g == 0: - continue - - # g^{-1} must not divide p-1 because of Khadir's attack - # described in "Conditions of the generator for forging ElGamal - # signature", 2011 - ginv = obj.g.inverse(obj.p) - if (obj.p - 1) % ginv == 0: - continue - - # Found - break - - # Generate private key x - obj.x = Integer.random_range(min_inclusive=2, - max_exclusive=obj.p-1, - randfunc=randfunc) - # Generate public key y - obj.y = pow(obj.g, obj.x, obj.p) - return obj - -def construct(tup): - r"""Construct an ElGamal key from a tuple of valid ElGamal components. - - The modulus *p* must be a prime. - The following conditions must apply: - - .. math:: - - \begin{align} - &1 < g < p-1 \\ - &g^{p-1} = 1 \text{ mod } 1 \\ - &1 < x < p-1 \\ - &g^x = y \text{ mod } p - \end{align} - - Args: - tup (tuple): - A tuple with either 3 or 4 integers, - in the following order: - - 1. Modulus (*p*). - 2. Generator (*g*). - 3. Public key (*y*). - 4. Private key (*x*). Optional. - - Raises: - ValueError: when the key being imported fails the most basic ElGamal validity checks. - - Returns: - an :class:`ElGamalKey` object - """ - - obj=ElGamalKey() - if len(tup) not in [3,4]: - raise ValueError('argument for construct() wrong length') - for i in range(len(tup)): - field = obj._keydata[i] - setattr(obj, field, Integer(tup[i])) - - fmt_error = test_probable_prime(obj.p) == COMPOSITE - fmt_error |= obj.g<=1 or obj.g>=obj.p - fmt_error |= pow(obj.g, obj.p-1, obj.p)!=1 - fmt_error |= obj.y<1 or obj.y>=obj.p - if len(tup)==4: - fmt_error |= obj.x<=1 or obj.x>=obj.p - fmt_error |= pow(obj.g, obj.x, obj.p)!=obj.y - - if fmt_error: - raise ValueError("Invalid ElGamal key components") - - return obj - -class ElGamalKey(object): - r"""Class defining an ElGamal key. - Do not instantiate directly. - Use :func:`generate` or :func:`construct` instead. - - :ivar p: Modulus - :vartype d: integer - - :ivar g: Generator - :vartype e: integer - - :ivar y: Public key component - :vartype y: integer - - :ivar x: Private key component - :vartype x: integer - """ - - #: Dictionary of ElGamal parameters. - #: - #: A public key will only have the following entries: - #: - #: - **y**, the public key. - #: - **g**, the generator. - #: - **p**, the modulus. - #: - #: A private key will also have: - #: - #: - **x**, the private key. 
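- # Field order matters: construct() assigns the components of its input tuple
- # to these attributes positionally (p, g, y, and optionally x).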
-    _keydata=['p', 'g', 'y', 'x']
-
-    def __init__(self, randfunc=None):
-        if randfunc is None:
-            randfunc = Random.new().read
-        self._randfunc = randfunc
-
-    def _encrypt(self, M, K):
-        a=pow(self.g, K, self.p)
-        b=( pow(self.y, K, self.p)*M ) % self.p
-        return [int(a), int(b)]
-
-    def _decrypt(self, M):
-        if (not hasattr(self, 'x')):
-            raise TypeError('Private key not available in this object')
-        r = Integer.random_range(min_inclusive=2,
-                                 max_exclusive=self.p-1,
-                                 randfunc=self._randfunc)
-        a_blind = (pow(self.g, r, self.p) * M[0]) % self.p
-        ax=pow(a_blind, self.x, self.p)
-        plaintext_blind = (ax.inverse(self.p) * M[1] ) % self.p
-        plaintext = (plaintext_blind * pow(self.y, r, self.p)) % self.p
-        return int(plaintext)
-
-    def _sign(self, M, K):
-        if (not hasattr(self, 'x')):
-            raise TypeError('Private key not available in this object')
-        p1=self.p-1
-        K = Integer(K)
-        if (K.gcd(p1)!=1):
-            raise ValueError('Bad K value: GCD(K,p-1)!=1')
-        a=pow(self.g, K, self.p)
-        t=(Integer(M)-self.x*a) % p1
-        while t<0: t=t+p1
-        b=(t*K.inverse(p1)) % p1
-        return [int(a), int(b)]
-
-    def _verify(self, M, sig):
-        sig = [Integer(x) for x in sig]
-        if sig[0]<1 or sig[0]>self.p-1:
-            return 0
-        v1=pow(self.y, sig[0], self.p)
-        v1=(v1*pow(sig[0], sig[1], self.p)) % self.p
-        v2=pow(self.g, M, self.p)
-        if v1==v2:
-            return 1
-        return 0
-
-    def has_private(self):
-        """Whether this is an ElGamal private key"""
-
-        if hasattr(self, 'x'):
-            return 1
-        else:
-            return 0
-
-    def can_encrypt(self):
-        return True
-
-    def can_sign(self):
-        return True
-
-    def publickey(self):
-        """A matching ElGamal public key.
-
-        Returns:
-            a new :class:`ElGamalKey` object
-        """
-        return construct((self.p, self.g, self.y))
-
-    def __eq__(self, other):
-        if bool(self.has_private()) != bool(other.has_private()):
-            return False
-
-        result = True
-        for comp in self._keydata:
-            result = result and (getattr(self, comp, None) ==
-                                 getattr(other, comp, None))
-        return result
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __getstate__(self):
-        # ElGamal key is not picklable
-        from pickle import PicklingError
-        raise PicklingError
-
-    # Methods defined in PyCrypto that we don't support anymore
-
-    def sign(self, M, K):
-        raise NotImplementedError
-
-    def verify(self, M, signature):
-        raise NotImplementedError
-
-    def encrypt(self, plaintext, K):
-        raise NotImplementedError
-
-    def decrypt(self, ciphertext):
-        raise NotImplementedError
-
-    def blind(self, M, B):
-        raise NotImplementedError
-
-    def unblind(self, M, B):
-        raise NotImplementedError
-
-    def size(self):
-        raise NotImplementedError
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/__init__.py
deleted file mode 100644
index ed00764f7c193ca9bcd0bf67196da59c30048a28..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""fontTools.ttLib -- a package for dealing with TrueType fonts."""
-
-from fontTools.misc.loggingTools import deprecateFunction
-import logging
-
-
-log = logging.getLogger(__name__)
-
-
-class TTLibError(Exception):
-    pass
-
-
-class TTLibFileIsCollectionError(TTLibError):
-    pass
-
-
-@deprecateFunction("use logging instead", category=DeprecationWarning)
-def debugmsg(msg):
-    import time
-
-    print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
-
-
-from 
fontTools.ttLib.ttFont import * -from fontTools.ttLib.ttCollection import TTCollection diff --git a/spaces/jordonpeter01/MusicGen/Makefile b/spaces/jordonpeter01/MusicGen/Makefile deleted file mode 100644 index 5bfd89dd833d7448b21073eb6ee7cfac1d5157dd..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -default: linter tests - -install: - pip install -U pip - pip install -U -e '.[dev]' - -linter: - flake8 audiocraft && mypy audiocraft - flake8 tests && mypy tests - -tests: - coverage run -m pytest tests - coverage report --include 'audiocraft/*' - -docs: - pdoc3 --html -o docs -f audiocraft - -dist: - python setup.py sdist - -.PHONY: linter tests docs dist diff --git a/spaces/junkmind/SOTER/app.py b/spaces/junkmind/SOTER/app.py deleted file mode 100644 index 88458ba6cef00dc648c9223e339acc06d707aa0c..0000000000000000000000000000000000000000 --- a/spaces/junkmind/SOTER/app.py +++ /dev/null @@ -1,87 +0,0 @@ -import argparse -import os -import re -import time - -import torch -from kernel_utils import VideoReader, FaceExtractor, confident_strategy, predict_on_video -from training.zoo.classifiers import DeepFakeClassifier - - -import gradio as gr - -def model_fn(model_dir): - model_path = os.path.join(model_dir, 'b7_ns_best.pth') - model = DeepFakeClassifier(encoder="tf_efficientnet_b7_ns") # default: CPU - checkpoint = torch.load(model_path, map_location="cpu") - state_dict = checkpoint.get("state_dict", checkpoint) - model.load_state_dict({re.sub("^module.", "", k): v for k, v in state_dict.items()}, strict=True) - model.eval() - del checkpoint - #models.append(model.half()) - - return model - -def convert_result(pred, class_names=["Real", "Fake"]): - preds = [pred, 1 - pred] - assert len(class_names) == len(preds), "Class / Prediction should have the same length" - return {n: float(p) for n, p in zip(class_names, preds)} - -def predict_fn(video): - start = time.time() - prediction = predict_on_video(face_extractor=meta["face_extractor"], - video_path=video, - batch_size=meta["fps"], - input_size=meta["input_size"], - models=model, - strategy=meta["strategy"], - apply_compression=False, - device='cpu') - - elapsed_time = round(time.time() - start, 2) - - prediction = convert_result(prediction) - - return prediction, elapsed_time - -# Create title, description and article strings -title = "SOTER" -description = "Preserving authenticity: Uncovering deepfakes with innovative technology" - -example_list = ["examples/" + str(p) for p in os.listdir("examples/")] - -# Environments -model_dir = 'weights' -frames_per_video = 32 -video_reader = VideoReader() -video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) -face_extractor = FaceExtractor(video_read_fn) -input_size = 380 -strategy = confident_strategy -class_names = ["Real", "Fake"] - -meta = {"fps": 32, - "face_extractor": face_extractor, - "input_size": input_size, - "strategy": strategy} - -model = model_fn(model_dir) - -""" -if __name__ == '__main__': - video_path = "examples/nlurbvsozt.mp4" - model = model_fn(model_dir) - a, b = predict_fn(video_path) - print(a, b) -""" -# Create the Gradio demo -demo = gr.Interface(fn=predict_fn, # mapping function from input to output - inputs=gr.Video(), - outputs=[gr.Label(num_top_classes=2, label="Predictions"), # what are the outputs? 
- gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs - examples=example_list, - title=title, - description=description) - -# Launch the demo! -demo.launch(debug=False,) # Hugging face space don't need shareable_links diff --git a/spaces/kaleidoscope-data/data-cleaning-llm/app/dataclean_hf.py b/spaces/kaleidoscope-data/data-cleaning-llm/app/dataclean_hf.py deleted file mode 100644 index 34e2c313afd597ea9b2d46d98c95bf2192f58c2d..0000000000000000000000000000000000000000 --- a/spaces/kaleidoscope-data/data-cleaning-llm/app/dataclean_hf.py +++ /dev/null @@ -1,153 +0,0 @@ -import json -from io import StringIO -import csv -import pandas as pd -import numpy as np -import streamlit as st - -from sqlalchemy import create_engine -# from yallmf.utils import run_with_timeout -import openai - -from tqdm import tqdm - - -openai.api_key = st.secrets["OPENAI_API_KEY"] - -def clean_data( - input_product_names: pd.Series, - input_brands: pd.Series, - input_product_categories: pd.Series, - category_taxonomy: dict): - - output_cols = ['brand', 'product_category', 'sub_product_category', 'strain_name'] - ncols = len(output_cols) - - p1 = f''' - I am going to provide a data set of marijuana products and their metadata. Using the information I provide, I want you to provide me with the following information about the products. - - - Brand (brand) - - product category (product_category) - - sub product category (sub_product_category) - - strain name (strain_name) - - The following JSON shows all the acceptable Product Categories and their Sub Product Categories. Strictly adhere to the below mapping for valid product_category to sub_product_category relationships: - - {json.dumps(category_taxonomy)} - - Additional requirements: - - - The input data set in CSV format, with commas as field delimiter and newline as row delimiter. - - Do not automatically assume that the information in the data set I provide is accurate. - - Leave the 'sub_product_category' field blank unless there's a clear and direct match with one of the categories provided in the list.If there is no explicit information to confidently assign a sub_product_category, default to leaving it blank. - - Strain names are only applicable for the following product categories: concentrate, preroll, vape, flower - - Look for clues in the product name to determine what brand/ product category/ sub product category/ and strain name the product should fall under. For Vape products, consider the words before 'Cartridge' or 'Cart' in the product name as potential strain names. - - Every row of the Output CSV must have EXACTLY {ncols} columns. - - When a field is left empty (e.g., 'sub_product_category' or 'strain_name'), simply leave it empty without placing an additional comma. Each row in the output CSV should always have only three commas separating the four fields regardless of whether some fields are empty. 
For instance, if 'sub_product_category' and 'strain_name' are empty, a row would look like this: "brand,product_category,," - - DO NOT EXPLAIN YOURSELF, ONLY RETURN A CSV WITH THESE COLUMNS: {', '.join(output_cols)} - - Input data set in CSV format: - - ''' - df = pd.DataFrame({'input__product_name':input_product_names, - 'input__brand':input_brands, - 'input__product_category':input_product_categories}).reset_index(drop=True) - # remove commas from all strings - df2 = df.copy() - for col in df2.columns: - df2[col] = df2[col].str.replace(',', '') - - # send to LLM - p2 = df2.to_csv(index=False, quoting=csv.QUOTE_ALL) - messages = [{'role':'system','content':'You are a helpful assistant. Return a properly-formatted CSV with the correct number of columns.'}, - {'role':'user', 'content':p1+p2+'\n\nOutput CSV with header row:\n\n'} - ] - comp = openai.ChatCompletion.create( - model='gpt-4', - messages=messages, - max_tokens=2000, - timeout=300, - temperature=0.2 - ) - res = comp['choices'][0]['message']['content'] - - # remove rows with wrong number of columns - keeprows = [] - for i,s in enumerate(res.split('\n')): - if i==0: - keeprows.append(s) - continue - _ncols = len(s.split(',')) - if _ncols!=ncols: - print(f'Got {_ncols} columns, skipping row {i-1} ({s})') - df = df.drop(i-1) - else: - keeprows.append(s) - df = df.reset_index(drop=True) - - resdf = pd.read_csv(StringIO('\n'.join(keeprows))) - - assert len(df)==len(resdf), 'Result CSV did not match input CSV in length' - df = pd.concat([df.reset_index(drop=True),resdf.reset_index(drop=True)],axis=1) - # check category/subcategory - dropidxs=[] - for idx, row in df.iterrows(): - drop = False - if pd.isna(row['product_category']) and not pd.isna(row['sub_product_category']): - drop=True - print('product_category is null while sub_product_category is not null, dropping') - if not pd.isna(row['product_category']): - if row['product_category'] not in category_taxonomy.keys(): - print(f'category "{row["product_category"]}" not in taxonomy, dropping row') - drop =True - elif not pd.isna(row['sub_product_category']): - if row['sub_product_category'] not in category_taxonomy[row['product_category']]: - print(f'subcategory "{row["sub_product_category"]}" not valid for category {row["product_category"]}, dropping row') - drop =True - if drop: - dropidxs.append(idx) - df = df.drop(dropidxs) - - return df - -def get_key(df): - return df['input__product_name'] + df['input__brand'] + df['input__product_category'] - -def main(upload_df: pd.DataFrame, - output_df: pd.DataFrame=None, - chunksize: int = 30, - ): - category_taxonomy = { - "Wellness": ["Mushroom Caps", "CBD Tincture/Caps/etc", "Promo/ Sample", "Capsule", "Liquid Flower", ""], - "Concentrate": ["Diamonds", "Shatter", "Sugar", "Promo/ Sample", "Badder", "Diamonds and Sauce", "Rosin", "Cookies Dough", "Flan", "Cookie Dough", ""], - "Preroll": ["Cubano", "Joint", "Promo/ Sample", "Blunt", "Infused Joint", "Packwoods Blunt", "Infused Blunt", "Napalm", ""], - "Vape": ["Terp Sauce", "Gpen 0.5", "Cured Resin", "Solventless Rosin", "510", "Dry Flower Series", "Natural Terp Series", "Promo/ Sample", "Dart Pod 0.5", "Raw Garden", "Live Flower Series", "Rosin", "Disposable", ""], - "Edible": ["Cookies", "Gummies", "Mint", "Promo/ Sample", "Beverage", "Chocolate", ""], - "Grow Products": ["Promo/ Sample", ""], - "Flower": ["Promo/ Sample", "Bud", ""], - "Accessory": ["Promo/ Sample", ""] - } - - # join together and get the diff - upload_df = pd.read_csv(upload_df) - upload_df['key'] = get_key(upload_df) 
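    # Editor's note (added): get_key() concatenates product name, brand and
    # category into a single string key. Rows whose key already appears in
    # output_df are filtered out below, so re-running main() only sends the
    # not-yet-cleaned rows to the LLM.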
- upload_df=upload_df.set_index('key') - if output_df is None: - rundf = upload_df - outlen = 0 - else: - output_df['key'] = get_key(output_df) - output_df=output_df.set_index('key') - rundf = upload_df.loc[~upload_df.index.isin(output_df.index)] - outlen = len(output_df) - - - # st.write(f'Input size: {len(upload_df)}, Output size: {outlen}, Still to process: {len(rundf)}') - - for _, chunk in tqdm(rundf.groupby(np.arange(len(rundf)) // chunksize)): - result = clean_data(chunk['input__product_name'], chunk['input__brand'], chunk['input__product_category'], category_taxonomy) - result['key'] = get_key(result) - result = result.set_index('key') - output_df = result if output_df is None else pd.concat([output_df, result]) - - return output_df \ No newline at end of file diff --git a/spaces/kdrkdrkdr/HoshinoTTS/export_model.py b/spaces/kdrkdrkdr/HoshinoTTS/export_model.py deleted file mode 100644 index 98a49835df5a7a2486e76ddf94fbbb4444b52203..0000000000000000000000000000000000000000 --- a/spaces/kdrkdrkdr/HoshinoTTS/export_model.py +++ /dev/null @@ -1,13 +0,0 @@ -import torch - -if __name__ == '__main__': - model_path = "saved_model/11/model.pth" - output_path = "saved_model/11/model1.pth" - checkpoint_dict = torch.load(model_path, map_location='cpu') - checkpoint_dict_new = {} - for k, v in checkpoint_dict.items(): - if k == "optimizer": - print("remove optimizer") - continue - checkpoint_dict_new[k] = v - torch.save(checkpoint_dict_new, output_path) diff --git a/spaces/keras-io/ner_with_transformers/app.py b/spaces/keras-io/ner_with_transformers/app.py deleted file mode 100644 index 975f9b705c13596548b8c74e0efc52c3fcc3b642..0000000000000000000000000000000000000000 --- a/spaces/keras-io/ner_with_transformers/app.py +++ /dev/null @@ -1,77 +0,0 @@ -import os -os.system('pip install tensorflow') - -import json -import numpy as np -import gradio as gr -import tensorflow as tf -from tensorflow import keras -from huggingface_hub.keras_mixin import from_pretrained_keras - -class CustomNonPaddingTokenLoss(keras.losses.Loss): - def __init__(self, name="custom_ner_loss"): - super().__init__(name=name) - - def call(self, y_true, y_pred): - loss_fn = keras.losses.SparseCategoricalCrossentropy( - from_logits=True, reduction=keras.losses.Reduction.NONE - ) - - loss = loss_fn(y_true, y_pred) - mask = tf.cast((y_true > 0), dtype=tf.float32) - - loss = loss * mask - return tf.reduce_sum(loss) / tf.reduce_sum(mask) - -def lowercase_and_convert_to_ids(tokens): - tokens = tf.strings.lower(tokens) - - return lookup_layer(tokens) - -def tokenize_and_convert_to_ids(text): - tokens = text.split() - return lowercase_and_convert_to_ids(tokens) - - -def ner_tagging(text_1): - - with open('mapping.json','r') as f: - mapping = json.load(f) - - ner_model = from_pretrained_keras("keras-io/ner-with-transformers", - custom_objects={'CustomNonPaddingTokenLoss':CustomNonPaddingTokenLoss}, - compile=False) - - - sample_input = tokenize_and_convert_to_ids(text_1) - sample_input = tf.reshape(sample_input, shape=[1, -1]) - output = ner_model.predict(sample_input) - prediction = np.argmax(output, axis=-1)[0] - - prediction = [mapping[str(i)] for i in prediction] - - text_2 = text_1.split(" ") - output = [] - for w in range(len(text_2)): - if prediction[w] != "O": - output.extend([(text_2[w], prediction[w]), (" ", None)]) - else: - output.extend([(text_2[w], None), (" ", None)]) - - return output - -text_1 = gr.inputs.Textbox(lines=5) -ner_tag = gr.outputs.Textbox() - -with open("vocab.json",'r') as f: - vocab = json.load(f) - 
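# Editor's note (added): StringLookup maps each token string to an integer id
# from vocab['tokens']; tokens outside the vocabulary fall into a reserved
# out-of-vocabulary bucket instead of raising an error, which is what lets the
# demo accept arbitrary input sentences.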
-lookup_layer = keras.layers.StringLookup(vocabulary=vocab['tokens']) - -iface = gr.Interface(ner_tagging, - inputs=text_1,outputs=['highlight'], examples=[['EU rejects German call to boycott British lamb .'], - ["He said further scientific study was required and if it was found that action was needed it should be taken by the European Union ."]], title="Named Entity Recognition with Transformers", - description = "Named Entity Recognition with Transformers on CoNLL2003 Dataset", - article = "Author: Rishav Chandra Varma") - -iface.launch() \ No newline at end of file diff --git a/spaces/kevinwang676/Bark-UI-with-Voice-Cloning-2/setup.py b/spaces/kevinwang676/Bark-UI-with-Voice-Cloning-2/setup.py deleted file mode 100644 index 606849326a4002007fd42060b51e69a19c18675c..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Bark-UI-with-Voice-Cloning-2/setup.py +++ /dev/null @@ -1,3 +0,0 @@ -from setuptools import setup - -setup() diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/dataset.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/dataset.py deleted file mode 100644 index 96bbb8bb6da99122f350bc8e1a6390245840e32b..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/dataset.py +++ /dev/null @@ -1,124 +0,0 @@ -import numbers -import os -import queue as Queue -import threading - -import mxnet as mx -import numpy as np -import torch -from torch.utils.data import DataLoader, Dataset -from torchvision import transforms - - -class BackgroundGenerator(threading.Thread): - def __init__(self, generator, local_rank, max_prefetch=6): - super(BackgroundGenerator, self).__init__() - self.queue = Queue.Queue(max_prefetch) - self.generator = generator - self.local_rank = local_rank - self.daemon = True - self.start() - - def run(self): - torch.cuda.set_device(self.local_rank) - for item in self.generator: - self.queue.put(item) - self.queue.put(None) - - def next(self): - next_item = self.queue.get() - if next_item is None: - raise StopIteration - return next_item - - def __next__(self): - return self.next() - - def __iter__(self): - return self - - -class DataLoaderX(DataLoader): - - def __init__(self, local_rank, **kwargs): - super(DataLoaderX, self).__init__(**kwargs) - self.stream = torch.cuda.Stream(local_rank) - self.local_rank = local_rank - - def __iter__(self): - self.iter = super(DataLoaderX, self).__iter__() - self.iter = BackgroundGenerator(self.iter, self.local_rank) - self.preload() - return self - - def preload(self): - self.batch = next(self.iter, None) - if self.batch is None: - return None - with torch.cuda.stream(self.stream): - for k in range(len(self.batch)): - self.batch[k] = self.batch[k].to(device=self.local_rank, non_blocking=True) - - def __next__(self): - torch.cuda.current_stream().wait_stream(self.stream) - batch = self.batch - if batch is None: - raise StopIteration - self.preload() - return batch - - -class MXFaceDataset(Dataset): - def __init__(self, root_dir, local_rank): - super(MXFaceDataset, self).__init__() - self.transform = transforms.Compose( - [transforms.ToPILImage(), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), - ]) - self.root_dir = root_dir - self.local_rank = local_rank - path_imgrec = os.path.join(root_dir, 'train.rec') - path_imgidx = os.path.join(root_dir, 'train.idx') - self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, 
path_imgrec, 'r') - s = self.imgrec.read_idx(0) - header, _ = mx.recordio.unpack(s) - if header.flag > 0: - self.header0 = (int(header.label[0]), int(header.label[1])) - self.imgidx = np.array(range(1, int(header.label[0]))) - else: - self.imgidx = np.array(list(self.imgrec.keys)) - - def __getitem__(self, index): - idx = self.imgidx[index] - s = self.imgrec.read_idx(idx) - header, img = mx.recordio.unpack(s) - label = header.label - if not isinstance(label, numbers.Number): - label = label[0] - label = torch.tensor(label, dtype=torch.long) - sample = mx.image.imdecode(img).asnumpy() - if self.transform is not None: - sample = self.transform(sample) - return sample, label - - def __len__(self): - return len(self.imgidx) - - -class SyntheticDataset(Dataset): - def __init__(self, local_rank): - super(SyntheticDataset, self).__init__() - img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32) - img = np.transpose(img, (2, 0, 1)) - img = torch.from_numpy(img).squeeze(0).float() - img = ((img / 255) - 0.5) / 0.5 - self.img = img - self.label = 1 - - def __getitem__(self, index): - return self.img, self.label - - def __len__(self): - return 1000000 diff --git a/spaces/kevinwang676/VALLE/modules/embedding.py b/spaces/kevinwang676/VALLE/modules/embedding.py deleted file mode 100644 index 17f6c316da3de6a432f4d43f9563800fdb6d58c4..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VALLE/modules/embedding.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2023 (authors: Feiteng Li) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
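(Editorial aside, not part of the deleted files: a minimal sketch of how the SyntheticDataset pattern from the arcface dataset module above can be used to smoke-test an input pipeline on CPU. ToyFaces and its sizes are illustrative; the normalisation mirrors the module.)

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset

class ToyFaces(Dataset):
    """Random 112x112 'face crops', following SyntheticDataset above."""
    def __init__(self, n=64):
        img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
        img = np.transpose(img, (2, 0, 1))
        img = torch.from_numpy(img).float()
        self.img = ((img / 255) - 0.5) / 0.5   # same [-1, 1] normalisation
        self.n = n

    def __getitem__(self, index):
        return self.img, 1

    def __len__(self):
        return self.n

loader = DataLoader(ToyFaces(), batch_size=16, shuffle=True)
x, y = next(iter(loader))
print(x.shape)   # torch.Size([16, 3, 112, 112])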
- -import math - -import torch -import torch.nn as nn - - -class TokenEmbedding(nn.Module): - def __init__( - self, - dim_model: int, - vocab_size: int, - dropout: float = 0.0, - ): - super().__init__() - - self.vocab_size = vocab_size - self.dim_model = dim_model - - self.dropout = torch.nn.Dropout(p=dropout) - self.word_embeddings = nn.Embedding(self.vocab_size, self.dim_model) - - @property - def weight(self) -> torch.Tensor: - return self.word_embeddings.weight - - def embedding(self, index: int) -> torch.Tensor: - return self.word_embeddings.weight[index : index + 1] - - def forward(self, x: torch.Tensor): - X = self.word_embeddings(x) - X = self.dropout(X) - - return X - - -class SinePositionalEmbedding(nn.Module): - def __init__( - self, - dim_model: int, - dropout: float = 0.0, - scale: bool = False, - alpha: bool = False, - ): - super().__init__() - self.dim_model = dim_model - self.x_scale = math.sqrt(dim_model) if scale else 1.0 - self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha) - self.dropout = torch.nn.Dropout(p=dropout) - - self.reverse = False - self.pe = None - self.extend_pe(torch.tensor(0.0).expand(1, 4000)) - - def extend_pe(self, x): - """Reset the positional encodings.""" - if self.pe is not None: - if self.pe.size(1) >= x.size(1): - if self.pe.dtype != x.dtype or self.pe.device != x.device: - self.pe = self.pe.to(dtype=x.dtype, device=x.device) - return - pe = torch.zeros(x.size(1), self.dim_model) - if self.reverse: - position = torch.arange( - x.size(1) - 1, -1, -1.0, dtype=torch.float32 - ).unsqueeze(1) - else: - position = torch.arange( - 0, x.size(1), dtype=torch.float32 - ).unsqueeze(1) - div_term = torch.exp( - torch.arange(0, self.dim_model, 2, dtype=torch.float32) - * -(math.log(10000.0) / self.dim_model) - ) - pe[:, 0::2] = torch.sin(position * div_term) - pe[:, 1::2] = torch.cos(position * div_term) - pe = pe.unsqueeze(0) - self.pe = pe.to(device=x.device, dtype=x.dtype).detach() - - def forward(self, x: torch.Tensor) -> torch.Tensor: - self.extend_pe(x) - output = x.unsqueeze(-1) if x.ndim == 2 else x - output = output * self.x_scale + self.alpha * self.pe[:, : x.size(1)] - return self.dropout(output) diff --git a/spaces/kevinwang676/VALLE/utils/sentence_cutter.py b/spaces/kevinwang676/VALLE/utils/sentence_cutter.py deleted file mode 100644 index e50bd324daee54b7a76f260b0c170d5626aa6f0f..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VALLE/utils/sentence_cutter.py +++ /dev/null @@ -1,55 +0,0 @@ -import nltk -import jieba -import sudachipy -import langid -nltk.download('punkt') -langid.set_languages(['en', 'zh', 'ja']) - -def split_text_into_sentences(text): - if langid.classify(text)[0] == "en": - sentences = nltk.tokenize.sent_tokenize(text) - - return sentences - elif langid.classify(text)[0] == "zh": - sentences = [] - segs = jieba.cut(text, cut_all=False) - segs = list(segs) - start = 0 - for i, seg in enumerate(segs): - if seg in ["。", "!", "?", "……"]: - sentences.append("".join(segs[start:i + 1])) - start = i + 1 - if start < len(segs): - sentences.append("".join(segs[start:])) - - return sentences - elif langid.classify(text)[0] == "ja": - sentences = [] - tokenizer = sudachipy.Dictionary().create() - tokens = tokenizer.tokenize(text) - current_sentence = "" - - for token in tokens: - current_sentence += token.surface() - if token.part_of_speech()[0] == "補助記号" and token.part_of_speech()[1] == "句点": - sentences.append(current_sentence) - current_sentence = "" - - if current_sentence: - 
sentences.append(current_sentence) - - return sentences - - raise RuntimeError("It is impossible to reach here.") - -long_text = """ -This is a very long paragraph, so most TTS model is unable to handle it. Hence, we have to split it into several sentences. With the help of NLTK, we can split it into sentences. However, the punctuation is not preserved, so we have to add it back. How are we going to do write this code? Let's see. -""" - -long_text = """ -现在我们要来尝试一下中文分句。因为很不幸的是,NLTK不支持中文分句。幸运的是,我们可以使用jieba来分句。但是,jieba分句后,标点符号会丢失,所以我们要手动添加回去。我现在正在想办法把这个例句写的更长更复杂一点,来测试jieba分句的性能。嗯......省略号,感觉不太好,因为省略号不是句号,所以jieba不会把它当作句子的结尾。会这样吗?我们来试试看。 -""" - -long_text = """ -これなら、英語と中国語の分句もできる。でも、日本語はどうする?まつわ、ChatGPTに僕と教えてください。ちょーと待ってください。あ、出来た! -""" \ No newline at end of file diff --git a/spaces/kevinwang676/rvc-models-new/infer_pack/attentions.py b/spaces/kevinwang676/rvc-models-new/infer_pack/attentions.py deleted file mode 100644 index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/rvc-models-new/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from infer_pack import commons -from infer_pack import modules -from infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in 
range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative 
attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. 
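        # Editor's note (added): the pad-then-reshape above implements the
        # standard "skewing" trick. Viewing the flat buffer as
        # [batch, heads, length + 1, 2*length - 1] shifts each row by one
        # column, so the slice below reads out exactly the absolute positions
        # 0..length-1 for every query.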
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/synthesizer/models/sublayer/__init__.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/synthesizer/models/sublayer/__init__.py deleted file mode 100644 index 4287ca8617970fa8fc025b75cb319c7032706910..0000000000000000000000000000000000000000 --- a/spaces/kira4424/Tacotron-zero-short-voice-clone/synthesizer/models/sublayer/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# \ No newline at end of file diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/vocoder/hifigan/models.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/vocoder/hifigan/models.py deleted file mode 100644 index c352e19f0c8aab5b3c24e861f5b1c06c17c5e750..0000000000000000000000000000000000000000 --- a/spaces/kira4424/Tacotron-zero-short-voice-clone/vocoder/hifigan/models.py +++ /dev/null @@ -1,320 +0,0 @@ -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, 
remove_weight_norm, spectral_norm -from vocoder.hifigan.utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - -class InterpolationBlock(torch.nn.Module): - def __init__(self, scale_factor, mode='nearest', align_corners=None, downsample=False): - super(InterpolationBlock, self).__init__() - self.downsample = downsample - self.scale_factor = scale_factor - self.mode = mode - self.align_corners = align_corners - - def forward(self, x): - outputs = torch.nn.functional.interpolate( - x, - size=x.shape[-1] * self.scale_factor \ - if not self.downsample else x.shape[-1] // self.scale_factor, - mode=self.mode, - align_corners=self.align_corners, - recompute_scale_factor=False - ) - return outputs - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - self.num_kernels = len(h.resblock_kernel_sizes) - self.num_upsamples = len(h.upsample_rates) - self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3)) - resblock = ResBlock1 if h.resblock == '1' else ResBlock2 - - self.ups = nn.ModuleList() -# for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): -# # self.ups.append(weight_norm( -# # ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)), -# # k, u, padding=(k-u)//2))) - if h.sampling_rate == 24000: - for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): - self.ups.append( - torch.nn.Sequential( - InterpolationBlock(u), - 
weight_norm(torch.nn.Conv1d( - h.upsample_initial_channel//(2**i), - h.upsample_initial_channel//(2**(i+1)), - k, padding=(k-1)//2, - )) - ) - ) - else: - for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): - self.ups.append(weight_norm(ConvTranspose1d(h.upsample_initial_channel//(2**i), - h.upsample_initial_channel//(2**(i+1)), - k, u, padding=(u//2 + u%2), output_padding=u%2))) - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h.upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - - def forward(self, x): - x = self.conv_pre(x) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - if self.h.sampling_rate == 24000: - remove_weight_norm(l[-1]) - else: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiPeriodDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorP(2), - DiscriminatorP(3), - DiscriminatorP(5), - DiscriminatorP(7), - DiscriminatorP(11), - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else 
spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i-1](y) - y_hat = self.meanpools[i-1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss*2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - diff --git a/spaces/kmahtan2/AIPairProgramming2/app.py b/spaces/kmahtan2/AIPairProgramming2/app.py deleted file mode 100644 index cba1bd1cd68ddbc9bfdf2da54cb02dfdb191a84d..0000000000000000000000000000000000000000 --- a/spaces/kmahtan2/AIPairProgramming2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import streamlit as st - -st.markdown('Hello World') \ No newline at end of file diff --git a/spaces/kukuhtw/VToonify/vtoonify/model/raft/core/corr.py b/spaces/kukuhtw/VToonify/vtoonify/model/raft/core/corr.py deleted file mode 100644 index 40214aa5e6f0392a732eacab9d9cb0cbfb4669f3..0000000000000000000000000000000000000000 --- a/spaces/kukuhtw/VToonify/vtoonify/model/raft/core/corr.py +++ /dev/null @@ -1,91 +0,0 @@ -import torch -import torch.nn.functional as F -from model.raft.core.utils.utils import bilinear_sampler, coords_grid - -try: - import alt_cuda_corr -except: - # alt_cuda_corr is not compiled - pass - - -class CorrBlock: - def __init__(self, fmap1, fmap2, num_levels=4, radius=4): - self.num_levels = num_levels - self.radius = radius - self.corr_pyramid = [] - - # all pairs correlation - corr = CorrBlock.corr(fmap1, fmap2) - - batch, h1, w1, dim, h2, w2 = corr.shape - corr = corr.reshape(batch*h1*w1, dim, h2, w2) - - self.corr_pyramid.append(corr) - for i in range(self.num_levels-1): - corr = F.avg_pool2d(corr, 2, stride=2) - 
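            # Editor's note (added): each avg_pool2d halves the resolution of
            # the target-frame axes, so pyramid level i holds the all-pairs
            # correlation at 1/2**i scale; __call__ samples every level to
            # build a multi-scale cost volume.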
self.corr_pyramid.append(corr) - - def __call__(self, coords): - r = self.radius - coords = coords.permute(0, 2, 3, 1) - batch, h1, w1, _ = coords.shape - - out_pyramid = [] - for i in range(self.num_levels): - corr = self.corr_pyramid[i] - dx = torch.linspace(-r, r, 2*r+1, device=coords.device) - dy = torch.linspace(-r, r, 2*r+1, device=coords.device) - delta = torch.stack(torch.meshgrid(dy, dx), axis=-1) - - centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i - delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2) - coords_lvl = centroid_lvl + delta_lvl - - corr = bilinear_sampler(corr, coords_lvl) - corr = corr.view(batch, h1, w1, -1) - out_pyramid.append(corr) - - out = torch.cat(out_pyramid, dim=-1) - return out.permute(0, 3, 1, 2).contiguous().float() - - @staticmethod - def corr(fmap1, fmap2): - batch, dim, ht, wd = fmap1.shape - fmap1 = fmap1.view(batch, dim, ht*wd) - fmap2 = fmap2.view(batch, dim, ht*wd) - - corr = torch.matmul(fmap1.transpose(1,2), fmap2) - corr = corr.view(batch, ht, wd, 1, ht, wd) - return corr / torch.sqrt(torch.tensor(dim).float()) - - -class AlternateCorrBlock: - def __init__(self, fmap1, fmap2, num_levels=4, radius=4): - self.num_levels = num_levels - self.radius = radius - - self.pyramid = [(fmap1, fmap2)] - for i in range(self.num_levels): - fmap1 = F.avg_pool2d(fmap1, 2, stride=2) - fmap2 = F.avg_pool2d(fmap2, 2, stride=2) - self.pyramid.append((fmap1, fmap2)) - - def __call__(self, coords): - coords = coords.permute(0, 2, 3, 1) - B, H, W, _ = coords.shape - dim = self.pyramid[0][0].shape[1] - - corr_list = [] - for i in range(self.num_levels): - r = self.radius - fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous() - fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous() - - coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous() - corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r) - corr_list.append(corr.squeeze(1)) - - corr = torch.stack(corr_list, dim=1) - corr = corr.reshape(B, -1, H, W) - return corr / torch.sqrt(torch.tensor(dim).float()) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/TgaImagePlugin.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/TgaImagePlugin.py deleted file mode 100644 index 67dfc3d3c8e5726c5885b1c62cdcb2553854c4dc..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/TgaImagePlugin.py +++ /dev/null @@ -1,255 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# TGA file handling -# -# History: -# 95-09-01 fl created (reads 24-bit files only) -# 97-01-04 fl support more TGA versions, including compressed images -# 98-07-04 fl fixed orientation and alpha layer bugs -# 98-09-11 fl fixed orientation for runlength decoder -# -# Copyright (c) Secret Labs AB 1997-98. -# Copyright (c) Fredrik Lundh 1995-97. -# -# See the README file for information on usage and redistribution. -# - - -import warnings - -from . import Image, ImageFile, ImagePalette -from ._binary import i16le as i16 -from ._binary import o8 -from ._binary import o16le as o16 - -# -# -------------------------------------------------------------------- -# Read RGA file - - -MODES = { - # map imagetype/depth to rawmode - (1, 8): "P", - (3, 1): "1", - (3, 8): "L", - (3, 16): "LA", - (2, 16): "BGR;5", - (2, 24): "BGR", - (2, 32): "BGRA", -} - - -## -# Image plugin for Targa files. 
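(Editorial aside, not part of the deleted files: a shape check for the RAFT CorrBlock defined in corr.py above. It assumes CorrBlock and the coords_grid helper from model.raft.core.utils.utils are importable; the sizes are illustrative.)

import torch

B, D, H, W = 1, 32, 8, 8
fmap1 = torch.randn(B, D, H, W)
fmap2 = torch.randn(B, D, H, W)

corr_fn = CorrBlock(fmap1, fmap2, num_levels=4, radius=4)
coords = coords_grid(B, H, W)   # [B, 2, H, W] pixel coordinates
out = corr_fn(coords)
print(out.shape)                # torch.Size([1, 324, 8, 8]); 324 = num_levels * (2*radius + 1)**2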
- - -class TgaImageFile(ImageFile.ImageFile): - format = "TGA" - format_description = "Targa" - - def _open(self): - # process header - s = self.fp.read(18) - - id_len = s[0] - - colormaptype = s[1] - imagetype = s[2] - - depth = s[16] - - flags = s[17] - - self._size = i16(s, 12), i16(s, 14) - - # validate header fields - if ( - colormaptype not in (0, 1) - or self.size[0] <= 0 - or self.size[1] <= 0 - or depth not in (1, 8, 16, 24, 32) - ): - msg = "not a TGA file" - raise SyntaxError(msg) - - # image mode - if imagetype in (3, 11): - self.mode = "L" - if depth == 1: - self.mode = "1" # ??? - elif depth == 16: - self.mode = "LA" - elif imagetype in (1, 9): - self.mode = "P" - elif imagetype in (2, 10): - self.mode = "RGB" - if depth == 32: - self.mode = "RGBA" - else: - msg = "unknown TGA mode" - raise SyntaxError(msg) - - # orientation - orientation = flags & 0x30 - self._flip_horizontally = orientation in [0x10, 0x30] - if orientation in [0x20, 0x30]: - orientation = 1 - elif orientation in [0, 0x10]: - orientation = -1 - else: - msg = "unknown TGA orientation" - raise SyntaxError(msg) - - self.info["orientation"] = orientation - - if imagetype & 8: - self.info["compression"] = "tga_rle" - - if id_len: - self.info["id_section"] = self.fp.read(id_len) - - if colormaptype: - # read palette - start, size, mapdepth = i16(s, 3), i16(s, 5), s[7] - if mapdepth == 16: - self.palette = ImagePalette.raw( - "BGR;15", b"\0" * 2 * start + self.fp.read(2 * size) - ) - elif mapdepth == 24: - self.palette = ImagePalette.raw( - "BGR", b"\0" * 3 * start + self.fp.read(3 * size) - ) - elif mapdepth == 32: - self.palette = ImagePalette.raw( - "BGRA", b"\0" * 4 * start + self.fp.read(4 * size) - ) - - # setup tile descriptor - try: - rawmode = MODES[(imagetype & 7, depth)] - if imagetype & 8: - # compressed - self.tile = [ - ( - "tga_rle", - (0, 0) + self.size, - self.fp.tell(), - (rawmode, orientation, depth), - ) - ] - else: - self.tile = [ - ( - "raw", - (0, 0) + self.size, - self.fp.tell(), - (rawmode, 0, orientation), - ) - ] - except KeyError: - pass # cannot decode - - def load_end(self): - if self._flip_horizontally: - self.im = self.im.transpose(Image.Transpose.FLIP_LEFT_RIGHT) - - -# -# -------------------------------------------------------------------- -# Write TGA file - - -SAVE = { - "1": ("1", 1, 0, 3), - "L": ("L", 8, 0, 3), - "LA": ("LA", 16, 0, 3), - "P": ("P", 8, 1, 1), - "RGB": ("BGR", 24, 0, 2), - "RGBA": ("BGRA", 32, 0, 2), -} - - -def _save(im, fp, filename): - try: - rawmode, bits, colormaptype, imagetype = SAVE[im.mode] - except KeyError as e: - msg = f"cannot write mode {im.mode} as TGA" - raise OSError(msg) from e - - if "rle" in im.encoderinfo: - rle = im.encoderinfo["rle"] - else: - compression = im.encoderinfo.get("compression", im.info.get("compression")) - rle = compression == "tga_rle" - if rle: - imagetype += 8 - - id_section = im.encoderinfo.get("id_section", im.info.get("id_section", "")) - id_len = len(id_section) - if id_len > 255: - id_len = 255 - id_section = id_section[:255] - warnings.warn("id_section has been trimmed to 255 characters") - - if colormaptype: - palette = im.im.getpalette("RGB", "BGR") - colormaplength, colormapentry = len(palette) // 3, 24 - else: - colormaplength, colormapentry = 0, 0 - - if im.mode in ("LA", "RGBA"): - flags = 8 - else: - flags = 0 - - orientation = im.encoderinfo.get("orientation", im.info.get("orientation", -1)) - if orientation > 0: - flags = flags | 0x20 - - fp.write( - o8(id_len) - + o8(colormaptype) - + o8(imagetype) - + 
o16(0) # colormapfirst - + o16(colormaplength) - + o8(colormapentry) - + o16(0) - + o16(0) - + o16(im.size[0]) - + o16(im.size[1]) - + o8(bits) - + o8(flags) - ) - - if id_section: - fp.write(id_section) - - if colormaptype: - fp.write(palette) - - if rle: - ImageFile._save( - im, fp, [("tga_rle", (0, 0) + im.size, 0, (rawmode, orientation))] - ) - else: - ImageFile._save( - im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, orientation))] - ) - - # write targa version 2 footer - fp.write(b"\000" * 8 + b"TRUEVISION-XFILE." + b"\000") - - -# -# -------------------------------------------------------------------- -# Registry - - -Image.register_open(TgaImageFile.format, TgaImageFile) -Image.register_save(TgaImageFile.format, _save) - -Image.register_extensions(TgaImageFile.format, [".tga", ".icb", ".vda", ".vst"]) - -Image.register_mime(TgaImageFile.format, "image/x-tga") diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/altair/vegalite/v5/theme.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/altair/vegalite/v5/theme.py deleted file mode 100644 index b536a1ddebe6c311672e6ce2757853ecffa6fb1e..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/altair/vegalite/v5/theme.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Tools for enabling and registering chart themes""" - -from ...utils.theme import ThemeRegistry - -VEGA_THEMES = [ - "ggplot2", - "quartz", - "vox", - "fivethirtyeight", - "dark", - "latimes", - "urbaninstitute", - "excel", - "googlecharts", - "powerbi", -] - - -class VegaTheme: - """Implementation of a builtin vega theme.""" - - def __init__(self, theme): - self.theme = theme - - def __call__(self): - return { - "usermeta": {"embedOptions": {"theme": self.theme}}, - "config": {"view": {"continuousWidth": 300, "continuousHeight": 300}}, - } - - def __repr__(self): - return "VegaTheme({!r})".format(self.theme) - - -# The entry point group that can be used by other packages to declare other -# renderers that will be auto-detected. Explicit registration is also -# allowed by the PluginRegistery API. 
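
As a minimal sketch of the explicit-registration path mentioned above, using the `themes` registry created just below (the theme name "corporate" and its config values are made up for illustration):

    themes.register(
        "corporate",  # hypothetical theme name
        lambda: {
            "config": {
                "view": {"continuousWidth": 400, "continuousHeight": 300},
                "title": {"fontSize": 18},
            }
        },
    )
    themes.enable("corporate")  # charts rendered afterwards pick up this config
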
-ENTRY_POINT_GROUP = "altair.vegalite.v5.theme" # type: str -themes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP) - -themes.register( - "default", - lambda: {"config": {"view": {"continuousWidth": 300, "continuousHeight": 300}}}, -) -themes.register( - "opaque", - lambda: { - "config": { - "background": "white", - "view": {"continuousWidth": 300, "continuousHeight": 300}, - } - }, -) -themes.register("none", lambda: {}) - -for theme in VEGA_THEMES: - themes.register(theme, VegaTheme(theme)) - -themes.enable("default") diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/middleware/httpsredirect.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/middleware/httpsredirect.py deleted file mode 100644 index b7a3d8e078574e87dc6e345d621f5a596c3bdc1e..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/middleware/httpsredirect.py +++ /dev/null @@ -1,3 +0,0 @@ -from starlette.middleware.httpsredirect import ( # noqa - HTTPSRedirectMiddleware as HTTPSRedirectMiddleware, -) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/pens/perimeterPen.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/pens/perimeterPen.py deleted file mode 100644 index efb2b2d14cc46dc51ff795cf7a1fb95bd6d63673..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/pens/perimeterPen.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- -"""Calculate the perimeter of a glyph.""" - -from fontTools.pens.basePen import BasePen -from fontTools.misc.bezierTools import ( - approximateQuadraticArcLengthC, - calcQuadraticArcLengthC, - approximateCubicArcLengthC, - calcCubicArcLengthC, -) -import math - - -__all__ = ["PerimeterPen"] - - -def _distance(p0, p1): - return math.hypot(p0[0] - p1[0], p0[1] - p1[1]) - - -class PerimeterPen(BasePen): - def __init__(self, glyphset=None, tolerance=0.005): - BasePen.__init__(self, glyphset) - self.value = 0 - self.tolerance = tolerance - - # Choose which algorithm to use for quadratic and for cubic. - # Quadrature is faster but has fixed error characteristic with no strong - # error bound. The cutoff points are derived empirically. 
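        # Concretely: at the default tolerance of 0.005 both curve types take
        # the quadrature path; tightening the tolerance below 0.0015 switches
        # cubics to the recursive method, and below 0.00075 switches
        # quadratics to the exact closed-form arc length.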
- self._addCubic = ( - self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive - ) - self._addQuadratic = ( - self._addQuadraticQuadrature - if tolerance >= 0.00075 - else self._addQuadraticExact - ) - - def _moveTo(self, p0): - self.__startPoint = p0 - - def _closePath(self): - p0 = self._getCurrentPoint() - if p0 != self.__startPoint: - self._lineTo(self.__startPoint) - - def _lineTo(self, p1): - p0 = self._getCurrentPoint() - self.value += _distance(p0, p1) - - def _addQuadraticExact(self, c0, c1, c2): - self.value += calcQuadraticArcLengthC(c0, c1, c2) - - def _addQuadraticQuadrature(self, c0, c1, c2): - self.value += approximateQuadraticArcLengthC(c0, c1, c2) - - def _qCurveToOne(self, p1, p2): - p0 = self._getCurrentPoint() - self._addQuadratic(complex(*p0), complex(*p1), complex(*p2)) - - def _addCubicRecursive(self, c0, c1, c2, c3): - self.value += calcCubicArcLengthC(c0, c1, c2, c3, self.tolerance) - - def _addCubicQuadrature(self, c0, c1, c2, c3): - self.value += approximateCubicArcLengthC(c0, c1, c2, c3) - - def _curveToOne(self, p1, p2, p3): - p0 = self._getCurrentPoint() - self._addCubic(complex(*p0), complex(*p1), complex(*p2), complex(*p3)) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/hub_mixin.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/hub_mixin.py deleted file mode 100644 index 427eabb56ce64cf29537a107908408d688df5bee..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/hub_mixin.py +++ /dev/null @@ -1,388 +0,0 @@ -import json -import os -import warnings -from pathlib import Path -from typing import Dict, List, Optional, Type, TypeVar, Union - -import requests - -from .constants import CONFIG_NAME, PYTORCH_WEIGHTS_NAME -from .file_download import hf_hub_download, is_torch_available -from .hf_api import HfApi -from .utils import SoftTemporaryDirectory, logging, validate_hf_hub_args -from .utils._deprecation import _deprecate_positional_args - - -if is_torch_available(): - import torch # type: ignore - -logger = logging.get_logger(__name__) - -# Generic variable that is either ModelHubMixin or a subclass thereof -T = TypeVar("T", bound="ModelHubMixin") - - -class ModelHubMixin: - """ - A generic mixin to integrate ANY machine learning framework with the Hub. - - To integrate your framework, your model class must inherit from this class. Custom logic for saving/loading models - have to be overwritten in [`_from_pretrained`] and [`_save_pretrained`]. [`PyTorchModelHubMixin`] is a good example - of mixin integration with the Hub. Check out our [integration guide](../guides/integrations) for more instructions. - """ - - @_deprecate_positional_args(version="0.16") - def save_pretrained( - self, - save_directory: Union[str, Path], - *, - config: Optional[dict] = None, - repo_id: Optional[str] = None, - push_to_hub: bool = False, - **kwargs, - ) -> Optional[str]: - """ - Save weights in local directory. - - Args: - save_directory (`str` or `Path`): - Path to directory in which the model weights and configuration will be saved. - config (`dict`, *optional*): - Model configuration specified as a key/value dictionary. - push_to_hub (`bool`, *optional*, defaults to `False`): - Whether or not to push your model to the Huggingface Hub after saving it. - repo_id (`str`, *optional*): - ID of your repository on the Hub. Used only if `push_to_hub=True`. 
Will default to the folder name if - not provided. - kwargs: - Additional key word arguments passed along to the [`~ModelHubMixin._from_pretrained`] method. - """ - save_directory = Path(save_directory) - save_directory.mkdir(parents=True, exist_ok=True) - - # saving model weights/files - self._save_pretrained(save_directory) - - # saving config - if isinstance(config, dict): - (save_directory / CONFIG_NAME).write_text(json.dumps(config)) - - if push_to_hub: - kwargs = kwargs.copy() # soft-copy to avoid mutating input - if config is not None: # kwarg for `push_to_hub` - kwargs["config"] = config - if repo_id is None: - repo_id = save_directory.name # Defaults to `save_directory` name - return self.push_to_hub(repo_id=repo_id, **kwargs) - return None - - def _save_pretrained(self, save_directory: Path) -> None: - """ - Overwrite this method in subclass to define how to save your model. - Check out our [integration guide](../guides/integrations) for instructions. - - Args: - save_directory (`str` or `Path`): - Path to directory in which the model weights and configuration will be saved. - """ - raise NotImplementedError - - @classmethod - @validate_hf_hub_args - @_deprecate_positional_args(version="0.16") - def from_pretrained( - cls: Type[T], - pretrained_model_name_or_path: Union[str, Path], - *, - force_download: bool = False, - resume_download: bool = False, - proxies: Optional[Dict] = None, - token: Optional[Union[str, bool]] = None, - cache_dir: Optional[Union[str, Path]] = None, - local_files_only: bool = False, - revision: Optional[str] = None, - **model_kwargs, - ) -> T: - """ - Download a model from the Huggingface Hub and instantiate it. - - Args: - pretrained_model_name_or_path (`str`, `Path`): - - Either the `model_id` (string) of a model hosted on the Hub, e.g. `bigscience/bloom`. - - Or a path to a `directory` containing model weights saved using - [`~transformers.PreTrainedModel.save_pretrained`], e.g., `../path/to/my_model_directory/`. - revision (`str`, *optional*): - Revision of the model on the Hub. Can be a branch name, a git tag or any commit id. - Defaults to the latest commit on `main` branch. - force_download (`bool`, *optional*, defaults to `False`): - Whether to force (re-)downloading the model weights and configuration files from the Hub, overriding - the existing cache. - resume_download (`bool`, *optional*, defaults to `False`): - Whether to delete incompletely received files. Will attempt to resume the download if such a file exists. - proxies (`Dict[str, str]`, *optional*): - A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', - 'http://hostname': 'foo.bar:4012'}`. The proxies are used on every request. - token (`str` or `bool`, *optional*): - The token to use as HTTP bearer authorization for remote files. By default, it will use the token - cached when running `huggingface-cli login`. - cache_dir (`str`, `Path`, *optional*): - Path to the folder where cached files are stored. - local_files_only (`bool`, *optional*, defaults to `False`): - If `True`, avoid downloading the file and return the path to the local cached file if it exists. - model_kwargs (`Dict`, *optional*): - Additional kwargs to pass to the model during initialization. - """ - model_id = pretrained_model_name_or_path - - if isinstance(model_id, str) and len(model_id.split("@")) == 2: - warnings.warn( - ( - "Passing a revision using 'namespace/model_id@revision' pattern is" - " deprecated and will be removed in version v0.16. 
Please pass" - " 'revision=...' as argument." - ), - FutureWarning, - ) - model_id, revision = model_id.split("@") - - config_file: Optional[str] = None - if os.path.isdir(model_id): - if CONFIG_NAME in os.listdir(model_id): - config_file = os.path.join(model_id, CONFIG_NAME) - else: - logger.warning(f"{CONFIG_NAME} not found in {Path(model_id).resolve()}") - elif isinstance(model_id, str): - try: - config_file = hf_hub_download( - repo_id=str(model_id), - filename=CONFIG_NAME, - revision=revision, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - token=token, - local_files_only=local_files_only, - ) - except requests.exceptions.RequestException: - logger.warning(f"{CONFIG_NAME} not found in HuggingFace Hub.") - - if config_file is not None: - with open(config_file, "r", encoding="utf-8") as f: - config = json.load(f) - model_kwargs.update({"config": config}) - - return cls._from_pretrained( - model_id=model_id, - revision=revision, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - token=token, - **model_kwargs, - ) - - @classmethod - @_deprecate_positional_args(version="0.16") - def _from_pretrained( - cls: Type[T], - *, - model_id: str, - revision: Optional[str], - cache_dir: Optional[Union[str, Path]], - force_download: bool, - proxies: Optional[Dict], - resume_download: bool, - local_files_only: bool, - token: Optional[Union[str, bool]], - **model_kwargs, - ) -> T: - """Overwrite this method in subclass to define how to load your model from pretrained. - - Use [`hf_hub_download`] or [`snapshot_download`] to download files from the Hub before loading them. Most - args taken as input can be directly passed to those 2 methods. If needed, you can add more arguments to this - method using "model_kwargs". For example [`PyTorchModelHubMixin._from_pretrained`] takes as input a `map_location` - parameter to set on which device the model should be loaded. - - Check out our [integration guide](../guides/integrations) for more instructions. - - Args: - model_id (`str`): - ID of the model to load from the Huggingface Hub (e.g. `bigscience/bloom`). - revision (`str`, *optional*): - Revision of the model on the Hub. Can be a branch name, a git tag or any commit id. Defaults to the - latest commit on `main` branch. - force_download (`bool`, *optional*, defaults to `False`): - Whether to force (re-)downloading the model weights and configuration files from the Hub, overriding - the existing cache. - resume_download (`bool`, *optional*, defaults to `False`): - Whether to delete incompletely received files. Will attempt to resume the download if such a file exists. - proxies (`Dict[str, str]`, *optional*): - A dictionary of proxy servers to use by protocol or endpoint (e.g., `{'http': 'foo.bar:3128', - 'http://hostname': 'foo.bar:4012'}`). - token (`str` or `bool`, *optional*): - The token to use as HTTP bearer authorization for remote files. By default, it will use the token - cached when running `huggingface-cli login`. - cache_dir (`str`, `Path`, *optional*): - Path to the folder where cached files are stored. - local_files_only (`bool`, *optional*, defaults to `False`): - If `True`, avoid downloading the file and return the path to the local cached file if it exists. - model_kwargs: - Additional keyword arguments passed along to the [`~ModelHubMixin._from_pretrained`] method. 
- """ - raise NotImplementedError - - @validate_hf_hub_args - def push_to_hub( - self, - repo_id: str, - *, - config: Optional[dict] = None, - commit_message: str = "Push model using huggingface_hub.", - private: bool = False, - api_endpoint: Optional[str] = None, - token: Optional[str] = None, - branch: Optional[str] = None, - create_pr: Optional[bool] = None, - allow_patterns: Optional[Union[List[str], str]] = None, - ignore_patterns: Optional[Union[List[str], str]] = None, - delete_patterns: Optional[Union[List[str], str]] = None, - ) -> str: - """ - Upload model checkpoint to the Hub. - - Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use - `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more - details. - - - Args: - repo_id (`str`): - ID of the repository to push to (example: `"username/my-model"`). - config (`dict`, *optional*): - Configuration object to be saved alongside the model weights. - commit_message (`str`, *optional*): - Message to commit while pushing. - private (`bool`, *optional*, defaults to `False`): - Whether the repository created should be private. - api_endpoint (`str`, *optional*): - The API endpoint to use when pushing the model to the hub. - token (`str`, *optional*): - The token to use as HTTP bearer authorization for remote files. By default, it will use the token - cached when running `huggingface-cli login`. - branch (`str`, *optional*): - The git branch on which to push the model. This defaults to `"main"`. - create_pr (`boolean`, *optional*): - Whether or not to create a Pull Request from `branch` with that commit. Defaults to `False`. - allow_patterns (`List[str]` or `str`, *optional*): - If provided, only files matching at least one pattern are pushed. - ignore_patterns (`List[str]` or `str`, *optional*): - If provided, files matching any of the patterns are not pushed. - delete_patterns (`List[str]` or `str`, *optional*): - If provided, remote files matching any of the patterns will be deleted from the repo. - - Returns: - The url of the commit of your model in the given repository. - """ - api = HfApi(endpoint=api_endpoint, token=token) - repo_id = api.create_repo(repo_id=repo_id, private=private, exist_ok=True).repo_id - - # Push the files to the repo in a single commit - with SoftTemporaryDirectory() as tmp: - saved_path = Path(tmp) / repo_id - self.save_pretrained(saved_path, config=config) - return api.upload_folder( - repo_id=repo_id, - repo_type="model", - folder_path=saved_path, - commit_message=commit_message, - revision=branch, - create_pr=create_pr, - allow_patterns=allow_patterns, - ignore_patterns=ignore_patterns, - delete_patterns=delete_patterns, - ) - - -class PyTorchModelHubMixin(ModelHubMixin): - """ - Implementation of [`ModelHubMixin`] to provide model Hub upload/download capabilities to PyTorch models. The model - is set in evaluation mode by default using `model.eval()` (dropout modules are deactivated). To train the model, - you should first set it back in training mode with `model.train()`. - - Example: - - ```python - >>> import torch - >>> import torch.nn as nn - >>> from huggingface_hub import PyTorchModelHubMixin - - - >>> class MyModel(nn.Module, PyTorchModelHubMixin): - ... def __init__(self): - ... super().__init__() - ... self.param = nn.Parameter(torch.rand(3, 4)) - ... self.linear = nn.Linear(4, 5) - - ... def forward(self, x): - ... 
return self.linear(x + self.param) - >>> model = MyModel() - - # Save model weights to local directory - >>> model.save_pretrained("my-awesome-model") - - # Push model weights to the Hub - >>> model.push_to_hub("my-awesome-model") - - # Download and initialize weights from the Hub - >>> model = MyModel.from_pretrained("username/my-awesome-model") - ``` - """ - - def _save_pretrained(self, save_directory: Path) -> None: - """Save weights from a Pytorch model to a local directory.""" - model_to_save = self.module if hasattr(self, "module") else self # type: ignore - torch.save(model_to_save.state_dict(), save_directory / PYTORCH_WEIGHTS_NAME) - - @classmethod - @_deprecate_positional_args(version="0.16") - def _from_pretrained( - cls, - *, - model_id: str, - revision: str, - cache_dir: str, - force_download: bool, - proxies: Optional[Dict], - resume_download: bool, - local_files_only: bool, - token: Union[str, bool, None], - map_location: str = "cpu", - strict: bool = False, - **model_kwargs, - ): - """Load Pytorch pretrained weights and return the loaded model.""" - if os.path.isdir(model_id): - print("Loading weights from local directory") - model_file = os.path.join(model_id, PYTORCH_WEIGHTS_NAME) - else: - model_file = hf_hub_download( - repo_id=model_id, - filename=PYTORCH_WEIGHTS_NAME, - revision=revision, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - token=token, - local_files_only=local_files_only, - ) - model = cls(**model_kwargs) - - state_dict = torch.load(model_file, map_location=torch.device(map_location)) - model.load_state_dict(state_dict, strict=strict) # type: ignore - model.eval() # type: ignore - - return model diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/_mathtext.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/_mathtext.py deleted file mode 100644 index 3a934c21fd50764515fe4c56810489c50510079b..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/_mathtext.py +++ /dev/null @@ -1,2597 +0,0 @@ -""" -Implementation details for :mod:`.mathtext`. -""" - -import copy -from collections import namedtuple -import enum -import functools -import logging -import os -import re -import types -import unicodedata - -import numpy as np -from pyparsing import ( - Empty, Forward, Literal, NotAny, oneOf, OneOrMore, Optional, - ParseBaseException, ParseException, ParseExpression, ParseFatalException, - ParserElement, ParseResults, QuotedString, Regex, StringEnd, ZeroOrMore, - pyparsing_common) - -import matplotlib as mpl -from . import _api, cbook -from ._mathtext_data import ( - latex_to_bakoma, stix_glyph_fixes, stix_virtual_fonts, tex2uni) -from .font_manager import FontProperties, findfont, get_font -from .ft2font import FT2Image, KERNING_DEFAULT - - -ParserElement.enablePackrat() -_log = logging.getLogger("matplotlib.mathtext") - - -############################################################################## -# FONTS - - -@_api.delete_parameter("3.6", "math") -def get_unicode_index(symbol, math=False): # Publicly exported. - r""" - Return the integer index (from the Unicode table) of *symbol*. - - Parameters - ---------- - symbol : str - A single (Unicode) character, a TeX command (e.g. r'\pi') or a Type1 - symbol name (e.g. 'phi'). - math : bool, default: False - If True (deprecated), replace ASCII hyphen-minus by Unicode minus. 
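
    Examples
    --------
    Illustrative calls; a plain character falls through to ``ord``, while a
    TeX command is resolved via the ``tex2uni`` table:

    >>> get_unicode_index('a')
    97
    >>> get_unicode_index(r'\pi')
    960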
- """ - # From UTF #25: U+2212 minus sign is the preferred - # representation of the unary and binary minus sign rather than - # the ASCII-derived U+002D hyphen-minus, because minus sign is - # unambiguous and because it is rendered with a more desirable - # length, usually longer than a hyphen. - # Remove this block when the 'math' parameter is deleted. - if math and symbol == '-': - return 0x2212 - try: # This will succeed if symbol is a single Unicode char - return ord(symbol) - except TypeError: - pass - try: # Is symbol a TeX symbol (i.e. \alpha) - return tex2uni[symbol.strip("\\")] - except KeyError as err: - raise ValueError( - "'{}' is not a valid Unicode character or TeX/Type1 symbol" - .format(symbol)) from err - - -VectorParse = namedtuple("VectorParse", "width height depth glyphs rects", - module="matplotlib.mathtext") -VectorParse.__doc__ = r""" -The namedtuple type returned by ``MathTextParser("path").parse(...)``. - -This tuple contains the global metrics (*width*, *height*, *depth*), a list of -*glyphs* (including their positions) and of *rect*\angles. -""" - - -RasterParse = namedtuple("RasterParse", "ox oy width height depth image", - module="matplotlib.mathtext") -RasterParse.__doc__ = r""" -The namedtuple type returned by ``MathTextParser("agg").parse(...)``. - -This tuple contains the global metrics (*width*, *height*, *depth*), and a -raster *image*. The offsets *ox*, *oy* are always zero. -""" - - -class Output: - r""" - Result of `ship`\ping a box: lists of positioned glyphs and rectangles. - - This class is not exposed to end users, but converted to a `VectorParse` or - a `RasterParse` by `.MathTextParser.parse`. - """ - - def __init__(self, box): - self.box = box - self.glyphs = [] # (ox, oy, info) - self.rects = [] # (x1, y1, x2, y2) - - def to_vector(self): - w, h, d = map( - np.ceil, [self.box.width, self.box.height, self.box.depth]) - gs = [(info.font, info.fontsize, info.num, ox, h - oy + info.offset) - for ox, oy, info in self.glyphs] - rs = [(x1, h - y2, x2 - x1, y2 - y1) - for x1, y1, x2, y2 in self.rects] - return VectorParse(w, h + d, d, gs, rs) - - def to_raster(self): - # Metrics y's and mathtext y's are oriented in opposite directions, - # hence the switch between ymin and ymax. - xmin = min([*[ox + info.metrics.xmin for ox, oy, info in self.glyphs], - *[x1 for x1, y1, x2, y2 in self.rects], 0]) - 1 - ymin = min([*[oy - info.metrics.ymax for ox, oy, info in self.glyphs], - *[y1 for x1, y1, x2, y2 in self.rects], 0]) - 1 - xmax = max([*[ox + info.metrics.xmax for ox, oy, info in self.glyphs], - *[x2 for x1, y1, x2, y2 in self.rects], 0]) + 1 - ymax = max([*[oy - info.metrics.ymin for ox, oy, info in self.glyphs], - *[y2 for x1, y1, x2, y2 in self.rects], 0]) + 1 - w = xmax - xmin - h = ymax - ymin - self.box.depth - d = ymax - ymin - self.box.height - image = FT2Image(np.ceil(w), np.ceil(h + max(d, 0))) - - # Ideally, we could just use self.glyphs and self.rects here, shifting - # their coordinates by (-xmin, -ymin), but this yields slightly - # different results due to floating point slop; shipping twice is the - # old approach and keeps baseline images backcompat. 
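        # ship() re-walks the box tree and returns an Output whose glyphs and
        # rects carry positions relative to the shifted origin; the drawing
        # loops below consume that flattened representation.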
- shifted = ship(self.box, (-xmin, -ymin)) - - for ox, oy, info in shifted.glyphs: - info.font.draw_glyph_to_bitmap( - image, ox, oy - info.metrics.iceberg, info.glyph, - antialiased=mpl.rcParams['text.antialiased']) - for x1, y1, x2, y2 in shifted.rects: - height = max(int(y2 - y1) - 1, 0) - if height == 0: - center = (y2 + y1) / 2 - y = int(center - (height + 1) / 2) - else: - y = int(y1) - image.draw_rect_filled(int(x1), y, np.ceil(x2), y + height) - return RasterParse(0, 0, w, h + d, d, image) - - -class Fonts: - """ - An abstract base class for a system of fonts to use for mathtext. - - The class must be able to take symbol keys and font file names and - return the character metrics. It also delegates to a backend class - to do the actual drawing. - """ - - def __init__(self, default_font_prop, load_glyph_flags): - """ - Parameters - ---------- - default_font_prop : `~.font_manager.FontProperties` - The default non-math font, or the base font for Unicode (generic) - font rendering. - load_glyph_flags : int - Flags passed to the glyph loader (e.g. ``FT_Load_Glyph`` and - ``FT_Load_Char`` for FreeType-based fonts). - """ - self.default_font_prop = default_font_prop - self.load_glyph_flags = load_glyph_flags - - def get_kern(self, font1, fontclass1, sym1, fontsize1, - font2, fontclass2, sym2, fontsize2, dpi): - """ - Get the kerning distance for font between *sym1* and *sym2*. - - See `~.Fonts.get_metrics` for a detailed description of the parameters. - """ - return 0. - - def get_metrics(self, font, font_class, sym, fontsize, dpi): - r""" - Parameters - ---------- - font : str - One of the TeX font names: "tt", "it", "rm", "cal", "sf", "bf", - "default", "regular", "bb", "frak", "scr". "default" and "regular" - are synonyms and use the non-math font. - font_class : str - One of the TeX font names (as for *font*), but **not** "bb", - "frak", or "scr". This is used to combine two font classes. The - only supported combination currently is ``get_metrics("frak", "bf", - ...)``. - sym : str - A symbol in raw TeX form, e.g., "1", "x", or "\sigma". - fontsize : float - Font size in points. - dpi : float - Rendering dots-per-inch. - - Returns - ------- - object - - The returned object has the following attributes (all floats, - except *slanted*): - - - *advance*: The advance distance (in points) of the glyph. - - *height*: The height of the glyph in points. - - *width*: The width of the glyph in points. - - *xmin*, *xmax*, *ymin*, *ymax*: The ink rectangle of the glyph - - *iceberg*: The distance from the baseline to the top of the - glyph. (This corresponds to TeX's definition of "height".) - - *slanted*: Whether the glyph should be considered as "slanted" - (currently used for kerning sub/superscripts). - """ - info = self._get_info(font, font_class, sym, fontsize, dpi) - return info.metrics - - def render_glyph( - self, output, ox, oy, font, font_class, sym, fontsize, dpi): - """ - At position (*ox*, *oy*), draw the glyph specified by the remaining - parameters (see `get_metrics` for their detailed description). - """ - info = self._get_info(font, font_class, sym, fontsize, dpi) - output.glyphs.append((ox, oy, info)) - - def render_rect_filled(self, output, x1, y1, x2, y2): - """ - Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*). - """ - output.rects.append((x1, y1, x2, y2)) - - def get_xheight(self, font, fontsize, dpi): - """ - Get the xheight for the given *font* and *fontsize*. 
- """ - raise NotImplementedError() - - def get_underline_thickness(self, font, fontsize, dpi): - """ - Get the line thickness that matches the given font. Used as a - base unit for drawing lines such as in a fraction or radical. - """ - raise NotImplementedError() - - def get_used_characters(self): - """ - Get the set of characters that were used in the math - expression. Used by backends that need to subset fonts so - they know which glyphs to include. - """ - return self.used_characters - - def get_sized_alternatives_for_symbol(self, fontname, sym): - """ - Override if your font provides multiple sizes of the same - symbol. Should return a list of symbols matching *sym* in - various sizes. The expression renderer will select the most - appropriate size for a given situation from this list. - """ - return [(fontname, sym)] - - -class TruetypeFonts(Fonts): - """ - A generic base class for all font setups that use Truetype fonts - (through FT2Font). - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - # Per-instance cache. - self._get_info = functools.lru_cache(None)(self._get_info) - self._fonts = {} - - filename = findfont(self.default_font_prop) - default_font = get_font(filename) - self._fonts['default'] = default_font - self._fonts['regular'] = default_font - - def _get_font(self, font): - if font in self.fontmap: - basename = self.fontmap[font] - else: - basename = font - cached_font = self._fonts.get(basename) - if cached_font is None and os.path.exists(basename): - cached_font = get_font(basename) - self._fonts[basename] = cached_font - self._fonts[cached_font.postscript_name] = cached_font - self._fonts[cached_font.postscript_name.lower()] = cached_font - return cached_font - - def _get_offset(self, font, glyph, fontsize, dpi): - if font.postscript_name == 'Cmex10': - return (glyph.height / 64 / 2) + (fontsize/3 * dpi/72) - return 0. - - # The return value of _get_info is cached per-instance. - def _get_info(self, fontname, font_class, sym, fontsize, dpi): - font, num, slanted = self._get_glyph(fontname, font_class, sym) - font.set_size(fontsize, dpi) - glyph = font.load_char(num, flags=self.load_glyph_flags) - - xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox] - offset = self._get_offset(font, glyph, fontsize, dpi) - metrics = types.SimpleNamespace( - advance = glyph.linearHoriAdvance/65536.0, - height = glyph.height/64.0, - width = glyph.width/64.0, - xmin = xmin, - xmax = xmax, - ymin = ymin+offset, - ymax = ymax+offset, - # iceberg is the equivalent of TeX's "height" - iceberg = glyph.horiBearingY/64.0 + offset, - slanted = slanted - ) - - return types.SimpleNamespace( - font = font, - fontsize = fontsize, - postscript_name = font.postscript_name, - metrics = metrics, - num = num, - glyph = glyph, - offset = offset - ) - - def get_xheight(self, fontname, fontsize, dpi): - font = self._get_font(fontname) - font.set_size(fontsize, dpi) - pclt = font.get_sfnt_table('pclt') - if pclt is None: - # Some fonts don't store the xHeight, so we do a poor man's xHeight - metrics = self.get_metrics( - fontname, mpl.rcParams['mathtext.default'], 'x', fontsize, dpi) - return metrics.iceberg - xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0) - return xHeight - - def get_underline_thickness(self, font, fontsize, dpi): - # This function used to grab underline thickness from the font - # metrics, but that information is just too un-reliable, so it - # is now hardcoded. 
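        # Equivalent to 0.75 points at a 12 pt font size, scaled linearly
        # with fontsize and converted from points to pixels via dpi / 72.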
- return ((0.75 / 12.0) * fontsize * dpi) / 72.0 - - def get_kern(self, font1, fontclass1, sym1, fontsize1, - font2, fontclass2, sym2, fontsize2, dpi): - if font1 == font2 and fontsize1 == fontsize2: - info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi) - info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi) - font = info1.font - return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64 - return super().get_kern(font1, fontclass1, sym1, fontsize1, - font2, fontclass2, sym2, fontsize2, dpi) - - -class BakomaFonts(TruetypeFonts): - """ - Use the Bakoma TrueType fonts for rendering. - - Symbols are strewn about a number of font files, each of which has - its own proprietary 8-bit encoding. - """ - _fontmap = { - 'cal': 'cmsy10', - 'rm': 'cmr10', - 'tt': 'cmtt10', - 'it': 'cmmi10', - 'bf': 'cmb10', - 'sf': 'cmss10', - 'ex': 'cmex10', - } - - def __init__(self, *args, **kwargs): - self._stix_fallback = StixFonts(*args, **kwargs) - - super().__init__(*args, **kwargs) - self.fontmap = {} - for key, val in self._fontmap.items(): - fullpath = findfont(val) - self.fontmap[key] = fullpath - self.fontmap[val] = fullpath - - _slanted_symbols = set(r"\int \oint".split()) - - def _get_glyph(self, fontname, font_class, sym): - font = None - if fontname in self.fontmap and sym in latex_to_bakoma: - basename, num = latex_to_bakoma[sym] - slanted = (basename == "cmmi10") or sym in self._slanted_symbols - font = self._get_font(basename) - elif len(sym) == 1: - slanted = (fontname == "it") - font = self._get_font(fontname) - if font is not None: - num = ord(sym) - if font is not None and font.get_char_index(num) != 0: - return font, num, slanted - else: - return self._stix_fallback._get_glyph(fontname, font_class, sym) - - # The Bakoma fonts contain many pre-sized alternatives for the - # delimiters. The AutoSizedChar class will use these alternatives - # and select the best (closest sized) glyph. 
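    # (In this file the selection is performed by the AutoHeightChar and
    # AutoWidthChar helpers further down, which query
    # get_sized_alternatives_for_symbol().)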
- _size_alternatives = { - '(': [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'), - ('ex', '\xb5'), ('ex', '\xc3')], - ')': [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'), - ('ex', '\xb6'), ('ex', '\x21')], - '{': [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'), - ('ex', '\xbd'), ('ex', '\x28')], - '}': [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'), - ('ex', '\xbe'), ('ex', '\x29')], - # The fourth size of '[' is mysteriously missing from the BaKoMa - # font, so I've omitted it for both '[' and ']' - '[': [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'), - ('ex', '\x22')], - ']': [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'), - ('ex', '\x23')], - r'\lfloor': [('ex', '\xa5'), ('ex', '\x6a'), - ('ex', '\xb9'), ('ex', '\x24')], - r'\rfloor': [('ex', '\xa6'), ('ex', '\x6b'), - ('ex', '\xba'), ('ex', '\x25')], - r'\lceil': [('ex', '\xa7'), ('ex', '\x6c'), - ('ex', '\xbb'), ('ex', '\x26')], - r'\rceil': [('ex', '\xa8'), ('ex', '\x6d'), - ('ex', '\xbc'), ('ex', '\x27')], - r'\langle': [('ex', '\xad'), ('ex', '\x44'), - ('ex', '\xbf'), ('ex', '\x2a')], - r'\rangle': [('ex', '\xae'), ('ex', '\x45'), - ('ex', '\xc0'), ('ex', '\x2b')], - r'\__sqrt__': [('ex', '\x70'), ('ex', '\x71'), - ('ex', '\x72'), ('ex', '\x73')], - r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'), - ('ex', '\xc2'), ('ex', '\x2d')], - r'/': [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'), - ('ex', '\xcb'), ('ex', '\x2c')], - r'\widehat': [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'), - ('ex', '\x64')], - r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'), - ('ex', '\x67')], - r'<': [('cal', 'h'), ('ex', 'D')], - r'>': [('cal', 'i'), ('ex', 'E')] - } - - for alias, target in [(r'\leftparen', '('), - (r'\rightparent', ')'), - (r'\leftbrace', '{'), - (r'\rightbrace', '}'), - (r'\leftbracket', '['), - (r'\rightbracket', ']'), - (r'\{', '{'), - (r'\}', '}'), - (r'\[', '['), - (r'\]', ']')]: - _size_alternatives[alias] = _size_alternatives[target] - - def get_sized_alternatives_for_symbol(self, fontname, sym): - return self._size_alternatives.get(sym, [(fontname, sym)]) - - -class UnicodeFonts(TruetypeFonts): - """ - An abstract base class for handling Unicode fonts. - - While some reasonably complete Unicode fonts (such as DejaVu) may - work in some situations, the only Unicode font I'm aware of with a - complete set of math symbols is STIX. - - This class will "fallback" on the Bakoma fonts when a required - symbol can not be found in the font. - """ - - # Some glyphs are not present in the `cmr10` font, and must be brought in - # from `cmsy10`. Map the Unicode indices of those glyphs to the indices at - # which they are found in `cmsy10`. - _cmr10_substitutions = { - 0x00D7: 0x00A3, # Multiplication sign. - 0x2212: 0x00A1, # Minus sign. - } - - def __init__(self, *args, **kwargs): - # This must come first so the backend's owner is set correctly - fallback_rc = mpl.rcParams['mathtext.fallback'] - font_cls = {'stix': StixFonts, - 'stixsans': StixSansFonts, - 'cm': BakomaFonts - }.get(fallback_rc) - self._fallback_font = font_cls(*args, **kwargs) if font_cls else None - - super().__init__(*args, **kwargs) - self.fontmap = {} - for texfont in "cal rm tt it bf sf".split(): - prop = mpl.rcParams['mathtext.' 
+ texfont] - font = findfont(prop) - self.fontmap[texfont] = font - prop = FontProperties('cmex10') - font = findfont(prop) - self.fontmap['ex'] = font - - # include STIX sized alternatives for glyphs if fallback is STIX - if isinstance(self._fallback_font, StixFonts): - stixsizedaltfonts = { - 0: 'STIXGeneral', - 1: 'STIXSizeOneSym', - 2: 'STIXSizeTwoSym', - 3: 'STIXSizeThreeSym', - 4: 'STIXSizeFourSym', - 5: 'STIXSizeFiveSym'} - - for size, name in stixsizedaltfonts.items(): - fullpath = findfont(name) - self.fontmap[size] = fullpath - self.fontmap[name] = fullpath - - _slanted_symbols = set(r"\int \oint".split()) - - def _map_virtual_font(self, fontname, font_class, uniindex): - return fontname, uniindex - - def _get_glyph(self, fontname, font_class, sym): - try: - uniindex = get_unicode_index(sym) - found_symbol = True - except ValueError: - uniindex = ord('?') - found_symbol = False - _log.warning("No TeX to Unicode mapping for {!a}.".format(sym)) - - fontname, uniindex = self._map_virtual_font( - fontname, font_class, uniindex) - - new_fontname = fontname - - # Only characters in the "Letter" class should be italicized in 'it' - # mode. Greek capital letters should be Roman. - if found_symbol: - if fontname == 'it' and uniindex < 0x10000: - char = chr(uniindex) - if (unicodedata.category(char)[0] != "L" - or unicodedata.name(char).startswith("GREEK CAPITAL")): - new_fontname = 'rm' - - slanted = (new_fontname == 'it') or sym in self._slanted_symbols - found_symbol = False - font = self._get_font(new_fontname) - if font is not None: - if (uniindex in self._cmr10_substitutions - and font.family_name == "cmr10"): - font = get_font( - cbook._get_data_path("fonts/ttf/cmsy10.ttf")) - uniindex = self._cmr10_substitutions[uniindex] - glyphindex = font.get_char_index(uniindex) - if glyphindex != 0: - found_symbol = True - - if not found_symbol: - if self._fallback_font: - if (fontname in ('it', 'regular') - and isinstance(self._fallback_font, StixFonts)): - fontname = 'rm' - - g = self._fallback_font._get_glyph(fontname, font_class, sym) - family = g[0].family_name - if family in list(BakomaFonts._fontmap.values()): - family = "Computer Modern" - _log.info("Substituting symbol %s from %s", sym, family) - return g - - else: - if (fontname in ('it', 'regular') - and isinstance(self, StixFonts)): - return self._get_glyph('rm', font_class, sym) - _log.warning("Font {!r} does not have a glyph for {!a} " - "[U+{:x}], substituting with a dummy " - "symbol.".format(new_fontname, sym, uniindex)) - font = self._get_font('rm') - uniindex = 0xA4 # currency char, for lack of anything better - slanted = False - - return font, uniindex, slanted - - def get_sized_alternatives_for_symbol(self, fontname, sym): - if self._fallback_font: - return self._fallback_font.get_sized_alternatives_for_symbol( - fontname, sym) - return [(fontname, sym)] - - -class DejaVuFonts(UnicodeFonts): - - def __init__(self, *args, **kwargs): - # This must come first so the backend's owner is set correctly - if isinstance(self, DejaVuSerifFonts): - self._fallback_font = StixFonts(*args, **kwargs) - else: - self._fallback_font = StixSansFonts(*args, **kwargs) - self.bakoma = BakomaFonts(*args, **kwargs) - TruetypeFonts.__init__(self, *args, **kwargs) - self.fontmap = {} - # Include Stix sized alternatives for glyphs - self._fontmap.update({ - 1: 'STIXSizeOneSym', - 2: 'STIXSizeTwoSym', - 3: 'STIXSizeThreeSym', - 4: 'STIXSizeFourSym', - 5: 'STIXSizeFiveSym', - }) - for key, name in self._fontmap.items(): - fullpath = findfont(name) - 
self.fontmap[key] = fullpath - self.fontmap[name] = fullpath - - def _get_glyph(self, fontname, font_class, sym): - # Override prime symbol to use Bakoma. - if sym == r'\prime': - return self.bakoma._get_glyph(fontname, font_class, sym) - else: - # check whether the glyph is available in the display font - uniindex = get_unicode_index(sym) - font = self._get_font('ex') - if font is not None: - glyphindex = font.get_char_index(uniindex) - if glyphindex != 0: - return super()._get_glyph('ex', font_class, sym) - # otherwise return regular glyph - return super()._get_glyph(fontname, font_class, sym) - - -class DejaVuSerifFonts(DejaVuFonts): - """ - A font handling class for the DejaVu Serif fonts - - If a glyph is not found it will fallback to Stix Serif - """ - _fontmap = { - 'rm': 'DejaVu Serif', - 'it': 'DejaVu Serif:italic', - 'bf': 'DejaVu Serif:weight=bold', - 'sf': 'DejaVu Sans', - 'tt': 'DejaVu Sans Mono', - 'ex': 'DejaVu Serif Display', - 0: 'DejaVu Serif', - } - - -class DejaVuSansFonts(DejaVuFonts): - """ - A font handling class for the DejaVu Sans fonts - - If a glyph is not found it will fallback to Stix Sans - """ - _fontmap = { - 'rm': 'DejaVu Sans', - 'it': 'DejaVu Sans:italic', - 'bf': 'DejaVu Sans:weight=bold', - 'sf': 'DejaVu Sans', - 'tt': 'DejaVu Sans Mono', - 'ex': 'DejaVu Sans Display', - 0: 'DejaVu Sans', - } - - -class StixFonts(UnicodeFonts): - """ - A font handling class for the STIX fonts. - - In addition to what UnicodeFonts provides, this class: - - - supports "virtual fonts" which are complete alpha numeric - character sets with different font styles at special Unicode - code points, such as "Blackboard". - - - handles sized alternative characters for the STIXSizeX fonts. - """ - _fontmap = { - 'rm': 'STIXGeneral', - 'it': 'STIXGeneral:italic', - 'bf': 'STIXGeneral:weight=bold', - 'nonunirm': 'STIXNonUnicode', - 'nonuniit': 'STIXNonUnicode:italic', - 'nonunibf': 'STIXNonUnicode:weight=bold', - 0: 'STIXGeneral', - 1: 'STIXSizeOneSym', - 2: 'STIXSizeTwoSym', - 3: 'STIXSizeThreeSym', - 4: 'STIXSizeFourSym', - 5: 'STIXSizeFiveSym', - } - _fallback_font = False - _sans = False - - def __init__(self, *args, **kwargs): - TruetypeFonts.__init__(self, *args, **kwargs) - self.fontmap = {} - for key, name in self._fontmap.items(): - fullpath = findfont(name) - self.fontmap[key] = fullpath - self.fontmap[name] = fullpath - - def _map_virtual_font(self, fontname, font_class, uniindex): - # Handle these "fonts" that are actually embedded in - # other fonts. - mapping = stix_virtual_fonts.get(fontname) - if (self._sans and mapping is None - and fontname not in ('regular', 'default')): - mapping = stix_virtual_fonts['sf'] - doing_sans_conversion = True - else: - doing_sans_conversion = False - - if mapping is not None: - if isinstance(mapping, dict): - try: - mapping = mapping[font_class] - except KeyError: - mapping = mapping['rm'] - - # Binary search for the source glyph - lo = 0 - hi = len(mapping) - while lo < hi: - mid = (lo+hi)//2 - range = mapping[mid] - if uniindex < range[0]: - hi = mid - elif uniindex <= range[1]: - break - else: - lo = mid + 1 - - if range[0] <= uniindex <= range[1]: - uniindex = uniindex - range[0] + range[3] - fontname = range[2] - elif not doing_sans_conversion: - # This will generate a dummy character - uniindex = 0x1 - fontname = mpl.rcParams['mathtext.default'] - - # Fix some incorrect glyphs. 
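        # stix_glyph_fixes maps the Unicode index of a known-bad glyph in the
        # STIX rm/it faces to the index of a corrected replacement.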
- if fontname in ('rm', 'it'): - uniindex = stix_glyph_fixes.get(uniindex, uniindex) - - # Handle private use area glyphs - if fontname in ('it', 'rm', 'bf') and 0xe000 <= uniindex <= 0xf8ff: - fontname = 'nonuni' + fontname - - return fontname, uniindex - - @functools.lru_cache() - def get_sized_alternatives_for_symbol(self, fontname, sym): - fixes = { - '\\{': '{', '\\}': '}', '\\[': '[', '\\]': ']', - '<': '\N{MATHEMATICAL LEFT ANGLE BRACKET}', - '>': '\N{MATHEMATICAL RIGHT ANGLE BRACKET}', - } - sym = fixes.get(sym, sym) - try: - uniindex = get_unicode_index(sym) - except ValueError: - return [(fontname, sym)] - alternatives = [(i, chr(uniindex)) for i in range(6) - if self._get_font(i).get_char_index(uniindex) != 0] - # The largest size of the radical symbol in STIX has incorrect - # metrics that cause it to be disconnected from the stem. - if sym == r'\__sqrt__': - alternatives = alternatives[:-1] - return alternatives - - -class StixSansFonts(StixFonts): - """ - A font handling class for the STIX fonts (that uses sans-serif - characters by default). - """ - _sans = True - - -############################################################################## -# TeX-LIKE BOX MODEL - -# The following is based directly on the document 'woven' from the -# TeX82 source code. This information is also available in printed -# form: -# -# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B: -# TeX: The Program. Addison-Wesley Professional. -# -# The most relevant "chapters" are: -# Data structures for boxes and their friends -# Shipping pages out (ship()) -# Packaging (hpack() and vpack()) -# Data structures for math mode -# Subroutines for math mode -# Typesetting math formulas -# -# Many of the docstrings below refer to a numbered "node" in that -# book, e.g., node123 -# -# Note that (as TeX) y increases downward, unlike many other parts of -# matplotlib. - -# How much text shrinks when going to the next-smallest level. -SHRINK_FACTOR = 0.7 -# The number of different sizes of chars to use, beyond which they will not -# get any smaller -NUM_SIZE_LEVELS = 6 - - -class FontConstantsBase: - """ - A set of constants that controls how certain things, such as sub- - and superscripts are laid out. These are all metrics that can't - be reliably retrieved from the font metrics in the font itself. - """ - # Percentage of x-height of additional horiz. 
space after sub/superscripts - script_space = 0.05 - - # Percentage of x-height that sub/superscripts drop below the baseline - subdrop = 0.4 - - # Percentage of x-height that superscripts are raised from the baseline - sup1 = 0.7 - - # Percentage of x-height that subscripts drop below the baseline - sub1 = 0.3 - - # Percentage of x-height that subscripts drop below the baseline when a - # superscript is present - sub2 = 0.5 - - # Percentage of x-height that sub/superscripts are offset relative to the - # nucleus edge for non-slanted nuclei - delta = 0.025 - - # Additional percentage of last character height above 2/3 of the - # x-height that superscripts are offset relative to the subscript - # for slanted nuclei - delta_slanted = 0.2 - - # Percentage of x-height that superscripts and subscripts are offset for - # integrals - delta_integral = 0.1 - - -class ComputerModernFontConstants(FontConstantsBase): - script_space = 0.075 - subdrop = 0.2 - sup1 = 0.45 - sub1 = 0.2 - sub2 = 0.3 - delta = 0.075 - delta_slanted = 0.3 - delta_integral = 0.3 - - -class STIXFontConstants(FontConstantsBase): - script_space = 0.1 - sup1 = 0.8 - sub2 = 0.6 - delta = 0.05 - delta_slanted = 0.3 - delta_integral = 0.3 - - -class STIXSansFontConstants(FontConstantsBase): - script_space = 0.05 - sup1 = 0.8 - delta_slanted = 0.6 - delta_integral = 0.3 - - -class DejaVuSerifFontConstants(FontConstantsBase): - pass - - -class DejaVuSansFontConstants(FontConstantsBase): - pass - - -# Maps font family names to the FontConstantBase subclass to use -_font_constant_mapping = { - 'DejaVu Sans': DejaVuSansFontConstants, - 'DejaVu Sans Mono': DejaVuSansFontConstants, - 'DejaVu Serif': DejaVuSerifFontConstants, - 'cmb10': ComputerModernFontConstants, - 'cmex10': ComputerModernFontConstants, - 'cmmi10': ComputerModernFontConstants, - 'cmr10': ComputerModernFontConstants, - 'cmss10': ComputerModernFontConstants, - 'cmsy10': ComputerModernFontConstants, - 'cmtt10': ComputerModernFontConstants, - 'STIXGeneral': STIXFontConstants, - 'STIXNonUnicode': STIXFontConstants, - 'STIXSizeFiveSym': STIXFontConstants, - 'STIXSizeFourSym': STIXFontConstants, - 'STIXSizeThreeSym': STIXFontConstants, - 'STIXSizeTwoSym': STIXFontConstants, - 'STIXSizeOneSym': STIXFontConstants, - # Map the fonts we used to ship, just for good measure - 'Bitstream Vera Sans': DejaVuSansFontConstants, - 'Bitstream Vera': DejaVuSansFontConstants, - } - - -def _get_font_constant_set(state): - constants = _font_constant_mapping.get( - state.fontset._get_font(state.font).family_name, FontConstantsBase) - # STIX sans isn't really its own fonts, just different code points - # in the STIX fonts, so we have to detect this one separately. - if (constants is STIXFontConstants and - isinstance(state.fontset, StixSansFonts)): - return STIXSansFontConstants - return constants - - -class Node: - """A node in the TeX box model.""" - - def __init__(self): - self.size = 0 - - def __repr__(self): - return type(self).__name__ - - def get_kerning(self, next): - return 0.0 - - def shrink(self): - """ - Shrinks one level smaller. There are only three levels of - sizes, after which things will no longer get smaller. 
- """ - self.size += 1 - - def render(self, output, x, y): - """Render this node.""" - - -class Box(Node): - """A node with a physical location.""" - - def __init__(self, width, height, depth): - super().__init__() - self.width = width - self.height = height - self.depth = depth - - def shrink(self): - super().shrink() - if self.size < NUM_SIZE_LEVELS: - self.width *= SHRINK_FACTOR - self.height *= SHRINK_FACTOR - self.depth *= SHRINK_FACTOR - - def render(self, output, x1, y1, x2, y2): - pass - - -class Vbox(Box): - """A box with only height (zero width).""" - - def __init__(self, height, depth): - super().__init__(0., height, depth) - - -class Hbox(Box): - """A box with only width (zero height and depth).""" - - def __init__(self, width): - super().__init__(width, 0., 0.) - - -class Char(Node): - """ - A single character. - - Unlike TeX, the font information and metrics are stored with each `Char` - to make it easier to lookup the font metrics when needed. Note that TeX - boxes have a width, height, and depth, unlike Type1 and TrueType which use - a full bounding box and an advance in the x-direction. The metrics must - be converted to the TeX model, and the advance (if different from width) - must be converted into a `Kern` node when the `Char` is added to its parent - `Hlist`. - """ - - def __init__(self, c, state): - super().__init__() - self.c = c - self.fontset = state.fontset - self.font = state.font - self.font_class = state.font_class - self.fontsize = state.fontsize - self.dpi = state.dpi - # The real width, height and depth will be set during the - # pack phase, after we know the real fontsize - self._update_metrics() - - def __repr__(self): - return '`%s`' % self.c - - def _update_metrics(self): - metrics = self._metrics = self.fontset.get_metrics( - self.font, self.font_class, self.c, self.fontsize, self.dpi) - if self.c == ' ': - self.width = metrics.advance - else: - self.width = metrics.width - self.height = metrics.iceberg - self.depth = -(metrics.iceberg - metrics.height) - - def is_slanted(self): - return self._metrics.slanted - - def get_kerning(self, next): - """ - Return the amount of kerning between this and the given character. - - This method is called when characters are strung together into `Hlist` - to create `Kern` nodes. - """ - advance = self._metrics.advance - self.width - kern = 0. - if isinstance(next, Char): - kern = self.fontset.get_kern( - self.font, self.font_class, self.c, self.fontsize, - next.font, next.font_class, next.c, next.fontsize, - self.dpi) - return advance + kern - - def render(self, output, x, y): - self.fontset.render_glyph( - output, x, y, - self.font, self.font_class, self.c, self.fontsize, self.dpi) - - def shrink(self): - super().shrink() - if self.size < NUM_SIZE_LEVELS: - self.fontsize *= SHRINK_FACTOR - self.width *= SHRINK_FACTOR - self.height *= SHRINK_FACTOR - self.depth *= SHRINK_FACTOR - - -class Accent(Char): - """ - The font metrics need to be dealt with differently for accents, - since they are already offset correctly from the baseline in - TrueType fonts. 
- """ - def _update_metrics(self): - metrics = self._metrics = self.fontset.get_metrics( - self.font, self.font_class, self.c, self.fontsize, self.dpi) - self.width = metrics.xmax - metrics.xmin - self.height = metrics.ymax - metrics.ymin - self.depth = 0 - - def shrink(self): - super().shrink() - self._update_metrics() - - def render(self, output, x, y): - self.fontset.render_glyph( - output, x - self._metrics.xmin, y + self._metrics.ymin, - self.font, self.font_class, self.c, self.fontsize, self.dpi) - - -class List(Box): - """A list of nodes (either horizontal or vertical).""" - - def __init__(self, elements): - super().__init__(0., 0., 0.) - self.shift_amount = 0. # An arbitrary offset - self.children = elements # The child nodes of this list - # The following parameters are set in the vpack and hpack functions - self.glue_set = 0. # The glue setting of this list - self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching - self.glue_order = 0 # The order of infinity (0 - 3) for the glue - - def __repr__(self): - return '%s[%s]' % ( - super().__repr__(), - self.width, self.height, - self.depth, self.shift_amount, - ', '.join([repr(x) for x in self.children])) - - def _set_glue(self, x, sign, totals, error_type): - self.glue_order = o = next( - # Highest order of glue used by the members of this list. - (i for i in range(len(totals))[::-1] if totals[i] != 0), 0) - self.glue_sign = sign - if totals[o] != 0.: - self.glue_set = x / totals[o] - else: - self.glue_sign = 0 - self.glue_ratio = 0. - if o == 0: - if len(self.children): - _log.warning("%s %s: %r", - error_type, type(self).__name__, self) - - def shrink(self): - for child in self.children: - child.shrink() - super().shrink() - if self.size < NUM_SIZE_LEVELS: - self.shift_amount *= SHRINK_FACTOR - self.glue_set *= SHRINK_FACTOR - - -class Hlist(List): - """A horizontal list of boxes.""" - - def __init__(self, elements, w=0., m='additional', do_kern=True): - super().__init__(elements) - if do_kern: - self.kern() - self.hpack(w=w, m=m) - - def kern(self): - """ - Insert `Kern` nodes between `Char` nodes to set kerning. - - The `Char` nodes themselves determine the amount of kerning they need - (in `~Char.get_kerning`), and this function just creates the correct - linked list. - """ - new_children = [] - num_children = len(self.children) - if num_children: - for i in range(num_children): - elem = self.children[i] - if i < num_children - 1: - next = self.children[i + 1] - else: - next = None - - new_children.append(elem) - kerning_distance = elem.get_kerning(next) - if kerning_distance != 0.: - kern = Kern(kerning_distance) - new_children.append(kern) - self.children = new_children - - # This is a failed experiment to fake cross-font kerning. -# def get_kerning(self, next): -# if len(self.children) >= 2 and isinstance(self.children[-2], Char): -# if isinstance(next, Char): -# print "CASE A" -# return self.children[-2].get_kerning(next) -# elif (isinstance(next, Hlist) and len(next.children) -# and isinstance(next.children[0], Char)): -# print "CASE B" -# result = self.children[-2].get_kerning(next.children[0]) -# print result -# return result -# return 0.0 - - def hpack(self, w=0., m='additional'): - r""" - Compute the dimensions of the resulting boxes, and adjust the glue if - one of those dimensions is pre-specified. 
The computed sizes normally - enclose all of the material inside the new box; but some items may - stick out if negative glue is used, if the box is overfull, or if a - ``\vbox`` includes other boxes that have been shifted left. - - Parameters - ---------- - w : float, default: 0 - A width. - m : {'exactly', 'additional'}, default: 'additional' - Whether to produce a box whose width is 'exactly' *w*; or a box - with the natural width of the contents, plus *w* ('additional'). - - Notes - ----- - The defaults produce a box with the natural width of the contents. - """ - # I don't know why these get reset in TeX. Shift_amount is pretty - # much useless if we do. - # self.shift_amount = 0. - h = 0. - d = 0. - x = 0. - total_stretch = [0.] * 4 - total_shrink = [0.] * 4 - for p in self.children: - if isinstance(p, Char): - x += p.width - h = max(h, p.height) - d = max(d, p.depth) - elif isinstance(p, Box): - x += p.width - if not np.isinf(p.height) and not np.isinf(p.depth): - s = getattr(p, 'shift_amount', 0.) - h = max(h, p.height - s) - d = max(d, p.depth + s) - elif isinstance(p, Glue): - glue_spec = p.glue_spec - x += glue_spec.width - total_stretch[glue_spec.stretch_order] += glue_spec.stretch - total_shrink[glue_spec.shrink_order] += glue_spec.shrink - elif isinstance(p, Kern): - x += p.width - self.height = h - self.depth = d - - if m == 'additional': - w += x - self.width = w - x = w - x - - if x == 0.: - self.glue_sign = 0 - self.glue_order = 0 - self.glue_ratio = 0. - return - if x > 0.: - self._set_glue(x, 1, total_stretch, "Overful") - else: - self._set_glue(x, -1, total_shrink, "Underful") - - -class Vlist(List): - """A vertical list of boxes.""" - - def __init__(self, elements, h=0., m='additional'): - super().__init__(elements) - self.vpack(h=h, m=m) - - def vpack(self, h=0., m='additional', l=np.inf): - """ - Compute the dimensions of the resulting boxes, and to adjust the glue - if one of those dimensions is pre-specified. - - Parameters - ---------- - h : float, default: 0 - A height. - m : {'exactly', 'additional'}, default: 'additional' - Whether to produce a box whose height is 'exactly' *h*; or a box - with the natural height of the contents, plus *h* ('additional'). - l : float, default: np.inf - The maximum height. - - Notes - ----- - The defaults produce a box with the natural height of the contents. - """ - # I don't know why these get reset in TeX. Shift_amount is pretty - # much useless if we do. - # self.shift_amount = 0. - w = 0. - d = 0. - x = 0. - total_stretch = [0.] * 4 - total_shrink = [0.] * 4 - for p in self.children: - if isinstance(p, Box): - x += d + p.height - d = p.depth - if not np.isinf(p.width): - s = getattr(p, 'shift_amount', 0.) - w = max(w, p.width + s) - elif isinstance(p, Glue): - x += d - d = 0. - glue_spec = p.glue_spec - x += glue_spec.width - total_stretch[glue_spec.stretch_order] += glue_spec.stretch - total_shrink[glue_spec.shrink_order] += glue_spec.shrink - elif isinstance(p, Kern): - x += d + p.width - d = 0. - elif isinstance(p, Char): - raise RuntimeError( - "Internal mathtext error: Char node found in Vlist") - - self.width = w - if d > l: - x += d - l - self.depth = l - else: - self.depth = d - - if m == 'additional': - h += x - self.height = h - x = h - x - - if x == 0: - self.glue_sign = 0 - self.glue_order = 0 - self.glue_ratio = 0. - return - - if x > 0.: - self._set_glue(x, 1, total_stretch, "Overful") - else: - self._set_glue(x, -1, total_shrink, "Underful") - - -class Rule(Box): - """ - A solid black rectangle. 
- - It has *width*, *depth*, and *height* fields just as in an `Hlist`. - However, if any of these dimensions is inf, the actual value will be - determined by running the rule up to the boundary of the innermost - enclosing box. This is called a "running dimension". The width is never - running in an `Hlist`; the height and depth are never running in a `Vlist`. - """ - - def __init__(self, width, height, depth, state): - super().__init__(width, height, depth) - self.fontset = state.fontset - - def render(self, output, x, y, w, h): - self.fontset.render_rect_filled(output, x, y, x + w, y + h) - - -class Hrule(Rule): - """Convenience class to create a horizontal rule.""" - - def __init__(self, state, thickness=None): - if thickness is None: - thickness = state.get_current_underline_thickness() - height = depth = thickness * 0.5 - super().__init__(np.inf, height, depth, state) - - -class Vrule(Rule): - """Convenience class to create a vertical rule.""" - - def __init__(self, state): - thickness = state.get_current_underline_thickness() - super().__init__(thickness, np.inf, np.inf, state) - - -_GlueSpec = namedtuple( - "_GlueSpec", "width stretch stretch_order shrink shrink_order") -_GlueSpec._named = { - 'fil': _GlueSpec(0., 1., 1, 0., 0), - 'fill': _GlueSpec(0., 1., 2, 0., 0), - 'filll': _GlueSpec(0., 1., 3, 0., 0), - 'neg_fil': _GlueSpec(0., 0., 0, 1., 1), - 'neg_fill': _GlueSpec(0., 0., 0, 1., 2), - 'neg_filll': _GlueSpec(0., 0., 0, 1., 3), - 'empty': _GlueSpec(0., 0., 0, 0., 0), - 'ss': _GlueSpec(0., 1., 1, -1., 1), -} - - -class Glue(Node): - """ - Most of the information in this object is stored in the underlying - ``_GlueSpec`` class, which is shared between multiple glue objects. - (This is a memory optimization which probably doesn't matter anymore, but - it's easier to stick to what TeX does.) - """ - - def __init__(self, glue_type): - super().__init__() - if isinstance(glue_type, str): - glue_spec = _GlueSpec._named[glue_type] - elif isinstance(glue_type, _GlueSpec): - glue_spec = glue_type - else: - raise ValueError("glue_type must be a glue spec name or instance") - self.glue_spec = glue_spec - - def shrink(self): - super().shrink() - if self.size < NUM_SIZE_LEVELS: - g = self.glue_spec - self.glue_spec = g._replace(width=g.width * SHRINK_FACTOR) - - -class HCentered(Hlist): - """ - A convenience class to create an `Hlist` whose contents are - centered within its enclosing box. - """ - - def __init__(self, elements): - super().__init__([Glue('ss'), *elements, Glue('ss')], do_kern=False) - - -class VCentered(Vlist): - """ - A convenience class to create a `Vlist` whose contents are - centered within its enclosing box. - """ - - def __init__(self, elements): - super().__init__([Glue('ss'), *elements, Glue('ss')]) - - -class Kern(Node): - """ - A `Kern` node has a width field to specify a (normally - negative) amount of spacing. This spacing correction appears in - horizontal lists between letters like A and V when the font - designer said that it looks better to move them closer together or - further apart. A kern node can also appear in a vertical list, - when its *width* denotes additional spacing in the vertical - direction. 
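To make the kerning splice concrete: `Hlist.kern` above walks adjacent nodes and inserts a `Kern` between each pair whose fonts ask for one. A standalone sketch of that splice, with a hypothetical `kern_table` standing in for `Char.get_kerning` (an illustration, not this module's API):

    def insert_kerns(chars, kern_table):
        # Walk (left, right) pairs; right is None after the last element,
        # mirroring the loop in Hlist.kern.
        out = []
        for left, right in zip(chars, chars[1:] + [None]):
            out.append(left)
            k = kern_table.get((left, right), 0.0)
            if k:
                out.append(('kern', k))  # stands in for a Kern node
        return out

    print(insert_kerns(list('AV'), {('A', 'V'): -0.1}))
    # -> ['A', ('kern', -0.1), 'V']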
- """ - - height = 0 - depth = 0 - - def __init__(self, width): - super().__init__() - self.width = width - - def __repr__(self): - return "k%.02f" % self.width - - def shrink(self): - super().shrink() - if self.size < NUM_SIZE_LEVELS: - self.width *= SHRINK_FACTOR - - -class AutoHeightChar(Hlist): - """ - A character as close to the given height and depth as possible. - - When using a font with multiple height versions of some characters (such as - the BaKoMa fonts), the correct glyph will be selected, otherwise this will - always just return a scaled version of the glyph. - """ - - def __init__(self, c, height, depth, state, always=False, factor=None): - alternatives = state.fontset.get_sized_alternatives_for_symbol( - state.font, c) - - xHeight = state.fontset.get_xheight( - state.font, state.fontsize, state.dpi) - - state = state.copy() - target_total = height + depth - for fontname, sym in alternatives: - state.font = fontname - char = Char(sym, state) - # Ensure that size 0 is chosen when the text is regular sized but - # with descender glyphs by subtracting 0.2 * xHeight - if char.height + char.depth >= target_total - 0.2 * xHeight: - break - - shift = 0 - if state.font != 0 or len(alternatives) == 1: - if factor is None: - factor = target_total / (char.height + char.depth) - state.fontsize *= factor - char = Char(sym, state) - - shift = (depth - char.depth) - - super().__init__([char]) - self.shift_amount = shift - - -class AutoWidthChar(Hlist): - """ - A character as close to the given width as possible. - - When using a font with multiple width versions of some characters (such as - the BaKoMa fonts), the correct glyph will be selected, otherwise this will - always just return a scaled version of the glyph. - """ - - def __init__(self, c, width, state, always=False, char_class=Char): - alternatives = state.fontset.get_sized_alternatives_for_symbol( - state.font, c) - - state = state.copy() - for fontname, sym in alternatives: - state.font = fontname - char = char_class(sym, state) - if char.width >= width: - break - - factor = width / char.width - state.fontsize *= factor - char = char_class(sym, state) - - super().__init__([char]) - self.width = char.width - - -def ship(box, xy=(0, 0)): - """ - Ship out *box* at offset *xy*, converting it to an `Output`. - - Since boxes can be inside of boxes inside of boxes, the main work of `ship` - is done by two mutually recursive routines, `hlist_out` and `vlist_out`, - which traverse the `Hlist` nodes and `Vlist` nodes inside of horizontal - and vertical boxes. The global variables used in TeX to store state as it - processes have become local variables here. - """ - ox, oy = xy - cur_v = 0. - cur_h = 0. - off_h = ox - off_v = oy + box.height - output = Output(box) - - def clamp(value): - return -1e9 if value < -1e9 else +1e9 if value > +1e9 else value - - def hlist_out(box): - nonlocal cur_v, cur_h, off_h, off_v - - cur_g = 0 - cur_glue = 0. 
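An aside before the traversal continues: the `cur_glue`/`cur_g` bookkeeping below replays, node by node, the glue that `_set_glue` computed when the box was packed. A minimal standalone sketch of that computation, under the same highest-order-wins rule (hypothetical names, not the module's API):

    def set_glue(excess, totals):
        # The highest infinity order that has any glue absorbs all of the
        # excess; glue of lower orders stays at its natural size.
        order = next((i for i in range(len(totals) - 1, -1, -1) if totals[i]), 0)
        ratio = excess / totals[order] if totals[order] else 0.0
        return order, ratio

    # One unit of finite stretch (order 0) plus two units of 'fil' (order 1):
    assert set_glue(3.0, [1.0, 2.0, 0.0, 0.0]) == (1, 1.5)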
- glue_order = box.glue_order - glue_sign = box.glue_sign - base_line = cur_v - left_edge = cur_h - - for p in box.children: - if isinstance(p, Char): - p.render(output, cur_h + off_h, cur_v + off_v) - cur_h += p.width - elif isinstance(p, Kern): - cur_h += p.width - elif isinstance(p, List): - # node623 - if len(p.children) == 0: - cur_h += p.width - else: - edge = cur_h - cur_v = base_line + p.shift_amount - if isinstance(p, Hlist): - hlist_out(p) - else: - # p.vpack(box.height + box.depth, 'exactly') - vlist_out(p) - cur_h = edge + p.width - cur_v = base_line - elif isinstance(p, Box): - # node624 - rule_height = p.height - rule_depth = p.depth - rule_width = p.width - if np.isinf(rule_height): - rule_height = box.height - if np.isinf(rule_depth): - rule_depth = box.depth - if rule_height > 0 and rule_width > 0: - cur_v = base_line + rule_depth - p.render(output, - cur_h + off_h, cur_v + off_v, - rule_width, rule_height) - cur_v = base_line - cur_h += rule_width - elif isinstance(p, Glue): - # node625 - glue_spec = p.glue_spec - rule_width = glue_spec.width - cur_g - if glue_sign != 0: # normal - if glue_sign == 1: # stretching - if glue_spec.stretch_order == glue_order: - cur_glue += glue_spec.stretch - cur_g = round(clamp(box.glue_set * cur_glue)) - elif glue_spec.shrink_order == glue_order: - cur_glue += glue_spec.shrink - cur_g = round(clamp(box.glue_set * cur_glue)) - rule_width += cur_g - cur_h += rule_width - - def vlist_out(box): - nonlocal cur_v, cur_h, off_h, off_v - - cur_g = 0 - cur_glue = 0. - glue_order = box.glue_order - glue_sign = box.glue_sign - left_edge = cur_h - cur_v -= box.height - top_edge = cur_v - - for p in box.children: - if isinstance(p, Kern): - cur_v += p.width - elif isinstance(p, List): - if len(p.children) == 0: - cur_v += p.height + p.depth - else: - cur_v += p.height - cur_h = left_edge + p.shift_amount - save_v = cur_v - p.width = box.width - if isinstance(p, Hlist): - hlist_out(p) - else: - vlist_out(p) - cur_v = save_v + p.depth - cur_h = left_edge - elif isinstance(p, Box): - rule_height = p.height - rule_depth = p.depth - rule_width = p.width - if np.isinf(rule_width): - rule_width = box.width - rule_height += rule_depth - if rule_height > 0 and rule_depth > 0: - cur_v += rule_height - p.render(output, - cur_h + off_h, cur_v + off_v, - rule_width, rule_height) - elif isinstance(p, Glue): - glue_spec = p.glue_spec - rule_height = glue_spec.width - cur_g - if glue_sign != 0: # normal - if glue_sign == 1: # stretching - if glue_spec.stretch_order == glue_order: - cur_glue += glue_spec.stretch - cur_g = round(clamp(box.glue_set * cur_glue)) - elif glue_spec.shrink_order == glue_order: # shrinking - cur_glue += glue_spec.shrink - cur_g = round(clamp(box.glue_set * cur_glue)) - rule_height += cur_g - cur_v += rule_height - elif isinstance(p, Char): - raise RuntimeError( - "Internal mathtext error: Char node found in vlist") - - hlist_out(box) - return output - - -############################################################################## -# PARSER - - -def Error(msg): - """Helper class to raise parser errors.""" - def raise_error(s, loc, toks): - raise ParseFatalException(s, loc, msg) - - return Empty().setParseAction(raise_error) - - -class ParserState: - """ - Parser state. - - States are pushed and popped from a stack as necessary, and the "current" - state is always at the top of the stack. - - Upon entering and leaving a group { } or math/non-math, the stack is pushed - and popped accordingly. 
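A minimal sketch of that push/pop discipline, assuming plain dict states (the real `ParserState` below also carries the fontset, font, fontsize and dpi):

    import copy

    class _StackedState:
        def __init__(self, initial):
            self._stack = [initial]
        def get(self):
            return self._stack[-1]                     # current = top of stack
        def push(self):
            self._stack.append(copy.copy(self.get()))  # entering a { } group
        def pop(self):
            self._stack.pop()                          # leaving the group

    s = _StackedState({'font': 'rm'})
    s.push(); s.get()['font'] = 'it'  # the change stays local to the group
    s.pop()
    assert s.get()['font'] == 'rm'    # restored on exit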
- """ - - def __init__(self, fontset, font, font_class, fontsize, dpi): - self.fontset = fontset - self._font = font - self.font_class = font_class - self.fontsize = fontsize - self.dpi = dpi - - def copy(self): - return copy.copy(self) - - @property - def font(self): - return self._font - - @font.setter - def font(self, name): - if name in ('rm', 'it', 'bf'): - self.font_class = name - self._font = name - - def get_current_underline_thickness(self): - """Return the underline thickness for this state.""" - return self.fontset.get_underline_thickness( - self.font, self.fontsize, self.dpi) - - -def cmd(expr, args): - r""" - Helper to define TeX commands. - - ``cmd("\cmd", args)`` is equivalent to - ``"\cmd" - (args | Error("Expected \cmd{arg}{...}"))`` where the names in - the error message are taken from element names in *args*. If *expr* - already includes arguments (e.g. "\cmd{arg}{...}"), then they are stripped - when constructing the parse element, but kept (and *expr* is used as is) in - the error message. - """ - - def names(elt): - if isinstance(elt, ParseExpression): - for expr in elt.exprs: - yield from names(expr) - elif elt.resultsName: - yield elt.resultsName - - csname = expr.split("{", 1)[0] - err = (csname + "".join("{%s}" % name for name in names(args)) - if expr == csname else expr) - return csname - (args | Error(f"Expected {err}")) - - -class Parser: - """ - A pyparsing-based parser for strings containing math expressions. - - Raw text may also appear outside of pairs of ``$``. - - The grammar is based directly on that in TeX, though it cuts a few corners. - """ - - class _MathStyle(enum.Enum): - DISPLAYSTYLE = 0 - TEXTSTYLE = 1 - SCRIPTSTYLE = 2 - SCRIPTSCRIPTSTYLE = 3 - - _binary_operators = set( - '+ * - \N{MINUS SIGN}' - r''' - \pm \sqcap \rhd - \mp \sqcup \unlhd - \times \vee \unrhd - \div \wedge \oplus - \ast \setminus \ominus - \star \wr \otimes - \circ \diamond \oslash - \bullet \bigtriangleup \odot - \cdot \bigtriangledown \bigcirc - \cap \triangleleft \dagger - \cup \triangleright \ddagger - \uplus \lhd \amalg - \dotplus \dotminus'''.split()) - - _relation_symbols = set(r''' - = < > : - \leq \geq \equiv \models - \prec \succ \sim \perp - \preceq \succeq \simeq \mid - \ll \gg \asymp \parallel - \subset \supset \approx \bowtie - \subseteq \supseteq \cong \Join - \sqsubset \sqsupset \neq \smile - \sqsubseteq \sqsupseteq \doteq \frown - \in \ni \propto \vdash - \dashv \dots \doteqdot'''.split()) - - _arrow_symbols = set(r''' - \leftarrow \longleftarrow \uparrow - \Leftarrow \Longleftarrow \Uparrow - \rightarrow \longrightarrow \downarrow - \Rightarrow \Longrightarrow \Downarrow - \leftrightarrow \longleftrightarrow \updownarrow - \Leftrightarrow \Longleftrightarrow \Updownarrow - \mapsto \longmapsto \nearrow - \hookleftarrow \hookrightarrow \searrow - \leftharpoonup \rightharpoonup \swarrow - \leftharpoondown \rightharpoondown \nwarrow - \rightleftharpoons \leadsto'''.split()) - - _spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols - - _punctuation_symbols = set(r', ; . ! 
\ldotp \cdotp'.split()) - - _overunder_symbols = set(r''' - \sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee - \bigwedge \bigodot \bigotimes \bigoplus \biguplus - '''.split()) - - _overunder_functions = set("lim liminf limsup sup max min".split()) - - _dropsub_symbols = set(r'''\int \oint'''.split()) - - _fontnames = set("rm cal it tt sf bf default bb frak scr regular".split()) - - _function_names = set(""" - arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim - liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan - coth inf max tanh""".split()) - - _ambi_delims = set(r""" - | \| / \backslash \uparrow \downarrow \updownarrow \Uparrow - \Downarrow \Updownarrow . \vert \Vert""".split()) - _left_delims = set(r"( [ \{ < \lfloor \langle \lceil".split()) - _right_delims = set(r") ] \} > \rfloor \rangle \rceil".split()) - _delims = _left_delims | _right_delims | _ambi_delims - - def __init__(self): - p = types.SimpleNamespace() - - def set_names_and_parse_actions(): - for key, val in vars(p).items(): - if not key.startswith('_'): - # Set names on everything -- very useful for debugging - val.setName(key) - # Set actions - if hasattr(self, key): - val.setParseAction(getattr(self, key)) - - # Root definitions. - - # In TeX parlance, a csname is a control sequence name (a "\foo"). - def csnames(group, names): - ends_with_alpha = [] - ends_with_nonalpha = [] - for name in names: - if name[-1].isalpha(): - ends_with_alpha.append(name) - else: - ends_with_nonalpha.append(name) - return Regex(r"\\(?P<{}>(?:{})(?![A-Za-z]){})".format( - group, - "|".join(map(re.escape, ends_with_alpha)), - "".join(f"|{s}" for s in map(re.escape, ends_with_nonalpha)), - )) - - p.float_literal = Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)") - p.space = oneOf(self._space_widths)("space") - - p.style_literal = oneOf( - [str(e.value) for e in self._MathStyle])("style_literal") - - p.symbol = Regex( - r"[a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|\U00000080-\U0001ffff]" - r"|\\[%${}\[\]_|]" - + r"|\\(?:{})(?![A-Za-z])".format( - "|".join(map(re.escape, tex2uni))) - )("sym").leaveWhitespace() - p.unknown_symbol = Regex(r"\\[A-Za-z]*")("name") - - p.font = csnames("font", self._fontnames) - p.start_group = ( - Optional(r"\math" + oneOf(self._fontnames)("font")) + "{") - p.end_group = Literal("}") - - p.delim = oneOf(self._delims) - - set_names_and_parse_actions() # for root definitions. - - # Mutually recursive definitions. (Minimizing the number of Forward - # elements is important for speed.) - p.accent = Forward() - p.auto_delim = Forward() - p.binom = Forward() - p.customspace = Forward() - p.frac = Forward() - p.dfrac = Forward() - p.function = Forward() - p.genfrac = Forward() - p.group = Forward() - p.operatorname = Forward() - p.overline = Forward() - p.overset = Forward() - p.placeable = Forward() - p.required_group = Forward() - p.simple = Forward() - p.optional_group = Forward() - p.sqrt = Forward() - p.subsuper = Forward() - p.token = Forward() - p.underset = Forward() - - set_names_and_parse_actions() # for mutually recursive definitions. 
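The `Forward` dance above is easiest to see in a toy grammar: declare a rule first so other rules can refer to it, then fill it in with `<<=`. A self-contained pyparsing sketch (toy rules, not the ones defined below):

    from pyparsing import Forward, Literal, Word, ZeroOrMore, alphas

    group = Forward()             # declared first, so token can refer to it
    token = Word(alphas) | group  # a token may itself be a nested group
    group <<= Literal("{") + ZeroOrMore(token) + Literal("}")

    print(group.parseString("{a {b c} d}"))
    # -> ['{', 'a', '{', 'b', 'c', '}', 'd', '}']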
- - p.customspace <<= cmd(r"\hspace", "{" + p.float_literal("space") + "}") - - p.accent <<= ( - csnames("accent", [*self._accent_map, *self._wide_accents]) - - p.placeable("sym")) - - p.function <<= csnames("name", self._function_names) - p.operatorname <<= cmd( - r"\operatorname", - "{" + ZeroOrMore(p.simple | p.unknown_symbol)("name") + "}") - - p.group <<= p.start_group + ZeroOrMore(p.token)("group") + p.end_group - - p.optional_group <<= "{" + ZeroOrMore(p.token)("group") + "}" - p.required_group <<= "{" + OneOrMore(p.token)("group") + "}" - - p.frac <<= cmd( - r"\frac", p.required_group("num") + p.required_group("den")) - p.dfrac <<= cmd( - r"\dfrac", p.required_group("num") + p.required_group("den")) - p.binom <<= cmd( - r"\binom", p.required_group("num") + p.required_group("den")) - - p.genfrac <<= cmd( - r"\genfrac", - "{" + Optional(p.delim)("ldelim") + "}" - + "{" + Optional(p.delim)("rdelim") + "}" - + "{" + p.float_literal("rulesize") + "}" - + "{" + Optional(p.style_literal)("style") + "}" - + p.required_group("num") - + p.required_group("den")) - - p.sqrt <<= cmd( - r"\sqrt{value}", - Optional("[" + OneOrMore(NotAny("]") + p.token)("root") + "]") - + p.required_group("value")) - - p.overline <<= cmd(r"\overline", p.required_group("body")) - - p.overset <<= cmd( - r"\overset", - p.optional_group("annotation") + p.optional_group("body")) - p.underset <<= cmd( - r"\underset", - p.optional_group("annotation") + p.optional_group("body")) - - p.placeable <<= ( - p.accent # Must be before symbol as all accents are symbols - | p.symbol # Must be second to catch all named symbols and single - # chars not in a group - | p.function - | p.operatorname - | p.group - | p.frac - | p.dfrac - | p.binom - | p.genfrac - | p.overset - | p.underset - | p.sqrt - | p.overline - ) - - p.simple <<= ( - p.space - | p.customspace - | p.font - | p.subsuper - ) - - p.subsuper <<= ( - (Optional(p.placeable)("nucleus") - + OneOrMore(oneOf(["_", "^"]) - p.placeable)("subsuper") - + Regex("'*")("apostrophes")) - | Regex("'+")("apostrophes") - | (p.placeable("nucleus") + Regex("'*")("apostrophes")) - ) - - p.token <<= ( - p.simple - | p.auto_delim - | p.unknown_symbol # Must be last - ) - - p.auto_delim <<= ( - r"\left" - (p.delim("left") | Error("Expected a delimiter")) - + ZeroOrMore(p.simple | p.auto_delim)("mid") - + r"\right" - (p.delim("right") | Error("Expected a delimiter")) - ) - - # Leaf definitions. - p.math = OneOrMore(p.token) - p.math_string = QuotedString('$', '\\', unquoteResults=False) - p.non_math = Regex(r"(?:(?:\\[$])|[^$])*").leaveWhitespace() - p.main = ( - p.non_math + ZeroOrMore(p.math_string + p.non_math) + StringEnd() - ) - set_names_and_parse_actions() # for leaf definitions. - - self._expression = p.main - self._math_expression = p.math - - # To add space to nucleus operators after sub/superscripts - self._in_subscript_or_superscript = False - - def parse(self, s, fonts_object, fontsize, dpi): - """ - Parse expression *s* using the given *fonts_object* for - output, at the given *fontsize* and *dpi*. - - Returns the parse tree of `Node` instances. - """ - self._state_stack = [ - ParserState(fonts_object, 'default', 'rm', fontsize, dpi)] - self._em_width_cache = {} - try: - result = self._expression.parseString(s) - except ParseBaseException as err: - # explain becomes a plain method on pyparsing 3 (err.explain(0)). 
- raise ValueError("\n" + ParseException.explain(err, 0)) from None - self._state_stack = None - self._in_subscript_or_superscript = False - # prevent operator spacing from leaking into a new expression - self._em_width_cache = {} - self._expression.resetCache() - return result[0] - - def get_state(self): - """Get the current `State` of the parser.""" - return self._state_stack[-1] - - def pop_state(self): - """Pop a `State` off of the stack.""" - self._state_stack.pop() - - def push_state(self): - """Push a new `State` onto the stack, copying the current state.""" - self._state_stack.append(self.get_state().copy()) - - def main(self, s, loc, toks): - return [Hlist(toks)] - - def math_string(self, s, loc, toks): - return self._math_expression.parseString(toks[0][1:-1]) - - def math(self, s, loc, toks): - hlist = Hlist(toks) - self.pop_state() - return [hlist] - - def non_math(self, s, loc, toks): - s = toks[0].replace(r'\$', '$') - symbols = [Char(c, self.get_state()) for c in s] - hlist = Hlist(symbols) - # We're going into math now, so set font to 'it' - self.push_state() - self.get_state().font = mpl.rcParams['mathtext.default'] - return [hlist] - - float_literal = staticmethod(pyparsing_common.convertToFloat) - - def _make_space(self, percentage): - # In TeX, an em (the unit usually used to measure horizontal lengths) - # is not the width of the character 'm'; it is the same in different - # font styles (e.g. roman or italic). Mathtext, however, uses 'm' in - # the italic style so that horizontal spaces don't depend on the - # current font style. - state = self.get_state() - key = (state.font, state.fontsize, state.dpi) - width = self._em_width_cache.get(key) - if width is None: - metrics = state.fontset.get_metrics( - 'it', mpl.rcParams['mathtext.default'], 'm', - state.fontsize, state.dpi) - width = metrics.advance - self._em_width_cache[key] = width - return Kern(width * percentage) - - _space_widths = { - r'\,': 0.16667, # 3/18 em = 3 mu - r'\thinspace': 0.16667, # 3/18 em = 3 mu - r'\/': 0.16667, # 3/18 em = 3 mu - r'\>': 0.22222, # 4/18 em = 4 mu - r'\:': 0.22222, # 4/18 em = 4 mu - r'\;': 0.27778, # 5/18 em = 5 mu - r'\ ': 0.33333, # 6/18 em = 6 mu - r'~': 0.33333, # 6/18 em = 6 mu, nonbreakable - r'\enspace': 0.5, # 9/18 em = 9 mu - r'\quad': 1, # 1 em = 18 mu - r'\qquad': 2, # 2 em = 36 mu - r'\!': -0.16667, # -3/18 em = -3 mu - } - - def space(self, s, loc, toks): - num = self._space_widths[toks["space"]] - box = self._make_space(num) - return [box] - - def customspace(self, s, loc, toks): - return [self._make_space(toks["space"])] - - def symbol(self, s, loc, toks): - c = toks["sym"] - if c == "-": - # "U+2212 minus sign is the preferred representation of the unary - # and binary minus sign rather than the ASCII-derived U+002D - # hyphen-minus, because minus sign is unambiguous and because it - # is rendered with a more desirable length, usually longer than a - # hyphen." (https://www.unicode.org/reports/tr25/) - c = "\N{MINUS SIGN}" - try: - char = Char(c, self.get_state()) - except ValueError as err: - raise ParseFatalException(s, loc, - "Unknown symbol: %s" % c) from err - - if c in self._spaced_symbols: - # iterate until we find previous character, needed for cases - # such as ${ -2}$, $ -2$, or $ -2$. 
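A worked check of that lookback, via a hypothetical helper equivalent to the inline generator used on the next line:

    def prev_nonspace(s, loc):
        return next((ch for ch in s[:loc][::-1] if ch != ' '), '')

    # '-' right after '{' or a left delimiter is unary: it gets no padding.
    assert prev_nonspace('{ -2}', 2) == '{'
    # '-' between two operands is binary: it is padded by 0.2 em per side.
    assert prev_nonspace('a - b', 2) == 'a'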
- prev_char = next((c for c in s[:loc][::-1] if c != ' '), '') - # Binary operators at start of string should not be spaced - if (c in self._binary_operators and - (len(s[:loc].split()) == 0 or prev_char == '{' or - prev_char in self._left_delims)): - return [char] - else: - return [Hlist([self._make_space(0.2), - char, - self._make_space(0.2)], - do_kern=True)] - elif c in self._punctuation_symbols: - prev_char = next((c for c in s[:loc][::-1] if c != ' '), '') - next_char = next((c for c in s[loc + 1:] if c != ' '), '') - - # Do not space commas between brackets - if c == ',': - if prev_char == '{' and next_char == '}': - return [char] - - # Do not space dots as decimal separators - if c == '.' and prev_char.isdigit() and next_char.isdigit(): - return [char] - else: - return [Hlist([char, self._make_space(0.2)], do_kern=True)] - return [char] - - def unknown_symbol(self, s, loc, toks): - raise ParseFatalException(s, loc, f"Unknown symbol: {toks['name']}") - - _accent_map = { - r'hat': r'\circumflexaccent', - r'breve': r'\combiningbreve', - r'bar': r'\combiningoverline', - r'grave': r'\combininggraveaccent', - r'acute': r'\combiningacuteaccent', - r'tilde': r'\combiningtilde', - r'dot': r'\combiningdotabove', - r'ddot': r'\combiningdiaeresis', - r'dddot': r'\combiningthreedotsabove', - r'ddddot': r'\combiningfourdotsabove', - r'vec': r'\combiningrightarrowabove', - r'"': r'\combiningdiaeresis', - r"`": r'\combininggraveaccent', - r"'": r'\combiningacuteaccent', - r'~': r'\combiningtilde', - r'.': r'\combiningdotabove', - r'^': r'\circumflexaccent', - r'overrightarrow': r'\rightarrow', - r'overleftarrow': r'\leftarrow', - r'mathring': r'\circ', - } - - _wide_accents = set(r"widehat widetilde widebar".split()) - - def accent(self, s, loc, toks): - state = self.get_state() - thickness = state.get_current_underline_thickness() - accent = toks["accent"] - sym = toks["sym"] - if accent in self._wide_accents: - accent_box = AutoWidthChar( - '\\' + accent, sym.width, state, char_class=Accent) - else: - accent_box = Accent(self._accent_map[accent], state) - if accent == 'mathring': - accent_box.shrink() - accent_box.shrink() - centered = HCentered([Hbox(sym.width / 4.0), accent_box]) - centered.hpack(sym.width, 'exactly') - return Vlist([ - centered, - Vbox(0., thickness * 2.0), - Hlist([sym]) - ]) - - def function(self, s, loc, toks): - hlist = self.operatorname(s, loc, toks) - hlist.function_name = toks["name"] - return hlist - - def operatorname(self, s, loc, toks): - self.push_state() - state = self.get_state() - state.font = 'rm' - hlist_list = [] - # Change the font of Chars, but leave Kerns alone - name = toks["name"] - for c in name: - if isinstance(c, Char): - c.font = 'rm' - c._update_metrics() - hlist_list.append(c) - elif isinstance(c, str): - hlist_list.append(Char(c, state)) - else: - hlist_list.append(c) - next_char_loc = loc + len(name) + 1 - if isinstance(name, ParseResults): - next_char_loc += len('operatorname{}') - next_char = next((c for c in s[next_char_loc:] if c != ' '), '') - delimiters = self._delims | {'^', '_'} - if (next_char not in delimiters and - name not in self._overunder_functions): - # Add thin space except when followed by parenthesis, bracket, etc. 
- hlist_list += [self._make_space(self._space_widths[r'\,'])] - self.pop_state() - # if followed by a super/subscript, set flag to true - # This flag tells subsuper to add space after this operator - if next_char in {'^', '_'}: - self._in_subscript_or_superscript = True - else: - self._in_subscript_or_superscript = False - - return Hlist(hlist_list) - - def start_group(self, s, loc, toks): - self.push_state() - # Deal with LaTeX-style font tokens - if toks.get("font"): - self.get_state().font = toks.get("font") - return [] - - def group(self, s, loc, toks): - grp = Hlist(toks.get("group", [])) - return [grp] - - def required_group(self, s, loc, toks): - return Hlist(toks.get("group", [])) - - optional_group = required_group - - def end_group(self, s, loc, toks): - self.pop_state() - return [] - - def font(self, s, loc, toks): - self.get_state().font = toks["font"] - return [] - - def is_overunder(self, nucleus): - if isinstance(nucleus, Char): - return nucleus.c in self._overunder_symbols - elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'): - return nucleus.function_name in self._overunder_functions - return False - - def is_dropsub(self, nucleus): - if isinstance(nucleus, Char): - return nucleus.c in self._dropsub_symbols - return False - - def is_slanted(self, nucleus): - if isinstance(nucleus, Char): - return nucleus.is_slanted() - return False - - def is_between_brackets(self, s, loc): - return False - - def subsuper(self, s, loc, toks): - nucleus = toks.get("nucleus", Hbox(0)) - subsuper = toks.get("subsuper", []) - napostrophes = len(toks.get("apostrophes", [])) - - if not subsuper and not napostrophes: - return nucleus - - sub = super = None - while subsuper: - op, arg, *subsuper = subsuper - if op == '_': - if sub is not None: - raise ParseFatalException("Double subscript") - sub = arg - else: - if super is not None: - raise ParseFatalException("Double superscript") - super = arg - - state = self.get_state() - rule_thickness = state.fontset.get_underline_thickness( - state.font, state.fontsize, state.dpi) - xHeight = state.fontset.get_xheight( - state.font, state.fontsize, state.dpi) - - if napostrophes: - if super is None: - super = Hlist([]) - for i in range(napostrophes): - super.children.extend(self.symbol(s, loc, {"sym": "\\prime"})) - # kern() and hpack() needed to get the metrics right after - # extending - super.kern() - super.hpack() - - # Handle over/under symbols, such as sum or prod - if self.is_overunder(nucleus): - vlist = [] - shift = 0. - width = nucleus.width - if super is not None: - super.shrink() - width = max(width, super.width) - if sub is not None: - sub.shrink() - width = max(width, sub.width) - - vgap = rule_thickness * 3.0 - if super is not None: - hlist = HCentered([super]) - hlist.hpack(width, 'exactly') - vlist.extend([hlist, Vbox(0, vgap)]) - hlist = HCentered([nucleus]) - hlist.hpack(width, 'exactly') - vlist.append(hlist) - if sub is not None: - hlist = HCentered([sub]) - hlist.hpack(width, 'exactly') - vlist.extend([Vbox(0, vgap), hlist]) - shift = hlist.height + vgap + nucleus.depth - vlist = Vlist(vlist) - vlist.shift_amount = shift - result = Hlist([vlist]) - return [result] - - # We remove kerning on the last character for consistency (otherwise - # it will compute kerning based on non-shrunk characters and may put - # them too close together when superscripted) - # We change the width of the last character to match the advance to - # consider some fonts with weird metrics: e.g. 
stix's f has a width of - # 7.75 and a kerning of -4.0 for an advance of 3.72, and we want to put - # the superscript at the advance - last_char = nucleus - if isinstance(nucleus, Hlist): - new_children = nucleus.children - if len(new_children): - # remove last kern - if (isinstance(new_children[-1], Kern) and - hasattr(new_children[-2], '_metrics')): - new_children = new_children[:-1] - last_char = new_children[-1] - if hasattr(last_char, '_metrics'): - last_char.width = last_char._metrics.advance - # create new Hlist without kerning - nucleus = Hlist(new_children, do_kern=False) - else: - if isinstance(nucleus, Char): - last_char.width = last_char._metrics.advance - nucleus = Hlist([nucleus]) - - # Handle regular sub/superscripts - constants = _get_font_constant_set(state) - lc_height = last_char.height - lc_baseline = 0 - if self.is_dropsub(last_char): - lc_baseline = last_char.depth - - # Compute kerning for sub and super - superkern = constants.delta * xHeight - subkern = constants.delta * xHeight - if self.is_slanted(last_char): - superkern += constants.delta * xHeight - superkern += (constants.delta_slanted * - (lc_height - xHeight * 2. / 3.)) - if self.is_dropsub(last_char): - subkern = (3 * constants.delta - - constants.delta_integral) * lc_height - superkern = (3 * constants.delta + - constants.delta_integral) * lc_height - else: - subkern = 0 - - if super is None: - # node757 - x = Hlist([Kern(subkern), sub]) - x.shrink() - if self.is_dropsub(last_char): - shift_down = lc_baseline + constants.subdrop * xHeight - else: - shift_down = constants.sub1 * xHeight - x.shift_amount = shift_down - else: - x = Hlist([Kern(superkern), super]) - x.shrink() - if self.is_dropsub(last_char): - shift_up = lc_height - constants.subdrop * xHeight - else: - shift_up = constants.sup1 * xHeight - if sub is None: - x.shift_amount = -shift_up - else: # Both sub and superscript - y = Hlist([Kern(subkern), sub]) - y.shrink() - if self.is_dropsub(last_char): - shift_down = lc_baseline + constants.subdrop * xHeight - else: - shift_down = constants.sub2 * xHeight - # If sub and superscript collide, move super up - clr = (2.0 * rule_thickness - - ((shift_up - x.depth) - (y.height - shift_down))) - if clr > 0.: - shift_up += clr - x = Vlist([ - x, - Kern((shift_up - x.depth) - (y.height - shift_down)), - y]) - x.shift_amount = shift_down - - if not self.is_dropsub(last_char): - x.width += constants.script_space * xHeight - - # Do we need to add a space after the nucleus? 
- # To find out, check the flag set by operatorname - spaced_nucleus = [nucleus, x] - if self._in_subscript_or_superscript: - spaced_nucleus += [self._make_space(self._space_widths[r'\,'])] - self._in_subscript_or_superscript = False - - result = Hlist(spaced_nucleus) - return [result] - - def _genfrac(self, ldelim, rdelim, rule, style, num, den): - state = self.get_state() - thickness = state.get_current_underline_thickness() - - for _ in range(style.value): - num.shrink() - den.shrink() - cnum = HCentered([num]) - cden = HCentered([den]) - width = max(num.width, den.width) - cnum.hpack(width, 'exactly') - cden.hpack(width, 'exactly') - vlist = Vlist([cnum, # numerator - Vbox(0, thickness * 2.0), # space - Hrule(state, rule), # rule - Vbox(0, thickness * 2.0), # space - cden # denominator - ]) - - # Shift so the fraction line sits in the middle of the - # equals sign - metrics = state.fontset.get_metrics( - state.font, mpl.rcParams['mathtext.default'], - '=', state.fontsize, state.dpi) - shift = (cden.height - - ((metrics.ymax + metrics.ymin) / 2 - - thickness * 3.0)) - vlist.shift_amount = shift - - result = [Hlist([vlist, Hbox(thickness * 2.)])] - if ldelim or rdelim: - if ldelim == '': - ldelim = '.' - if rdelim == '': - rdelim = '.' - return self._auto_sized_delimiter(ldelim, result, rdelim) - return result - - def style_literal(self, s, loc, toks): - return self._MathStyle(int(toks["style_literal"])) - - def genfrac(self, s, loc, toks): - return self._genfrac( - toks.get("ldelim", ""), toks.get("rdelim", ""), - toks["rulesize"], toks.get("style", self._MathStyle.TEXTSTYLE), - toks["num"], toks["den"]) - - def frac(self, s, loc, toks): - return self._genfrac( - "", "", self.get_state().get_current_underline_thickness(), - self._MathStyle.TEXTSTYLE, toks["num"], toks["den"]) - - def dfrac(self, s, loc, toks): - return self._genfrac( - "", "", self.get_state().get_current_underline_thickness(), - self._MathStyle.DISPLAYSTYLE, toks["num"], toks["den"]) - - def binom(self, s, loc, toks): - return self._genfrac( - "(", ")", 0, - self._MathStyle.TEXTSTYLE, toks["num"], toks["den"]) - - def _genset(self, s, loc, toks): - annotation = toks["annotation"] - body = toks["body"] - thickness = self.get_state().get_current_underline_thickness() - - annotation.shrink() - cannotation = HCentered([annotation]) - cbody = HCentered([body]) - width = max(cannotation.width, cbody.width) - cannotation.hpack(width, 'exactly') - cbody.hpack(width, 'exactly') - - vgap = thickness * 3 - if s[loc + 1] == "u": # \underset - vlist = Vlist([cbody, # body - Vbox(0, vgap), # space - cannotation # annotation - ]) - # Shift so the body sits in the same vertical position - vlist.shift_amount = cbody.depth + cannotation.height + vgap - else: # \overset - vlist = Vlist([cannotation, # annotation - Vbox(0, vgap), # space - cbody # body - ]) - - # To add horizontal gap between symbols: wrap the Vlist into - # an Hlist and extend it with an Hbox(0, horizontal_gap) - return vlist - - overset = underset = _genset - - def sqrt(self, s, loc, toks): - root = toks.get("root") - body = toks["value"] - state = self.get_state() - thickness = state.get_current_underline_thickness() - - # Determine the height of the body, and add a little extra to - # the height so it doesn't seem cramped - height = body.height - body.shift_amount + thickness * 5.0 - depth = body.depth + body.shift_amount - check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True) - height = check.height - check.shift_amount - depth = check.depth + 
check.shift_amount - - # Put a little extra space to the left and right of the body - padded_body = Hlist([Hbox(2 * thickness), body, Hbox(2 * thickness)]) - rightside = Vlist([Hrule(state), Glue('fill'), padded_body]) - # Stretch the glue between the hrule and the body - rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0), - 'exactly', depth) - - # Add the root and shift it upward so it is above the tick. - # The value of 0.6 is a hard-coded hack ;) - if not root: - root = Box(check.width * 0.5, 0., 0.) - else: - root = Hlist(root) - root.shrink() - root.shrink() - - root_vlist = Vlist([Hlist([root])]) - root_vlist.shift_amount = -height * 0.6 - - hlist = Hlist([root_vlist, # Root - # Negative kerning to put root over tick - Kern(-check.width * 0.5), - check, # Check - rightside]) # Body - return [hlist] - - def overline(self, s, loc, toks): - body = toks["body"] - - state = self.get_state() - thickness = state.get_current_underline_thickness() - - height = body.height - body.shift_amount + thickness * 3.0 - depth = body.depth + body.shift_amount - - # Place overline above body - rightside = Vlist([Hrule(state), Glue('fill'), Hlist([body])]) - - # Stretch the glue between the hrule and the body - rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0), - 'exactly', depth) - - hlist = Hlist([rightside]) - return [hlist] - - def _auto_sized_delimiter(self, front, middle, back): - state = self.get_state() - if len(middle): - height = max(x.height for x in middle) - depth = max(x.depth for x in middle) - factor = None - else: - height = 0 - depth = 0 - factor = 1.0 - parts = [] - # \left. and \right. aren't supposed to produce any symbols - if front != '.': - parts.append( - AutoHeightChar(front, height, depth, state, factor=factor)) - parts.extend(middle) - if back != '.': - parts.append( - AutoHeightChar(back, height, depth, state, factor=factor)) - hlist = Hlist(parts) - return hlist - - def auto_delim(self, s, loc, toks): - return self._auto_sized_delimiter( - toks["left"], - # if "mid" in toks ... can be removed when requiring pyparsing 3. - toks["mid"].asList() if "mid" in toks else [], - toks["right"]) diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/retinaface/layers/modules/multibox_loss.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/retinaface/layers/modules/multibox_loss.py deleted file mode 100644 index 75d2367be35e11a119810949f6ccce439984b978..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/LambdaSuperRes/KAIR/retinaface/layers/modules/multibox_loss.py +++ /dev/null @@ -1,125 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Variable -from utils_faces.box_utils import match, log_sum_exp -from data_faces import cfg_mnet -GPU = cfg_mnet['gpu_train'] - -class MultiBoxLoss(nn.Module): - """SSD Weighted Loss Function - Compute Targets: - 1) Produce Confidence Target Indices by matching ground truth boxes - with (default) 'priorboxes' that have jaccard index > threshold parameter - (default threshold: 0.5). - 2) Produce localization target by 'encoding' variance into offsets of ground - truth boxes and their matched 'priorboxes'. - 3) Hard negative mining to filter the excessive number of negative examples - that comes with using a large number of default bounding boxes. 
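The rank-and-threshold trick that implements this mining in `forward` below is compact but cryptic; here is a standalone sketch of it (a hypothetical helper, reduced to the essentials):

    import torch

    def hard_negative_mask(loss_c, pos, negpos_ratio=3):
        loss_c = loss_c.clone()
        loss_c[pos] = 0                 # positives never compete as negatives
        _, loss_idx = loss_c.sort(1, descending=True)
        _, idx_rank = loss_idx.sort(1)  # double argsort gives each prior's rank
        num_pos = pos.long().sum(1, keepdim=True)
        num_neg = torch.clamp(negpos_ratio * num_pos, max=pos.size(1) - 1)
        return idx_rank < num_neg       # keep only the hardest negatives

    loss = torch.tensor([[0.9, 0.1, 0.5, 0.7]])
    pos = torch.tensor([[True, False, False, False]])
    print(hard_negative_mask(loss, pos, negpos_ratio=2))
    # -> tensor([[False, False,  True,  True]])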
- (default negative:positive ratio 3:1) - Objective Loss: - L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N - Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss - weighted by α which is set to 1 by cross val. - Args: - c: class confidences, - l: predicted boxes, - g: ground truth boxes - N: number of matched default boxes - See: https://arxiv.org/pdf/1512.02325.pdf for more details. - """ - - def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target): - super(MultiBoxLoss, self).__init__() - self.num_classes = num_classes - self.threshold = overlap_thresh - self.background_label = bkg_label - self.encode_target = encode_target - self.use_prior_for_matching = prior_for_matching - self.do_neg_mining = neg_mining - self.negpos_ratio = neg_pos - self.neg_overlap = neg_overlap - self.variance = [0.1, 0.2] - - def forward(self, predictions, priors, targets): - """Multibox Loss - Args: - predictions (tuple): A tuple containing loc preds, conf preds, - and prior boxes from SSD net. - conf shape: torch.size(batch_size,num_priors,num_classes) - loc shape: torch.size(batch_size,num_priors,4) - priors shape: torch.size(num_priors,4) - - ground_truth (tensor): Ground truth boxes and labels for a batch, - shape: [batch_size,num_objs,5] (last idx is the label). - """ - - loc_data, conf_data, landm_data = predictions - priors = priors - num = loc_data.size(0) - num_priors = (priors.size(0)) - - # match priors (default boxes) and ground truth boxes - loc_t = torch.Tensor(num, num_priors, 4) - landm_t = torch.Tensor(num, num_priors, 10) - conf_t = torch.LongTensor(num, num_priors) - for idx in range(num): - truths = targets[idx][:, :4].data - labels = targets[idx][:, -1].data - landms = targets[idx][:, 4:14].data - defaults = priors.data - match(self.threshold, truths, defaults, self.variance, labels, landms, loc_t, conf_t, landm_t, idx) - if GPU: - loc_t = loc_t.cuda() - conf_t = conf_t.cuda() - landm_t = landm_t.cuda() - - zeros = torch.tensor(0).cuda() - # landm Loss (Smooth L1) - # Shape: [batch,num_priors,10] - pos1 = conf_t > zeros - num_pos_landm = pos1.long().sum(1, keepdim=True) - N1 = max(num_pos_landm.data.sum().float(), 1) - pos_idx1 = pos1.unsqueeze(pos1.dim()).expand_as(landm_data) - landm_p = landm_data[pos_idx1].view(-1, 10) - landm_t = landm_t[pos_idx1].view(-1, 10) - loss_landm = F.smooth_l1_loss(landm_p, landm_t, reduction='sum') - - - pos = conf_t != zeros - conf_t[pos] = 1 - - # Localization Loss (Smooth L1) - # Shape: [batch,num_priors,4] - pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data) - loc_p = loc_data[pos_idx].view(-1, 4) - loc_t = loc_t[pos_idx].view(-1, 4) - loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum') - - # Compute max conf across batch for hard negative mining - batch_conf = conf_data.view(-1, self.num_classes) - loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1)) - - # Hard Negative Mining - loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now - loss_c = loss_c.view(num, -1) - _, loss_idx = loss_c.sort(1, descending=True) - _, idx_rank = loss_idx.sort(1) - num_pos = pos.long().sum(1, keepdim=True) - num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1) - neg = idx_rank < num_neg.expand_as(idx_rank) - - # Confidence Loss Including Positive and Negative Examples - pos_idx = pos.unsqueeze(2).expand_as(conf_data) - neg_idx = neg.unsqueeze(2).expand_as(conf_data) - conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1,self.num_classes) - 
targets_weighted = conf_t[(pos+neg).gt(0)] - loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum') - - # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N - N = max(num_pos.data.sum().float(), 1) - loss_l /= N - loss_c /= N - loss_landm /= N1 - - return loss_l, loss_c, loss_landm diff --git a/spaces/leafShen/CodeFormer/CodeFormer/facelib/detection/__init__.py b/spaces/leafShen/CodeFormer/CodeFormer/facelib/detection/__init__.py deleted file mode 100644 index 296262d4e2e29eaa2afba7bda1f0399d77da24f6..0000000000000000000000000000000000000000 --- a/spaces/leafShen/CodeFormer/CodeFormer/facelib/detection/__init__.py +++ /dev/null @@ -1,100 +0,0 @@ -import os -import torch -from torch import nn -from copy import deepcopy - -from facelib.utils import load_file_from_url -from facelib.utils import download_pretrained_models -from facelib.detection.yolov5face.models.common import Conv - -from .retinaface.retinaface import RetinaFace -from .yolov5face.face_detector import YoloDetector - - -def init_detection_model(model_name, half=False, device='cuda'): - if 'retinaface' in model_name: - model = init_retinaface_model(model_name, half, device) - elif 'YOLOv5' in model_name: - model = init_yolov5face_model(model_name, device) - else: - raise NotImplementedError(f'{model_name} is not implemented.') - - return model - - -def init_retinaface_model(model_name, half=False, device='cuda'): - if model_name == 'retinaface_resnet50': - model = RetinaFace(network_name='resnet50', half=half) - model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth' - elif model_name == 'retinaface_mobile0.25': - model = RetinaFace(network_name='mobile0.25', half=half) - model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_mobilenet0.25_Final.pth' - else: - raise NotImplementedError(f'{model_name} is not implemented.') - - model_path = load_file_from_url(url=model_url, model_dir='weights/facelib', progress=True, file_name=None) - load_net = torch.load(model_path, map_location=lambda storage, loc: storage) - # remove unnecessary 'module.' 
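(An equivalent, non-mutating way to write the cleanup below, shown only as an illustration; the loop that follows iterates over a deepcopy precisely so it can mutate `load_net` while walking it:)

    load_net = {k[7:] if k.startswith('module.') else k: v
                for k, v in load_net.items()}

Either form drops the `module.` prefix that `nn.DataParallel` prepends to every key, so the checkpoint keys line up with the bare model.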
-    for k, v in deepcopy(load_net).items():
-        if k.startswith('module.'):
-            load_net[k[7:]] = v
-            load_net.pop(k)
-    model.load_state_dict(load_net, strict=True)
-    model.eval()
-    model = model.to(device)
-
-    return model
-
-
-def init_yolov5face_model(model_name, device='cuda'):
-    if model_name == 'YOLOv5l':
-        model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5l.yaml', device=device)
-        model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5l-face.pth'
-    elif model_name == 'YOLOv5n':
-        model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5n.yaml', device=device)
-        model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5n-face.pth'
-    else:
-        raise NotImplementedError(f'{model_name} is not implemented.')
-
-    model_path = load_file_from_url(url=model_url, model_dir='weights/facelib', progress=True, file_name=None)
-    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
-    model.detector.load_state_dict(load_net, strict=True)
-    model.detector.eval()
-    model.detector = model.detector.to(device).float()
-
-    for m in model.detector.modules():
-        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
-            m.inplace = True  # pytorch 1.7.0 compatibility
-        elif isinstance(m, Conv):
-            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
-
-    return model
-
-
-# Download from Google Drive
-# def init_yolov5face_model(model_name, device='cuda'):
-#     if model_name == 'YOLOv5l':
-#         model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5l.yaml', device=device)
-#         f_id = {'yolov5l-face.pth': '131578zMA6B2x8VQHyHfa6GEPtulMCNzV'}
-#     elif model_name == 'YOLOv5n':
-#         model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5n.yaml', device=device)
-#         f_id = {'yolov5n-face.pth': '1fhcpFvWZqghpGXjYPIne2sw1Fy4yhw6o'}
-#     else:
-#         raise NotImplementedError(f'{model_name} is not implemented.')
-
-#     model_path = os.path.join('weights/facelib', list(f_id.keys())[0])
-#     if not os.path.exists(model_path):
-#         download_pretrained_models(file_ids=f_id, save_path_root='weights/facelib')
-
-#     load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
-#     model.detector.load_state_dict(load_net, strict=True)
-#     model.detector.eval()
-#     model.detector = model.detector.to(device).float()
-
-#     for m in model.detector.modules():
-#         if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
-#             m.inplace = True  # pytorch 1.7.0 compatibility
-#         elif isinstance(m, Conv):
-#             m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
-
-#     return model
\ No newline at end of file
diff --git a/spaces/leurez/moss/src/hooks/useChatTip/useChatTip.ts b/spaces/leurez/moss/src/hooks/useChatTip/useChatTip.ts deleted file mode 100644 index bb9406bad1dc797de6ed17404f746b584387d502..0000000000000000000000000000000000000000 --- a/spaces/leurez/moss/src/hooks/useChatTip/useChatTip.ts +++ /dev/null @@ -1,34 +0,0 @@
-import { onMounted, ref } from 'vue'
-import { TIP_THRESHOLD, strNum2Num } from './common.js'
-
-const countCacheKey = '__chat_tip_count__'
-
-export function useChatTip() {
-  const tipCount = ref(0)
-
-  onMounted(() => {
-    forceSyncCount()
-  })
-
-  /** Sync the tip count from localStorage */
-  function forceSyncCount() {
-    const cacheCount = strNum2Num(localStorage.getItem(countCacheKey) || '0')
-
-    if (cacheCount !== tipCount.value)
-      tipCount.value = cacheCount
-  }
-
-  function increase() {
-    tipCount.value++
-
-    if (tipCount.value > 
TIP_THRESHOLD) - tipCount.value = 0 - - localStorage.setItem(countCacheKey, String(tipCount.value)) - } - - return { - increase, - count: tipCount, - } -} diff --git a/spaces/lewiswu1209/MockingBird/synthesizer/gst_hyperparameters.py b/spaces/lewiswu1209/MockingBird/synthesizer/gst_hyperparameters.py deleted file mode 100644 index 1403144651853135489c4a42d3c0f52bd0f87664..0000000000000000000000000000000000000000 --- a/spaces/lewiswu1209/MockingBird/synthesizer/gst_hyperparameters.py +++ /dev/null @@ -1,13 +0,0 @@ -class GSTHyperparameters(): - E = 512 - - # reference encoder - ref_enc_filters = [32, 32, 64, 64, 128, 128] - - # style token layer - token_num = 10 - # token_emb_size = 256 - num_heads = 8 - - n_mels = 256 # Number of Mel banks to generate - diff --git a/spaces/lightli/bingo-newbing/tailwind.config.js b/spaces/lightli/bingo-newbing/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/lightli/bingo-newbing/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Crack !LINK! Lightwave 10.1 Build 2161 (x32 And X64 Crack !LINK!).md b/spaces/lincquiQcaudo/Top-20-Diffusion/Crack !LINK! Lightwave 10.1 Build 2161 (x32 And X64 Crack !LINK!).md deleted file mode 100644 index fa734307393823a715c1dd20eb3db851debf2a81..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Crack !LINK! Lightwave 10.1 Build 2161 (x32 And X64 Crack !LINK!).md +++ /dev/null @@ -1,7 +0,0 @@ -
-

lightwave crack is designed for independent artists who want to control their vision from start to finish. affordable, comprehensive, and easy to learn and use, lightwave is one of the leading 3d systems on the market. with the ability to create, animate, and view incredibly realistic images and animations, lightwave allows you to create new worlds, build your brand, and break expectations. lightwave 2022 refined and updated. it is a complete 3d modeling, rendering, and animation solution. also, it is widely used in television production, movie visual effects, video game development, printed graphics, and visualization. it is responsible for giving artists more awards than any other 3d application.

-

its pre-rendered atmospheric engine allows you to import and render an openvdb fog volume. new for lightwave 3d 2019 is a set of node tools that allow openvdb content creation. you can now create openvdb grids from meshes, particles, and even the shape primitive distance estimator. the tool kit allows for live constructive solid geometry (csg) operations, grid filtering including level set tracking, smoothing, dilation and erosion. solvers are included that allow for fluid simulation and smoke and fire effects.

-

CRACK Lightwave 10.1 build 2161 (x32 and x64 crack)


Download: https://bytlly.com/2uGyrC



-

lightwave’s most advanced feature is the complete control over the effect with the screen space geometry brush. this allows the animator to paint the screen space geometry in an intuitive manner. the technology builds on and provides fine-grained control over the quality and effectiveness of the displacement surface. the new brush’s ability to apply displacement to a surface enables unique workflow solutions that are not otherwise possible.

899543212b
-
-
\ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Hurco Winmax 9 Crack.epub.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Hurco Winmax 9 Crack.epub.md deleted file mode 100644 index 088669b82ffec599525ebdcd5bf12541c200cc9c..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Hurco Winmax 9 Crack.epub.md +++ /dev/null @@ -1,42 +0,0 @@ -

Hurco Winmax 9 Crack.epub


Download Zip: https://bytlly.com/2uGvV1



-
-://cyborgnews.ru/graphics/dec-winmax-9-83-3-7/ - - win9.diz.to - -. - - ://www.1disk.com/pictures/windows-8-1-update-free-iso-64-bit-file/ - - hahahha - - :D - - ://filefactory.com/jpg/Windows%201%2C%20Windows%208%20%28iso%29%20-%20original.jpg - - ://win9.dev-cabin.net/mediacenter/windows-8-windows-media-center-os-tour/ - - ://socal.pcworld.com/gb/2012/07/19/2801761/windows-8-1-os-release-date-available-download-i.html - - hahaha - - ://www.windowsntutor.com/win-81-windows-media-center-8-1-windows-8-1-review/ - - ://www.imfb.com/images/upload/2013/030617/windows-8-1010-goodbye-windows-8-1-dead-end.jpg - - NIGGER LOLOL - - d4nn3 r8m3 l33t - - ://content.naver.com/wp-content/uploads/2013/07/windows-8-8-1-goes-live.png - - ://www.pcworld.com/gb/2013/03/21/windows-8-1-review-the-new-face-of-microsoft-in-windows-8-8-1-64075.html - - n3w3rd3 w0rd3 - - jr0n3 - - ://t 4fefd39f24
-
-
-

diff --git a/spaces/lithiumice/SadTalker/src/face3d/visualize.py b/spaces/lithiumice/SadTalker/src/face3d/visualize.py deleted file mode 100644 index 23a1110806a0ddf37d4aa549c023d1c3f7114e3e..0000000000000000000000000000000000000000 --- a/spaces/lithiumice/SadTalker/src/face3d/visualize.py +++ /dev/null @@ -1,48 +0,0 @@ -# check the sync of 3dmm feature and the audio -import cv2 -import numpy as np -from src.face3d.models.bfm import ParametricFaceModel -from src.face3d.models.facerecon_model import FaceReconModel -import torch -import subprocess, platform -import scipy.io as scio -from tqdm import tqdm - -# draft -def gen_composed_video(args, device, first_frame_coeff, coeff_path, audio_path, save_path, exp_dim=64): - - coeff_first = scio.loadmat(first_frame_coeff)['full_3dmm'] - - coeff_pred = scio.loadmat(coeff_path)['coeff_3dmm'] - - coeff_full = np.repeat(coeff_first, coeff_pred.shape[0], axis=0) # 257 - - coeff_full[:, 80:144] = coeff_pred[:, 0:64] - coeff_full[:, 224:227] = coeff_pred[:, 64:67] # 3 dim translation - coeff_full[:, 254:] = coeff_pred[:, 67:] # 3 dim translation - - tmp_video_path = '/tmp/face3dtmp.mp4' - - facemodel = FaceReconModel(args) - - video = cv2.VideoWriter(tmp_video_path, cv2.VideoWriter_fourcc(*'mp4v'), 25, (224, 224)) - - for k in tqdm(range(coeff_pred.shape[0]), 'face3d rendering:'): - cur_coeff_full = torch.tensor(coeff_full[k:k+1], device=device) - - facemodel.forward(cur_coeff_full, device) - - predicted_landmark = facemodel.pred_lm # TODO. - predicted_landmark = predicted_landmark.cpu().numpy().squeeze() - - rendered_img = facemodel.pred_face - rendered_img = 255. * rendered_img.cpu().numpy().squeeze().transpose(1,2,0) - out_img = rendered_img[:, :, :3].astype(np.uint8) - - video.write(np.uint8(out_img[:,:,::-1])) - - video.release() - - command = 'ffmpeg -v quiet -y -i {} -i {} -strict -2 -q:v 1 {}'.format(audio_path, tmp_video_path, save_path) - subprocess.call(command, shell=platform.system() != 'Windows') - diff --git a/spaces/ljjggr/bingo/src/lib/bots/bing/sr.ts b/spaces/ljjggr/bingo/src/lib/bots/bing/sr.ts deleted file mode 100644 index 7cae14da7362bd6cc1e234851c11ca67e5a99f0c..0000000000000000000000000000000000000000 --- a/spaces/ljjggr/bingo/src/lib/bots/bing/sr.ts +++ /dev/null @@ -1,106 +0,0 @@ -// @ts-ignore -const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? ( - // @ts-ignore - window.SpeechRecognition || - window.webkitSpeechRecognition || - // @ts-ignore - window.mozSpeechRecognition || - // @ts-ignore - window.msSpeechRecognition || - // @ts-ignore - window.oSpeechRecognition -) as typeof webkitSpeechRecognition : undefined - -type subscriber = (msg: string, command?: string) => void - -export class SR { - recognition?: SpeechRecognition - onchange?: subscriber - transcript: boolean = false - listening: boolean = false - private commandsRe?: RegExp - constructor(commands: string[]) { - this.recognition = SpeechRecognitionPolyfill ? 
new SpeechRecognitionPolyfill() : undefined - if (!this.recognition) { - return - } - this.configuration('zh-CN') - if (commands.length) { - this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`) - } - this.recognition.onresult = this.speechRecognition - this.recognition.onerror = (err) => { - console.log('err', err.error) - this.stop() - } - this.recognition.onend = () => { - if (this.recognition && this.listening) { - this.recognition.start() - } - } - } - - speechRecognition = (event: SpeechRecognitionEvent) => { - if (!this.listening) return - for (var i = event.resultIndex; i < event.results.length; i++) { - let result = event.results[i] - if (result.isFinal) { - var alt = result[0] - const text = alt.transcript.trim() - if (this.commandsRe && this.commandsRe.test(text)) { - return this.onchange?.('', RegExp.$1) - } - if (!this.transcript) return - this.onchange?.(text) - } - } - } - - private configuration = async (lang: string = 'zh-CN') => { - return new Promise((resolve) => { - if (this.recognition) { - this.recognition.continuous = true - this.recognition.lang = lang - this.recognition.onstart = resolve - } - }) - } - - start = async () => { - if (this.recognition && !this.listening) { - await this.recognition.start() - this.transcript = true - this.listening = true - } - } - - stop = () => { - if (this.recognition) { - this.recognition.stop() - this.transcript = false - this.listening = false - } - } - - - pause = () => { - if (this.recognition) { - this.transcript = false - } - } - - resume = () => { - if (this.recognition) { - this.transcript = true - } - } - - abort = () => { - if (this.recognition && this.transcript) { - this.recognition.abort() - this.transcript = false - this.listening = false - } - } -} - diff --git a/spaces/longlh/longlh-agree-disagree-neutral-classifier/README.md b/spaces/longlh/longlh-agree-disagree-neutral-classifier/README.md deleted file mode 100644 index 0b87af5e3c94c577d51176bcc12c0d38d45d60e0..0000000000000000000000000000000000000000 --- a/spaces/longlh/longlh-agree-disagree-neutral-classifier/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Longlh Agree Disagree Neutral Classifier -emoji: 🔥 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ltgoslo/ssa-perin/mtool/score/smatch.py b/spaces/ltgoslo/ssa-perin/mtool/score/smatch.py deleted file mode 100644 index e0361923de5bd68bfd8ef7f1453be67c7f4e0f69..0000000000000000000000000000000000000000 --- a/spaces/ltgoslo/ssa-perin/mtool/score/smatch.py +++ /dev/null @@ -1,102 +0,0 @@ -import sys; - -import score.core; -from smatch.smatch import get_amr_match; - -def tuples(graph, prefix, values, faith = True): - # - # mimicry of get_triples() in amr.py - # - id = 0; - mapping = dict(); - instances = []; - relations = []; - attributes = []; - n = 0; - for node in graph.nodes: - mapping[node.id] = name = prefix + str(id); - id += 1; - if "anchors" in values and node.anchors is not None: - anchor = score.core.anchor(node); - if graph.input: anchor = score.core.explode(graph.input, anchor); - attributes.append(("anchor", name, str(anchor))); - if "labels" in values and node.label is not None: - instance = node.label; - else: - instance = "__()_{}__".format(prefix, n); - n += 1; - instances.append(("instance", name, instance)); - if "tops" in values and node.is_top: - # - # the native SMATCH code (wrongly, i believe) ties the top 
property to - # the node label (see https://github.com/cfmrp/mtool/issues/12). we get - # to choose whether to faithfully replicate those scores or not. - # - attributes.append(("TOP", name, - node.label if node.label and faith else "")); - if "properties" in values and node.properties and node.values: - for property, value in zip(node.properties, node.values): - attributes.append((property, name, value)); - for edge in graph.edges: - if "edges" in values: - relations.append((edge.lab, mapping[edge.src], mapping[edge.tgt])); - if "attributes" in values: - if edge.attributes and edge.values: - for attribute, value in zip(edge.attributes, edge.values): - relations.append((str((attribute, value)), - mapping[edge.src], mapping[edge.tgt])); - return instances, attributes, relations, n; - -def smatch(gold, system, limit = 20, values = {}, trace = 0, faith = True): - gprefix = "g"; sprefix = "s"; - ginstances, gattributes, grelations, gn \ - = tuples(gold, gprefix, values, faith); - sinstances, sattributes, srelations, sn \ - = tuples(system, sprefix, values, faith); - if trace > 1: - print("gold instances [{}]: {}\ngold attributes [{}]: {}\n" - "gold relations [{}]: {}" - "".format(len(ginstances), ginstances, - len(gattributes), gattributes, - len(grelations), grelations), - file = sys.stderr); - print("system instances [{}]: {}\nsystem attributes [{}]: {}\n" - "system relations [{}]: {}" - "".format(len(sinstances), sinstances, - len(sattributes), sattributes, - len(srelations), srelations), - file = sys.stderr); - correct, gold, system, mapping \ - = get_amr_match(None, None, gold.id, limit = limit, - instance1 = ginstances, attributes1 = gattributes, - relation1 = grelations, prefix1 = gprefix, - instance2 = sinstances, attributes2 = sattributes, - relation2 = srelations, prefix2 = sprefix); - return correct, gold - gn, system - sn, mapping; - -def evaluate(golds, systems, format = "json", limit = 20, - values = {}, trace = 0): - if limit is None or not limit > 0: limit = 20; - if trace > 1: print("RRHC limit: {}".format(limit), file = sys.stderr); - tg = ts = tc = n = 0; - scores = dict() if trace else None; - for gold, system in score.core.intersect(golds, systems): - id = gold.id; - correct, gold, system, mapping \ - = smatch(gold, system, limit, values, trace); - tg += gold; ts += system; tc += correct; - n += 1; - if trace: - if id in scores: - print("smatch.evaluate(): duplicate graph identifier: {}" - "".format(id), file = sys.stderr); - scores[id] = {"g": gold, "s": system, "c": correct}; - if trace > 1: - p, r, f = score.core.fscore(gold, system, correct); - print("G: {}; S: {}; C: {}; P: {}; R: {}; F: {}" - "".format(gold, system, correct, p, r, f), file = sys.stderr); - - p, r, f = score.core.fscore(tg, ts, tc); - result = {"n": n, "g": tg, "s": ts, "c": tc, "p": p, "r": r, "f": f}; - if trace: result["scores"] = scores; - return result; diff --git a/spaces/lwchen/CodeFormer/CodeFormer/facelib/utils/__init__.py b/spaces/lwchen/CodeFormer/CodeFormer/facelib/utils/__init__.py deleted file mode 100644 index f03b1c2bafcd7759cb7e8722a0c6715f201a46dc..0000000000000000000000000000000000000000 --- a/spaces/lwchen/CodeFormer/CodeFormer/facelib/utils/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .face_utils import align_crop_face_landmarks, compute_increased_bbox, get_valid_bboxes, paste_face_back -from .misc import img2tensor, load_file_from_url, download_pretrained_models, scandir - -__all__ = [ - 'align_crop_face_landmarks', 'compute_increased_bbox', 'get_valid_bboxes', 
'load_file_from_url', - 'download_pretrained_models', 'paste_face_back', 'img2tensor', 'scandir' -] diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/adl/async/for_each.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/adl/async/for_each.h deleted file mode 100644 index 08347f659279c487ac9ba5d29a6db3e4b9a6ddb9..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/adl/async/for_each.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// The purpose of this header is to #include the async/for_each.h header of the -// sequential, host, and device systems. It should be #included in any code -// which uses ADL to dispatch async for_each. - -#pragma once - -#include - -//#include - -//#define __THRUST_HOST_SYSTEM_ASYNC_FOR_EACH_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/async/for_each.h> -//#include __THRUST_HOST_SYSTEM_ASYNC_FOR_EACH_HEADER -//#undef __THRUST_HOST_SYSTEM_ASYNC_FOR_EACH_HEADER - -#define __THRUST_DEVICE_SYSTEM_ASYNC_FOR_EACH_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/async/for_each.h> -#include __THRUST_DEVICE_SYSTEM_ASYNC_FOR_EACH_HEADER -#undef __THRUST_DEVICE_SYSTEM_ASYNC_FOR_EACH_HEADER - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/sort.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/sort.h deleted file mode 100644 index 9d4ac199810cd7e8dcc815c8f90c43f36cb84d61..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/sort.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include - -namespace thrust -{ -namespace system -{ -namespace detail -{ -namespace generic -{ - - -template -__host__ __device__ - void sort(thrust::execution_policy &exec, - RandomAccessIterator first, - RandomAccessIterator last); - - -template -__host__ __device__ - void sort(thrust::execution_policy &exec, - RandomAccessIterator first, - RandomAccessIterator last, - StrictWeakOrdering comp); - - -template -__host__ __device__ - void sort_by_key(thrust::execution_policy &exec, - RandomAccessIterator1 keys_first, - RandomAccessIterator1 keys_last, - RandomAccessIterator2 values_first); - - -template -__host__ __device__ - void sort_by_key(thrust::execution_policy &exec, - RandomAccessIterator1 keys_first, - RandomAccessIterator1 keys_last, - RandomAccessIterator2 values_first, - StrictWeakOrdering comp); - - -template -__host__ __device__ - void stable_sort(thrust::execution_policy &exec, - RandomAccessIterator first, - RandomAccessIterator last); - - -// XXX it is an error to call this function; it has no implementation -template -__host__ __device__ - void stable_sort(thrust::execution_policy &exec, - RandomAccessIterator first, - RandomAccessIterator last, - StrictWeakOrdering comp); - - -template -__host__ __device__ - void stable_sort_by_key(thrust::execution_policy &exec, - RandomAccessIterator1 keys_first, - RandomAccessIterator1 keys_last, - RandomAccessIterator2 values_first); - - -// XXX it is an error to call this function; it has no implementation -template -__host__ __device__ - void stable_sort_by_key(thrust::execution_policy &exec, - RandomAccessIterator1 keys_first, - RandomAccessIterator1 keys_last, - RandomAccessIterator2 values_first, - StrictWeakOrdering comp); - - -template -__host__ __device__ - bool is_sorted(thrust::execution_policy &exec, - ForwardIterator first, - ForwardIterator last); - - -template -__host__ __device__ - bool is_sorted(thrust::execution_policy &exec, - ForwardIterator first, - ForwardIterator last, - Compare comp); - - -template -__host__ __device__ - ForwardIterator is_sorted_until(thrust::execution_policy &exec, - ForwardIterator first, - ForwardIterator last); - - -template -__host__ __device__ - ForwardIterator is_sorted_until(thrust::execution_policy &exec, - ForwardIterator first, - ForwardIterator last, - Compare comp); - - -} // end generic -} // end detail -} // end system -} // end thrust - -#include - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/fill.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/fill.h deleted file mode 100644 index 20c636096c25e9a2d951ad2f50a4de72d0d1b968..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/fill.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -// this system has no special fill functions - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/omp/vector.h b/spaces/ma-xu/LIVE/thrust/thrust/system/omp/vector.h deleted file mode 100644 index 101a22c7b2059d69dce14809f8761cfba345315f..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/omp/vector.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*! \file thrust/system/omp/vector.h - * \brief A dynamically-sizable array of elements which reside in memory available to - * Thrust's OpenMP system. - */ - -#pragma once - -#include -#include -#include -#include - -namespace thrust -{ - -// forward declaration of host_vector -// XXX why is this here? it doesn't seem necessary for anything below -template class host_vector; - -namespace system -{ -namespace omp -{ - -/*! \p omp::vector is a container that supports random access to elements, - * constant time removal of elements at the end, and linear time insertion - * and removal of elements at the beginning or in the middle. The number of - * elements in a \p omp::vector may vary dynamically; memory management is - * automatic. The elements contained in an \p omp::vector reside in memory - * available to the \p omp system. - * - * \tparam T The element type of the \p omp::vector. - * \tparam Allocator The allocator type of the \p omp::vector. Defaults to \p omp::allocator. - * - * \see http://www.sgi.com/tech/stl/Vector.html - * \see host_vector For the documentation of the complete interface which is - * shared by \p omp::vector - * \see device_vector - */ -template > -using vector = thrust::detail::vector_base; - -} // end omp -} // end system - -// alias system::omp names at top-level -namespace omp -{ - -using thrust::system::omp::vector; - -} // end omp - -} // end thrust diff --git a/spaces/manhkhanhUIT/Image_Restoration_Colorization/run.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/run.py deleted file mode 100644 index d78448600473d74939d4a820e1b9910f46cc8034..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/Image_Restoration_Colorization/run.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
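- # End-to-end driver: runs Stage 1 (overall restoration), Stage 2 (face
- # detection), Stage 3 (face enhancement) and Stage 4 (blending) in order,
- # shelling out to the per-stage scripts via run_cmd below.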
- -import os -import argparse -import shutil -import sys -from subprocess import call - -def run_cmd(command): - try: - call(command, shell=True) - except KeyboardInterrupt: - print("Process interrupted") - sys.exit(1) - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("--input_folder", type=str, default="./test_images/old", help="Test images") - parser.add_argument( - "--output_folder", - type=str, - default="./output", - help="Restored images, please use the absolute path", - ) - parser.add_argument("--GPU", type=str, default="6,7", help="0,1,2") - parser.add_argument( - "--checkpoint_name", type=str, default="Setting_9_epoch_100", help="choose which checkpoint" - ) - parser.add_argument("--with_scratch", action="store_true") - parser.add_argument("--HR", action='store_true') - opts = parser.parse_args() - - gpu1 = opts.GPU - - # resolve relative paths before changing directory - opts.input_folder = os.path.abspath(opts.input_folder) - opts.output_folder = os.path.abspath(opts.output_folder) - if not os.path.exists(opts.output_folder): - os.makedirs(opts.output_folder) - - main_environment = os.getcwd() - - ## Stage 1: Overall Quality Improve - print("Running Stage 1: Overall restoration") - os.chdir("./Global") - stage_1_input_dir = opts.input_folder - stage_1_output_dir = os.path.join(opts.output_folder, "stage_1_restore_output") - if not os.path.exists(stage_1_output_dir): - os.makedirs(stage_1_output_dir) - - if not opts.with_scratch: - stage_1_command = ( - "python test.py --test_mode Full --Quality_restore --test_input " - + stage_1_input_dir - + " --outputs_dir " - + stage_1_output_dir - + " --gpu_ids " - + gpu1 - ) - run_cmd(stage_1_command) - else: - - mask_dir = os.path.join(stage_1_output_dir, "masks") - new_input = os.path.join(mask_dir, "input") - new_mask = os.path.join(mask_dir, "mask") - stage_1_command_1 = ( - "python detection.py --test_path " - + stage_1_input_dir - + " --output_dir " - + mask_dir - + " --input_size full_size" - + " --GPU " - + gpu1 - ) - - if opts.HR: - HR_suffix=" --HR" - else: - HR_suffix="" - - stage_1_command_2 = ( - "python test.py --Scratch_and_Quality_restore --test_input " - + new_input - + " --test_mask " - + new_mask - + " --outputs_dir " - + stage_1_output_dir - + " --gpu_ids " - + gpu1 + HR_suffix - ) - - run_cmd(stage_1_command_1) - run_cmd(stage_1_command_2) - - ## Solve the case when there is no face in the old photo - stage_1_results = os.path.join(stage_1_output_dir, "restored_image") - stage_4_output_dir = os.path.join(opts.output_folder, "final_output") - if not os.path.exists(stage_4_output_dir): - os.makedirs(stage_4_output_dir) - for x in os.listdir(stage_1_results): - img_dir = os.path.join(stage_1_results, x) - shutil.copy(img_dir, stage_4_output_dir) - - print("Finish Stage 1 ...") - print("\n") - - ## Stage 2: Face Detection - - print("Running Stage 2: Face Detection") - os.chdir(".././Face_Detection") - stage_2_input_dir = os.path.join(stage_1_output_dir, "restored_image") - stage_2_output_dir = os.path.join(opts.output_folder, "stage_2_detection_output") - if not os.path.exists(stage_2_output_dir): - os.makedirs(stage_2_output_dir) - if opts.HR: - stage_2_command = ( - "python detect_all_dlib_HR.py --url " + stage_2_input_dir + " --save_url " + stage_2_output_dir - ) - else: - stage_2_command = ( - "python detect_all_dlib.py --url " + stage_2_input_dir + " --save_url " + stage_2_output_dir - ) - run_cmd(stage_2_command) - print("Finish Stage 2 ...") - print("\n") - - ## Stage 3: Face 
Restore - print("Running Stage 3: Face Enhancement") - os.chdir(".././Face_Enhancement") - stage_3_input_mask = "./" - stage_3_input_face = stage_2_output_dir - stage_3_output_dir = os.path.join(opts.output_folder, "stage_3_face_output") - if not os.path.exists(stage_3_output_dir): - os.makedirs(stage_3_output_dir) - - if opts.HR: - opts.checkpoint_name='FaceSR_512' - stage_3_command = ( - "python test_face.py --old_face_folder " - + stage_3_input_face - + " --old_face_label_folder " - + stage_3_input_mask - + " --tensorboard_log --name " - + opts.checkpoint_name - + " --gpu_ids " - + gpu1 - + " --load_size 512 --label_nc 18 --no_instance --preprocess_mode resize --batchSize 1 --results_dir " - + stage_3_output_dir - + " --no_parsing_map" - ) - else: - stage_3_command = ( - "python test_face.py --old_face_folder " - + stage_3_input_face - + " --old_face_label_folder " - + stage_3_input_mask - + " --tensorboard_log --name " - + opts.checkpoint_name - + " --gpu_ids " - + gpu1 - + " --load_size 256 --label_nc 18 --no_instance --preprocess_mode resize --batchSize 4 --results_dir " - + stage_3_output_dir - + " --no_parsing_map" - ) - run_cmd(stage_3_command) - print("Finish Stage 3 ...") - print("\n") - - ## Stage 4: Warp back - print("Running Stage 4: Blending") - os.chdir(".././Face_Detection") - stage_4_input_image_dir = os.path.join(stage_1_output_dir, "restored_image") - stage_4_input_face_dir = os.path.join(stage_3_output_dir, "each_img") - stage_4_output_dir = os.path.join(opts.output_folder, "final_output") - if not os.path.exists(stage_4_output_dir): - os.makedirs(stage_4_output_dir) - if opts.HR: - stage_4_command = ( - "python align_warp_back_multiple_dlib_HR.py --origin_url " - + stage_4_input_image_dir - + " --replace_url " - + stage_4_input_face_dir - + " --save_url " - + stage_4_output_dir - ) - else: - stage_4_command = ( - "python align_warp_back_multiple_dlib.py --origin_url " - + stage_4_input_image_dir - + " --replace_url " - + stage_4_input_face_dir - + " --save_url " - + stage_4_output_dir - ) - run_cmd(stage_4_command) - print("Finish Stage 4 ...") - print("\n") - - print("All the processing is done. Please check the results.") - diff --git a/spaces/marcusj83/MusicGenbruh/audiocraft/quantization/core_vq.py b/spaces/marcusj83/MusicGenbruh/audiocraft/quantization/core_vq.py deleted file mode 100644 index e1896bb1788a945a1f7be6369abb255ecf72c7a0..0000000000000000000000000000000000000000 --- a/spaces/marcusj83/MusicGenbruh/audiocraft/quantization/core_vq.py +++ /dev/null @@ -1,400 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
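- # Building blocks for vector quantization: a Euclidean codebook with EMA
- # updates and optional k-means initialization, a single VQ layer on top of
- # it, and a residual VQ that stacks several such layers.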
- -import typing as tp - -from einops import rearrange, repeat -import flashy -import torch -from torch import nn, einsum -import torch.nn.functional as F - - -def exists(val: tp.Optional[tp.Any]) -> bool: - return val is not None - - -def default(val: tp.Any, d: tp.Any) -> tp.Any: - return val if exists(val) else d - - -def l2norm(t): - return F.normalize(t, p=2, dim=-1) - - -def ema_inplace(moving_avg, new, decay: float): - moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) - - -def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5): - return (x + epsilon) / (x.sum() + n_categories * epsilon) - - -def uniform_init(*shape: int): - t = torch.empty(shape) - nn.init.kaiming_uniform_(t) - return t - - -def sample_vectors(samples, num: int): - num_samples, device = samples.shape[0], samples.device - - if num_samples >= num: - indices = torch.randperm(num_samples, device=device)[:num] - else: - indices = torch.randint(0, num_samples, (num,), device=device) - - return samples[indices] - - -def kmeans(samples, num_clusters: int, num_iters: int = 10): - dim, dtype = samples.shape[-1], samples.dtype - - means = sample_vectors(samples, num_clusters) - - for _ in range(num_iters): - diffs = rearrange(samples, "n d -> n () d") - rearrange( - means, "c d -> () c d" - ) - dists = -(diffs ** 2).sum(dim=-1) - - buckets = dists.max(dim=-1).indices - bins = torch.bincount(buckets, minlength=num_clusters) - zero_mask = bins == 0 - bins_min_clamped = bins.masked_fill(zero_mask, 1) - - new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype) - new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples) - new_means = new_means / bins_min_clamped[..., None] - - means = torch.where(zero_mask[..., None], means, new_means) - - return means, bins - - -def orthgonal_loss_fn(t): - # eq (2) from https://arxiv.org/abs/2112.00384 - n = t.shape[0] - normed_codes = l2norm(t) - identity = torch.eye(n, device=t.device) - cosine_sim = einsum("i d, j d -> i j", normed_codes, normed_codes) - return ((cosine_sim - identity) ** 2).sum() / (n ** 2) - - -class EuclideanCodebook(nn.Module): - """Codebook with Euclidean distance. - - Args: - dim (int): Dimension. - codebook_size (int): Codebook size. - kmeans_init (bool): Whether to use k-means to initialize the codebooks. - If set to true, run the k-means algorithm on the first training batch and use - the learned centroids as initialization. - kmeans_iters (int): Number of iterations used for k-means algorithm at initialization. - decay (float): Decay for exponential moving average over the codebooks. - epsilon (float): Epsilon value for numerical stability. - threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes - that have an exponential moving average cluster size less than the specified threshold with - randomly selected vector from the current batch. 
- """ - def __init__( - self, - dim: int, - codebook_size: int, - kmeans_init: int = False, - kmeans_iters: int = 10, - decay: float = 0.8, - epsilon: float = 1e-5, - threshold_ema_dead_code: int = 2, - ): - super().__init__() - self.decay = decay - init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros - embed = init_fn(codebook_size, dim) - - self.codebook_size = codebook_size - - self.kmeans_iters = kmeans_iters - self.epsilon = epsilon - self.threshold_ema_dead_code = threshold_ema_dead_code - - self.register_buffer("inited", torch.Tensor([not kmeans_init])) - self.register_buffer("cluster_size", torch.zeros(codebook_size)) - self.register_buffer("embed", embed) - self.register_buffer("embed_avg", embed.clone()) - - @torch.jit.ignore - def init_embed_(self, data): - if self.inited: - return - - embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters) - self.embed.data.copy_(embed) - self.embed_avg.data.copy_(embed.clone()) - self.cluster_size.data.copy_(cluster_size) - self.inited.data.copy_(torch.Tensor([True])) - # Make sure all buffers across workers are in sync after initialization - flashy.distrib.broadcast_tensors(self.buffers()) - - def replace_(self, samples, mask): - modified_codebook = torch.where( - mask[..., None], sample_vectors(samples, self.codebook_size), self.embed - ) - self.embed.data.copy_(modified_codebook) - - def expire_codes_(self, batch_samples): - if self.threshold_ema_dead_code == 0: - return - - expired_codes = self.cluster_size < self.threshold_ema_dead_code - if not torch.any(expired_codes): - return - - batch_samples = rearrange(batch_samples, "... d -> (...) d") - self.replace_(batch_samples, mask=expired_codes) - flashy.distrib.broadcast_tensors(self.buffers()) - - def preprocess(self, x): - x = rearrange(x, "... d -> (...) d") - return x - - def quantize(self, x): - embed = self.embed.t() - dist = -( - x.pow(2).sum(1, keepdim=True) - - 2 * x @ embed - + embed.pow(2).sum(0, keepdim=True) - ) - embed_ind = dist.max(dim=-1).indices - return embed_ind - - def postprocess_emb(self, embed_ind, shape): - return embed_ind.view(*shape[:-1]) - - def dequantize(self, embed_ind): - quantize = F.embedding(embed_ind, self.embed) - return quantize - - def encode(self, x): - shape = x.shape - # pre-process - x = self.preprocess(x) - # quantize - embed_ind = self.quantize(x) - # post-process - embed_ind = self.postprocess_emb(embed_ind, shape) - return embed_ind - - def decode(self, embed_ind): - quantize = self.dequantize(embed_ind) - return quantize - - def forward(self, x): - shape, dtype = x.shape, x.dtype - x = self.preprocess(x) - self.init_embed_(x) - - embed_ind = self.quantize(x) - embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype) - embed_ind = self.postprocess_emb(embed_ind, shape) - quantize = self.dequantize(embed_ind) - - if self.training: - # We do the expiry of code at that point as buffers are in sync - # and all the workers will take the same decision. 
- self.expire_codes_(x) - ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay) - embed_sum = x.t() @ embed_onehot - ema_inplace(self.embed_avg, embed_sum.t(), self.decay) - cluster_size = ( - laplace_smoothing(self.cluster_size, self.codebook_size, self.epsilon) - * self.cluster_size.sum() - ) - embed_normalized = self.embed_avg / cluster_size.unsqueeze(1) - self.embed.data.copy_(embed_normalized) - - return quantize, embed_ind - - -class VectorQuantization(nn.Module): - """Vector quantization implementation. - Currently supports only euclidean distance. - - Args: - dim (int): Dimension - codebook_size (int): Codebook size - codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim. - decay (float): Decay for exponential moving average over the codebooks. - epsilon (float): Epsilon value for numerical stability. - kmeans_init (bool): Whether to use kmeans to initialize the codebooks. - kmeans_iters (int): Number of iterations used for kmeans initialization. - threshold_ema_dead_code (int): - channels_last (bool): Channels are the last dimension in the input tensors. - commitment_weight (float): Weight for commitment loss. - orthogonal_reg_weight (float): Orthogonal regularization weights. - orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes. - orthogonal_reg_max_codes (optional int): Maximum number of codes to consider - for orthogonal regulariation. - threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes - that have an exponential moving average cluster size less than the specified threshold with - randomly selected vector from the current batch. - """ - def __init__( - self, - dim: int, - codebook_size: int, - codebook_dim: tp.Optional[int] = None, - decay: float = 0.8, - epsilon: float = 1e-5, - kmeans_init: bool = False, - kmeans_iters: int = 10, - threshold_ema_dead_code: int = 2, - channels_last: bool = False, - commitment_weight: float = 1., - orthogonal_reg_weight: float = 0.0, - orthogonal_reg_active_codes_only: bool = False, - orthogonal_reg_max_codes: tp.Optional[int] = None, - ): - super().__init__() - _codebook_dim: int = default(codebook_dim, dim) - - requires_projection = _codebook_dim != dim - self.project_in = (nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity()) - self.project_out = (nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity()) - - self.epsilon = epsilon - self.commitment_weight = commitment_weight - - self.orthogonal_reg_weight = orthogonal_reg_weight - self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only - self.orthogonal_reg_max_codes = orthogonal_reg_max_codes - - self._codebook = EuclideanCodebook(dim=_codebook_dim, codebook_size=codebook_size, - kmeans_init=kmeans_init, kmeans_iters=kmeans_iters, - decay=decay, epsilon=epsilon, - threshold_ema_dead_code=threshold_ema_dead_code) - self.codebook_size = codebook_size - - self.channels_last = channels_last - - @property - def codebook(self): - return self._codebook.embed - - @property - def inited(self): - return self._codebook.inited - - def _preprocess(self, x): - if not self.channels_last: - x = rearrange(x, "b d n -> b n d") - return x - - def _postprocess(self, quantize): - if not self.channels_last: - quantize = rearrange(quantize, "b n d -> b d n") - return quantize - - def encode(self, x): - x = self._preprocess(x) - x = self.project_in(x) - embed_in = self._codebook.encode(x) - return embed_in - - def decode(self, embed_ind): - 
quantize = self._codebook.decode(embed_ind) - quantize = self.project_out(quantize) - quantize = self._postprocess(quantize) - return quantize - - def forward(self, x): - device = x.device - x = self._preprocess(x) - - x = self.project_in(x) - quantize, embed_ind = self._codebook(x) - - if self.training: - quantize = x + (quantize - x).detach() - - loss = torch.tensor([0.0], device=device, requires_grad=self.training) - - if self.training: - if self.commitment_weight > 0: - commit_loss = F.mse_loss(quantize.detach(), x) - loss = loss + commit_loss * self.commitment_weight - - if self.orthogonal_reg_weight > 0: - codebook = self.codebook - - if self.orthogonal_reg_active_codes_only: - # only calculate orthogonal loss for the activated codes for this batch - unique_code_ids = torch.unique(embed_ind) - codebook = codebook[unique_code_ids] - - num_codes = codebook.shape[0] - if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes: - rand_ids = torch.randperm(num_codes, device=device)[:self.orthogonal_reg_max_codes] - codebook = codebook[rand_ids] - - orthogonal_reg_loss = orthgonal_loss_fn(codebook) - loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight - - quantize = self.project_out(quantize) - quantize = self._postprocess(quantize) - - return quantize, embed_ind, loss - - -class ResidualVectorQuantization(nn.Module): - """Residual vector quantization implementation. - - Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf - """ - def __init__(self, *, num_quantizers, **kwargs): - super().__init__() - self.layers = nn.ModuleList( - [VectorQuantization(**kwargs) for _ in range(num_quantizers)] - ) - - def forward(self, x, n_q: tp.Optional[int] = None): - quantized_out = 0.0 - residual = x - - all_losses = [] - all_indices = [] - - n_q = n_q or len(self.layers) - - for i, layer in enumerate(self.layers[:n_q]): - quantized, indices, loss = layer(residual) - residual = residual - quantized - quantized_out = quantized_out + quantized - all_indices.append(indices) - all_losses.append(loss) - - out_losses, out_indices = map(torch.stack, (all_losses, all_indices)) - return quantized_out, out_indices, out_losses - - def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None) -> torch.Tensor: - residual = x - all_indices = [] - n_q = n_q or len(self.layers) - for layer in self.layers[:n_q]: - indices = layer.encode(residual) - quantized = layer.decode(indices) - residual = residual - quantized - all_indices.append(indices) - out_indices = torch.stack(all_indices) - return out_indices - - def decode(self, q_indices: torch.Tensor) -> torch.Tensor: - quantized_out = torch.tensor(0.0, device=q_indices.device) - for i, indices in enumerate(q_indices): - layer = self.layers[i] - quantized = layer.decode(indices) - quantized_out = quantized_out + quantized - return quantized_out diff --git a/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/AdaptiveWingLoss/core/evaler.py b/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/AdaptiveWingLoss/core/evaler.py deleted file mode 100644 index e5f5946e7eb0a097aba691beb573340124e53e42..0000000000000000000000000000000000000000 --- a/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/AdaptiveWingLoss/core/evaler.py +++ /dev/null @@ -1,151 +0,0 @@ -import matplotlib -matplotlib.use('Agg') -import math -import torch -import copy -import time -from torch.autograd import Variable -import shutil -from skimage import io -import numpy as np -from utils.utils import fan_NME, 
show_landmarks, get_preds_fromhm -from PIL import Image, ImageDraw -import os -import sys -import cv2 -import matplotlib.pyplot as plt - - -device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - -def eval_model(model, dataloaders, dataset_sizes, - writer, use_gpu=True, epoches=5, dataset='val', - save_path='./', num_landmarks=68): - global_nme = 0 - model.eval() - for epoch in range(epoches): - running_loss = 0 - step = 0 - total_nme = 0 - total_count = 0 - fail_count = 0 - nmes = [] - # running_corrects = 0 - - # Iterate over data. - with torch.no_grad(): - for data in dataloaders[dataset]: - total_runtime = 0 - run_count = 0 - step_start = time.time() - step += 1 - # get the inputs - inputs = data['image'].type(torch.FloatTensor) - labels_heatmap = data['heatmap'].type(torch.FloatTensor) - labels_boundary = data['boundary'].type(torch.FloatTensor) - landmarks = data['landmarks'].type(torch.FloatTensor) - loss_weight_map = data['weight_map'].type(torch.FloatTensor) - # wrap them in Variable - if use_gpu: - inputs = inputs.to(device) - labels_heatmap = labels_heatmap.to(device) - labels_boundary = labels_boundary.to(device) - loss_weight_map = loss_weight_map.to(device) - else: - inputs, labels_heatmap = Variable(inputs), Variable(labels_heatmap) - labels_boundary = Variable(labels_boundary) - labels = torch.cat((labels_heatmap, labels_boundary), 1) - single_start = time.time() - outputs, boundary_channels = model(inputs) - single_end = time.time() - total_runtime += time.time() - single_start - run_count += 1 - step_end = time.time() - for i in range(inputs.shape[0]): - print(inputs.shape) - img = inputs[i] - img = img.cpu().numpy() - img = img.transpose((1, 2, 0)) #*255.0 - # img = img.astype(np.uint8) - # img = Image.fromarray(img) - # pred_heatmap = outputs[-1][i].detach().cpu()[:-1, :, :] - pred_heatmap = outputs[-1][:, :-1, :, :][i].detach().cpu() - pred_landmarks, _ = get_preds_fromhm(pred_heatmap.unsqueeze(0)) - pred_landmarks = pred_landmarks.squeeze().numpy() - - gt_landmarks = data['landmarks'][i].numpy() - print(pred_landmarks, gt_landmarks) - import cv2 - while(True): - imgshow = vis_landmark_on_img(cv2.UMat(img), pred_landmarks*4) - cv2.imshow('img', imgshow) - - if(cv2.waitKey(10) == ord('q')): - break - - - if num_landmarks == 68: - left_eye = np.average(gt_landmarks[36:42], axis=0) - right_eye = np.average(gt_landmarks[42:48], axis=0) - norm_factor = np.linalg.norm(left_eye - right_eye) - # norm_factor = np.linalg.norm(gt_landmarks[36]- gt_landmarks[45]) - - elif num_landmarks == 98: - norm_factor = np.linalg.norm(gt_landmarks[60]- gt_landmarks[72]) - elif num_landmarks == 19: - left, top = gt_landmarks[-2, :] - right, bottom = gt_landmarks[-1, :] - norm_factor = math.sqrt(abs(right - left)*abs(top-bottom)) - gt_landmarks = gt_landmarks[:-2, :] - elif num_landmarks == 29: - # norm_factor = np.linalg.norm(gt_landmarks[8]- gt_landmarks[9]) - norm_factor = np.linalg.norm(gt_landmarks[16]- gt_landmarks[17]) - single_nme = (np.sum(np.linalg.norm(pred_landmarks*4 - gt_landmarks, axis=1)) / pred_landmarks.shape[0]) / norm_factor - - nmes.append(single_nme) - total_count += 1 - if single_nme > 0.1: - fail_count += 1 - if step % 10 == 0: - print('Step {} Time: {:.6f} Input Mean: {:.6f} Output Mean: {:.6f}'.format( - step, step_end - step_start, - torch.mean(labels), - torch.mean(outputs[0]))) - # gt_landmarks = landmarks.numpy() - # pred_heatmap = outputs[-1].to('cpu').numpy() - gt_landmarks = landmarks - batch_nme = fan_NME(outputs[-1][:, :-1, :, 
:].detach().cpu(), gt_landmarks, num_landmarks) - # batch_nme = 0 - total_nme += batch_nme - epoch_nme = total_nme / dataset_sizes['val'] - global_nme += epoch_nme - nme_save_path = os.path.join(save_path, 'nme_log.npy') - np.save(nme_save_path, np.array(nmes)) - print('NME: {:.6f} Failure Rate: {:.6f} Total Count: {:.6f} Fail Count: {:.6f}'.format(epoch_nme, fail_count/total_count, total_count, fail_count)) - print('Evaluation done! Average NME: {:.6f}'.format(global_nme/epoches)) - print('Everage runtime for a single batch: {:.6f}'.format(total_runtime/run_count)) - return model - - -def vis_landmark_on_img(img, shape, linewidth=2): - ''' - Visualize landmark on images. - ''' - - def draw_curve(idx_list, color=(0, 255, 0), loop=False, lineWidth=linewidth): - for i in idx_list: - cv2.line(img, (shape[i, 0], shape[i, 1]), (shape[i + 1, 0], shape[i + 1, 1]), color, lineWidth) - if (loop): - cv2.line(img, (shape[idx_list[0], 0], shape[idx_list[0], 1]), - (shape[idx_list[-1] + 1, 0], shape[idx_list[-1] + 1, 1]), color, lineWidth) - - draw_curve(list(range(0, 32))) # jaw - draw_curve(list(range(33, 41)), color=(0, 0, 255), loop=True) # eye brow - draw_curve(list(range(42, 50)), color=(0, 0, 255), loop=True) - draw_curve(list(range(51, 59))) # nose - draw_curve(list(range(60, 67)), loop=True) # eyes - draw_curve(list(range(68, 75)), loop=True) - draw_curve(list(range(76, 87)), loop=True, color=(0, 255, 255)) # mouth - draw_curve(list(range(88, 95)), loop=True, color=(255, 255, 0)) - - return img \ No newline at end of file diff --git a/spaces/mateuseap/magic-vocals/vc_infer_pipeline.py b/spaces/mateuseap/magic-vocals/vc_infer_pipeline.py deleted file mode 100644 index a0b50d4c703b7638d7c951c9d820a1e59c275fc3..0000000000000000000000000000000000000000 --- a/spaces/mateuseap/magic-vocals/vc_infer_pipeline.py +++ /dev/null @@ -1,646 +0,0 @@ -import numpy as np, parselmouth, torch, pdb, sys, os -from time import time as ttime -import torch.nn.functional as F -import torchcrepe # Fork feature. Use the crepe f0 algorithm. 
New dependency (pip install torchcrepe) -from torch import Tensor -import scipy.signal as signal -import pyworld, os, traceback, faiss, librosa, torchcrepe -from scipy import signal -from functools import lru_cache - -now_dir = os.getcwd() -sys.path.append(now_dir) - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} - - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # 每半秒一个点 - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class VC(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * self.x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # 查询切点前后查询时间 - self.t_center = self.sr * self.x_center # 查询切点位置 - self.t_max = self.sr * self.x_max # 免查询时长阈值 - self.device = config.device - - # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device) - def get_optimal_torch_device(self, index: int = 0) -> torch.device: - # Get cuda device - if torch.cuda.is_available(): - return torch.device( - f"cuda:{index % torch.cuda.device_count()}" - ) # Very fast - elif torch.backends.mps.is_available(): - return torch.device("mps") - # Insert an else here to grab "xla" devices if available. TO DO later. Requires the torch_xla.core.xla_model library - # Else wise return the "cpu" as a torch device, - return torch.device("cpu") - - # Fork Feature: Compute f0 with the crepe method - def get_f0_crepe_computation( - self, - x, - f0_min, - f0_max, - p_len, - hop_length=160, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time. - model="full", # Either use crepe-tiny "tiny" or crepe "full". Default is full - ): - x = x.astype( - np.float32 - ) # fixes the F.conv2D exception. We needed to convert double to float. 
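-         # Scale by the 99.9th-percentile magnitude so a few outlier samples
-         # do not dominate the waveform's amplitude before crepe inference.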
- x /= np.quantile(np.abs(x), 0.999) - torch_device = self.get_optimal_torch_device() - audio = torch.from_numpy(x).to(torch_device, copy=True) - audio = torch.unsqueeze(audio, dim=0) - if audio.ndim == 2 and audio.shape[0] > 1: - audio = torch.mean(audio, dim=0, keepdim=True).detach() - audio = audio.detach() - print("Initiating prediction with a crepe_hop_length of: " + str(hop_length)) - pitch: Tensor = torchcrepe.predict( - audio, - self.sr, - hop_length, - f0_min, - f0_max, - model, - batch_size=hop_length * 2, - device=torch_device, - pad=True, - ) - p_len = p_len or x.shape[0] // hop_length - # Resize the pitch for final f0 - source = np.array(pitch.squeeze(0).cpu().float().numpy()) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * p_len, len(source)) / p_len, - np.arange(0, len(source)), - source, - ) - f0 = np.nan_to_num(target) - return f0 # Resized f0 - - def get_f0_official_crepe_computation( - self, - x, - f0_min, - f0_max, - model="full", - ): - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - return f0 - - # Fork Feature: Compute pYIN f0 method - def get_f0_pyin_computation(self, x, f0_min, f0_max): - y, sr = librosa.load("saudio/Sidney.wav", self.sr, mono=True) - f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max) - f0 = f0[1:] # Get rid of extra first frame - return f0 - - # Fork Feature: Acquire median hybrid f0 estimation calculation - def get_f0_hybrid_computation( - self, - methods_str, - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step, - ): - # Get various f0 methods from input to use in the computation stack - s = methods_str - s = s.split("hybrid")[1] - s = s.replace("[", "").replace("]", "") - methods = s.split("+") - f0_computation_stack = [] - - print("Calculating f0 pitch estimations for methods: %s" % str(methods)) - x = x.astype(np.float32) - x /= np.quantile(np.abs(x), 0.999) - # Get f0 calculations for all methods specified - for method in methods: - f0 = None - if method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif method == "crepe": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max) - f0 = f0[1:] # Get rid of extra first frame - elif method == "crepe-tiny": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, "tiny") - f0 = f0[1:] # Get rid of extra first frame - elif method == "mangio-crepe": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length - ) - elif method == "mangio-crepe-tiny": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length, "tiny" - ) - elif method == "harvest": - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - f0 = f0[1:] # Get rid of 
first frame. - elif method == "dio": # Potentially buggy? - f0, t = pyworld.dio( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 = f0[1:] - # elif method == "pyin": Not Working just yet - # f0 = self.get_f0_pyin_computation(x, f0_min, f0_max) - # Push method to the stack - f0_computation_stack.append(f0) - - for fc in f0_computation_stack: - print(len(fc)) - - print("Calculating hybrid median f0 from the stack of: %s" % str(methods)) - f0_median_hybrid = None - if len(f0_computation_stack) == 1: - f0_median_hybrid = f0_computation_stack[0] - else: - f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) - return f0_median_hybrid - - def get_f0( - self, - input_audio_path, - x, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - inp_f0=None, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - elif f0_method == "dio": # Potentially Buggy? 
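-             # DIO gives a fast initial f0 track; pyworld.stonemask below
-             # refines it against the waveform.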
- f0, t = pyworld.dio( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - elif f0_method == "crepe": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max) - elif f0_method == "crepe-tiny": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, "tiny") - elif f0_method == "mangio-crepe": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length - ) - elif f0_method == "mangio-crepe-tiny": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length, "tiny" - ) - elif f0_method == "rmvpe": - if hasattr(self, "model_rmvpe") == False: - from rmvpe import RMVPE - - print("loading rmvpe model") - self.model_rmvpe = RMVPE( - "rmvpe.pt", is_half=self.is_half, device=self.device - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - - elif "hybrid" in f0_method: - # Perform hybrid median pitch estimation - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = self.get_f0_hybrid_computation( - f0_method, - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step, - ) - - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = feats.clone() - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - 
torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch != None and pitchf != None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]) - .data.cpu() - .float() - .numpy() - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy() - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - crepe_hop_length, - f0_file=None, - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - inp_f0, - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if self.device == "mps": - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in 
opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if resample_sr >= 16000 and tgt_sr != resample_sr: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/matthoffner/AudioCraft_Plus/audiocraft/models/musicgen.py b/spaces/matthoffner/AudioCraft_Plus/audiocraft/models/musicgen.py deleted file mode 100644 index 1d4b2292eaec5016e208bbdf61ec5c99b40b67da..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/AudioCraft_Plus/audiocraft/models/musicgen.py +++ /dev/null @@ -1,409 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Main model for using MusicGen. This will combine all the required components -and provide easy access to the generation API. -""" - -import typing as tp -import warnings - -import torch - -from .encodec import CompressionModel -from .lm import LMModel -from .builders import get_debug_compression_model, get_debug_lm_model -from .loaders import load_compression_model, load_lm_model -from ..data.audio_utils import convert_audio -from ..modules.conditioners import ConditioningAttributes, WavCondition -from ..utils.autocast import TorchAutocast - - -MelodyList = tp.List[tp.Optional[torch.Tensor]] -MelodyType = tp.Union[torch.Tensor, MelodyList] - - -# backward compatible names mapping -_HF_MODEL_CHECKPOINTS_MAP = { - "small": "GrandaddyShmax/musicgen-small", - "medium": "GrandaddyShmax/musicgen-medium", - "large": "GrandaddyShmax/musicgen-large", - "melody": "GrandaddyShmax/musicgen-melody", -} - - -class MusicGen: - """MusicGen main model with convenient generation API. - - Args: - name (str): name of the model. - compression_model (CompressionModel): Compression model - used to map audio to invertible discrete representations. - lm (LMModel): Language model over discrete representations. 
- max_duration (float, optional): maximum duration the model can produce, - otherwise, inferred from the training params. - """ - def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel, - max_duration: tp.Optional[float] = None): - self.name = name - self.compression_model = compression_model - self.lm = lm - if max_duration is None: - if hasattr(lm, 'cfg'): - max_duration = lm.cfg.dataset.segment_duration # type: ignore - else: - raise ValueError("You must provide max_duration when building directly MusicGen") - assert max_duration is not None - self.max_duration: float = max_duration - self.device = next(iter(lm.parameters())).device - self.generation_params: dict = {} - self.set_generation_params(duration=15) # 15 seconds by default - self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None - if self.device.type == 'cpu': - self.autocast = TorchAutocast(enabled=False) - else: - self.autocast = TorchAutocast( - enabled=True, device_type=self.device.type, dtype=torch.float16) - - @property - def frame_rate(self) -> float: - """Roughly the number of AR steps per seconds.""" - return self.compression_model.frame_rate - - @property - def sample_rate(self) -> int: - """Sample rate of the generated audio.""" - return self.compression_model.sample_rate - - @property - def audio_channels(self) -> int: - """Audio channels of the generated audio.""" - return self.compression_model.channels - - @staticmethod - def get_pretrained(name: str = 'GrandaddyShmax/musicgen-melody', device=None): - """Return pretrained model, we provide four models: - - facebook/musicgen-small (300M), text to music, - # see: https://huggingface.co/facebook/musicgen-small - - facebook/musicgen-medium (1.5B), text to music, - # see: https://huggingface.co/facebook/musicgen-medium - - facebook/musicgen-melody (1.5B) text to music and text+melody to music, - # see: https://huggingface.co/facebook/musicgen-melody - - facebook/musicgen-large (3.3B), text to music, - # see: https://huggingface.co/facebook/musicgen-large - """ - if device is None: - if torch.cuda.device_count(): - device = 'cuda' - else: - device = 'cpu' - - if name == 'debug': - # used only for unit tests - compression_model = get_debug_compression_model(device) - lm = get_debug_lm_model(device) - return MusicGen(name, compression_model, lm, max_duration=30) - - lm = load_lm_model(name, device=device) - compression_model = load_compression_model(name, device=device) - if 'self_wav' in lm.condition_provider.conditioners: - lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True - - return MusicGen(name, compression_model, lm) - - def set_generation_params(self, use_sampling: bool = True, top_k: int = 250, - top_p: float = 0.0, temperature: float = 1.0, - duration: float = 30.0, cfg_coef: float = 3.0, - two_step_cfg: bool = False, extend_stride: float = 18): - """Set the generation parameters for MusicGen. - - Args: - use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True. - top_k (int, optional): top_k used for sampling. Defaults to 250. - top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0. - temperature (float, optional): Softmax temperature parameter. Defaults to 1.0. - duration (float, optional): Duration of the generated waveform. Defaults to 30.0. - cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0. 
- two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance, - instead of batching together the two. This has some impact on how things - are padded but seems to have little impact in practice. - extend_stride: when doing extended generation (i.e. more than 30 seconds), by how much - should we extend the audio each time. Larger values will mean less context is - preserved, and shorter value will require extra computations. - """ - assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration." - self.extend_stride = extend_stride - self.duration = duration - self.generation_params = { - 'use_sampling': use_sampling, - 'temp': temperature, - 'top_k': top_k, - 'top_p': top_p, - 'cfg_coef': cfg_coef, - 'two_step_cfg': two_step_cfg, - } - - def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None): - """Override the default progress callback.""" - self._progress_callback = progress_callback - - def generate_unconditional(self, num_samples: int, progress: bool = False, return_tokens: bool = False) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]: - """Generate samples in an unconditional manner. - - Args: - num_samples (int): Number of samples to be generated. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - descriptions: tp.List[tp.Optional[str]] = [None] * num_samples - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) - tokens = self._generate_tokens(attributes, prompt_tokens, progress) - if return_tokens: - return self.generate_audio(tokens), tokens - return self.generate_audio(tokens) - - def generate(self, descriptions: tp.List[str], progress: bool = False, return_tokens: bool = False) \ - -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]: - """Generate samples conditioned on text. - - Args: - descriptions (list of str): A list of strings used as text conditioning. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) - assert prompt_tokens is None - tokens = self._generate_tokens(attributes, prompt_tokens, progress) - if return_tokens: - return self.generate_audio(tokens), tokens - return self.generate_audio(tokens) - - def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType, melody_sample_rate: int, progress: bool = False, return_tokens: bool = False) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]: - """Generate samples conditioned on text and melody. - - Args: - descriptions (list of str): A list of strings used as text conditioning. - melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as - melody conditioning. Should have shape [B, C, T] with B matching the description length, - C=1 or 2. It can be [C, T] if there is a single description. It can also be - a list of [C, T] tensors. - melody_sample_rate: (int): Sample rate of the melody waveforms. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. 
- """ - if isinstance(melody_wavs, torch.Tensor): - if melody_wavs.dim() == 2: - melody_wavs = melody_wavs[None] - if melody_wavs.dim() != 3: - raise ValueError("Melody wavs should have a shape [B, C, T].") - melody_wavs = list(melody_wavs) - else: - for melody in melody_wavs: - if melody is not None: - assert melody.dim() == 2, "One melody in the list has the wrong number of dims." - - melody_wavs = [ - convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels) - if wav is not None else None - for wav in melody_wavs] - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, - melody_wavs=melody_wavs) - assert prompt_tokens is None - tokens = self._generate_tokens(attributes, prompt_tokens, progress) - if return_tokens: - return self.generate_audio(tokens), tokens - return self.generate_audio(tokens) - - def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int, - descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None, - progress: bool = False, return_tokens: bool = False) \ - -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]: - """Generate samples conditioned on audio prompts. - - Args: - prompt (torch.Tensor): A batch of waveforms used for continuation. - Prompt should be [B, C, T], or [C, T] if only one sample is generated. - prompt_sample_rate (int): Sampling rate of the given audio waveforms. - descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - if prompt.dim() == 2: - prompt = prompt[None] - if prompt.dim() != 3: - raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).") - prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels) - if descriptions is None: - descriptions = [None] * len(prompt) - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt) - assert prompt_tokens is not None - tokens = self._generate_tokens(attributes, prompt_tokens, progress) - if return_tokens: - return self.generate_audio(tokens), tokens - return self.generate_audio(tokens) - - @torch.no_grad() - def _prepare_tokens_and_attributes( - self, - descriptions: tp.Sequence[tp.Optional[str]], - prompt: tp.Optional[torch.Tensor], - melody_wavs: tp.Optional[MelodyList] = None, - ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]: - """Prepare model inputs. - - Args: - descriptions (list of str): A list of strings used as text conditioning. - prompt (torch.Tensor): A batch of waveforms used for continuation. - melody_wavs (torch.Tensor, optional): A batch of waveforms - used as melody conditioning. Defaults to None. - """ - attributes = [ - ConditioningAttributes(text={'description': description}) - for description in descriptions] - - if melody_wavs is None: - for attr in attributes: - attr.wav['self_wav'] = WavCondition( - torch.zeros((1, 1, 1), device=self.device), - torch.tensor([0], device=self.device), - sample_rate=[self.sample_rate], - path=[None]) - else: - if 'self_wav' not in self.lm.condition_provider.conditioners: - raise RuntimeError("This model doesn't support melody conditioning. " - "Use the `melody` model.") - assert len(melody_wavs) == len(descriptions), \ - f"number of melody wavs must match number of descriptions! 
" \ - f"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}" - for attr, melody in zip(attributes, melody_wavs): - if melody is None: - attr.wav['self_wav'] = WavCondition( - torch.zeros((1, 1, 1), device=self.device), - torch.tensor([0], device=self.device), - sample_rate=[self.sample_rate], - path=[None]) - else: - attr.wav['self_wav'] = WavCondition( - melody[None].to(device=self.device), - torch.tensor([melody.shape[-1]], device=self.device), - sample_rate=[self.sample_rate], - path=[None], - ) - - if prompt is not None: - if descriptions is not None: - assert len(descriptions) == len(prompt), "Prompt and nb. descriptions doesn't match" - prompt = prompt.to(self.device) - prompt_tokens, scale = self.compression_model.encode(prompt) - assert scale is None - else: - prompt_tokens = None - return attributes, prompt_tokens - - def _generate_tokens(self, attributes: tp.List[ConditioningAttributes], - prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor: - """Generate discrete audio tokens given audio prompt and/or conditions. - - Args: - attributes (list of ConditioningAttributes): Conditions used for generation (text/melody). - prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - Returns: - torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params. - """ - i = 0 - prompt_list = attributes[0].text['description'] - total_gen_len = int(self.duration * self.frame_rate) - max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate) - current_gen_offset: int = 0 - - def _progress_callback(generated_tokens: int, tokens_to_generate: int): - generated_tokens += current_gen_offset - if current_gen_offset > 0: - generated_tokens += (self.max_duration - self.extend_stride) * self.frame_rate - if self._progress_callback is not None: - # Note that total_gen_len might be quite wrong depending on the - # codebook pattern used, but with delay it is almost accurate. - self._progress_callback(generated_tokens, total_gen_len) - else: - print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r') - - if prompt_tokens is not None: - assert max_prompt_len >= prompt_tokens.shape[-1], \ - "Prompt is longer than audio to generate" - - callback = None - if progress: - callback = _progress_callback - - if self.duration <= self.max_duration: - # generate by sampling from LM, simple case. - with self.autocast: - attributes[0].text['description'] = prompt_list[0] - gen_tokens = self.lm.generate( - prompt_tokens, attributes, - callback=callback, max_gen_len=total_gen_len, **self.generation_params) - - else: - # now this gets a bit messier, we need to handle prompts, - # melody conditioning etc. - ref_wavs = [attr.wav['self_wav'] for attr in attributes] - all_tokens = [] - if prompt_tokens is None: - prompt_length = 0 - else: - all_tokens.append(prompt_tokens) - prompt_length = prompt_tokens.shape[-1] - - stride_tokens = int(self.frame_rate * self.extend_stride) - - while current_gen_offset + prompt_length < total_gen_len: - time_offset = current_gen_offset / self.frame_rate - chunk_duration = min(self.duration - time_offset, self.max_duration) - max_gen_len = int(chunk_duration * self.frame_rate) - for attr, ref_wav in zip(attributes, ref_wavs): - wav_length = ref_wav.length.item() - if wav_length == 0: - continue - # We will extend the wav periodically if it not long enough. 
- # we have to do it here rather than in conditioners.py as otherwise - # we wouldn't have the full wav. - initial_position = int(time_offset * self.sample_rate) - wav_target_length = int(self.max_duration * self.sample_rate) - positions = torch.arange(initial_position, - initial_position + wav_target_length, device=self.device) - attr.wav['self_wav'] = WavCondition( - ref_wav[0][..., positions % wav_length], - torch.full_like(ref_wav[1], wav_target_length), - [self.sample_rate] * ref_wav[0].size(0), - [None], [0.]) - with self.autocast: - if i >= len(prompt_list): - i = len(prompt_list) - 1 - attributes[0].text['description'] = prompt_list[i] - gen_tokens = self.lm.generate( - prompt_tokens, attributes, - callback=callback, max_gen_len=max_gen_len, **self.generation_params) - i = i + 1 - if prompt_tokens is None: - all_tokens.append(gen_tokens) - else: - all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:]) - prompt_tokens = gen_tokens[:, :, stride_tokens:] - prompt_length = prompt_tokens.shape[-1] - current_gen_offset += stride_tokens - - gen_tokens = torch.cat(all_tokens, dim=-1) - return gen_tokens - - def generate_audio(self, gen_tokens: torch.Tensor): - """Generate Audio from tokens""" - assert gen_tokens.dim() == 3 - with torch.no_grad(): - gen_audio = self.compression_model.decode(gen_tokens, None) - return gen_audio - - def to(self, device: str): - self.compression_model.to(device) - self.lm.to(device) - return self \ No newline at end of file diff --git a/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/runtime/training.py b/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/runtime/training.py deleted file mode 100644 index 2d66f08f45ce7bc753ac463489fc9a8d14a6b8b6..0000000000000000000000000000000000000000 --- a/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/runtime/training.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
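The `MusicGen` class removed above is driven through three calls: `get_pretrained` loads the compression model and language model, `set_generation_params` fixes the sampling configuration, and `generate` maps text descriptions to waveforms of shape [B, C, T]. A minimal usage sketch of that API — assuming the `audiocraft` package from this Space is importable and that the `facebook/musicgen-small` checkpoint name resolves the way the `get_pretrained` docstring suggests:

```python
# Minimal sketch of the MusicGen generation API deleted above.
# Assumptions: `audiocraft` is installed and the checkpoint name below
# resolves via load_lm_model / load_compression_model.
import torchaudio
from audiocraft.models.musicgen import MusicGen

model = MusicGen.get_pretrained('facebook/musicgen-small')
model.set_generation_params(
    use_sampling=True,   # sample instead of argmax decoding
    top_k=250,           # keep the 250 most likely tokens per step
    duration=8.0,        # seconds; durations > max_duration trigger striding
    cfg_coef=3.0,        # classifier-free guidance strength
)
wav = model.generate(['lo-fi hip hop with a mellow piano'], progress=True)
# `wav` is [B, C, T] at the compression model's sample rate.
torchaudio.save('sample.wav', wav[0].cpu(), model.sample_rate)
```

Durations above `max_duration` are handled by the striding loop in `_generate_tokens`, which advances in `extend_stride`-second steps and carries the remaining tokens forward as the prompt for the next chunk.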
-# -# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES -# SPDX-License-Identifier: MIT - -import logging -import pathlib -from typing import List - -import numpy as np -import torch -import torch.distributed as dist -import torch.nn as nn -from apex.optimizers import FusedAdam, FusedLAMB -from torch.nn.modules.loss import _Loss -from torch.nn.parallel import DistributedDataParallel -from torch.optim import Optimizer -from torch.utils.data import DataLoader, DistributedSampler -from tqdm import tqdm - -from se3_transformer.data_loading import QM9DataModule -from se3_transformer.model import SE3TransformerPooled -from se3_transformer.model.fiber import Fiber -from se3_transformer.runtime import gpu_affinity -from se3_transformer.runtime.arguments import PARSER -from se3_transformer.runtime.callbacks import QM9MetricCallback, QM9LRSchedulerCallback, BaseCallback, \ - PerformanceCallback -from se3_transformer.runtime.inference import evaluate -from se3_transformer.runtime.loggers import LoggerCollection, DLLogger, WandbLogger, Logger -from se3_transformer.runtime.utils import to_cuda, get_local_rank, init_distributed, seed_everything, \ - using_tensor_cores, increase_l2_fetch_granularity - - -def save_state(model: nn.Module, optimizer: Optimizer, epoch: int, path: pathlib.Path, callbacks: List[BaseCallback]): - """ Saves model, optimizer and epoch states to path (only once per node) """ - if get_local_rank() == 0: - state_dict = model.module.state_dict() if isinstance(model, DistributedDataParallel) else model.state_dict() - checkpoint = { - 'state_dict': state_dict, - 'optimizer_state_dict': optimizer.state_dict(), - 'epoch': epoch - } - for callback in callbacks: - callback.on_checkpoint_save(checkpoint) - - torch.save(checkpoint, str(path)) - logging.info(f'Saved checkpoint to {str(path)}') - - -def load_state(model: nn.Module, optimizer: Optimizer, path: pathlib.Path, callbacks: List[BaseCallback]): - """ Loads model, optimizer and epoch states from path """ - checkpoint = torch.load(str(path), map_location={'cuda:0': f'cuda:{get_local_rank()}'}) - if isinstance(model, DistributedDataParallel): - model.module.load_state_dict(checkpoint['state_dict']) - else: - model.load_state_dict(checkpoint['state_dict']) - optimizer.load_state_dict(checkpoint['optimizer_state_dict']) - - for callback in callbacks: - callback.on_checkpoint_load(checkpoint) - - logging.info(f'Loaded checkpoint from {str(path)}') - return checkpoint['epoch'] - - -def train_epoch(train_dataloader, loss_fn, epoch_idx, grad_scaler, optimizer, local_rank, callbacks, args): - losses = [] - for i, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader), unit='batch', - desc=f'Epoch {epoch_idx}', disable=(args.silent or local_rank != 0)): - *inputs, target = to_cuda(batch) - - for callback in callbacks: - callback.on_batch_start() - - with torch.cuda.amp.autocast(enabled=args.amp): - pred = model(*inputs) - loss = loss_fn(pred, target) / args.accumulate_grad_batches - - grad_scaler.scale(loss).backward() - - # gradient accumulation - if (i + 1) % args.accumulate_grad_batches == 0 or (i + 1) == len(train_dataloader): - if args.gradient_clip: - grad_scaler.unscale_(optimizer) - torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clip) - - grad_scaler.step(optimizer) - grad_scaler.update() - optimizer.zero_grad() - - losses.append(loss.item()) - - return np.mean(losses) - - -def train(model: nn.Module, - loss_fn: _Loss, - train_dataloader: DataLoader, - val_dataloader: 
DataLoader, - callbacks: List[BaseCallback], - logger: Logger, - args): - device = torch.cuda.current_device() - model.to(device=device) - local_rank = get_local_rank() - world_size = dist.get_world_size() if dist.is_initialized() else 1 - - if dist.is_initialized(): - model = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank) - - model.train() - grad_scaler = torch.cuda.amp.GradScaler(enabled=args.amp) - if args.optimizer == 'adam': - optimizer = FusedAdam(model.parameters(), lr=args.learning_rate, betas=(args.momentum, 0.999), - weight_decay=args.weight_decay) - elif args.optimizer == 'lamb': - optimizer = FusedLAMB(model.parameters(), lr=args.learning_rate, betas=(args.momentum, 0.999), - weight_decay=args.weight_decay) - else: - optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum, - weight_decay=args.weight_decay) - - epoch_start = load_state(model, optimizer, args.load_ckpt_path, callbacks) if args.load_ckpt_path else 0 - - for callback in callbacks: - callback.on_fit_start(optimizer, args) - - for epoch_idx in range(epoch_start, args.epochs): - if isinstance(train_dataloader.sampler, DistributedSampler): - train_dataloader.sampler.set_epoch(epoch_idx) - - loss = train_epoch(train_dataloader, loss_fn, epoch_idx, grad_scaler, optimizer, local_rank, callbacks, args) - if dist.is_initialized(): - loss = torch.tensor(loss, dtype=torch.float, device=device) - torch.distributed.all_reduce(loss) - loss = (loss / world_size).item() - - logging.info(f'Train loss: {loss}') - logger.log_metrics({'train loss': loss}, epoch_idx) - - for callback in callbacks: - callback.on_epoch_end() - - if not args.benchmark and args.save_ckpt_path is not None and args.ckpt_interval > 0 \ - and (epoch_idx + 1) % args.ckpt_interval == 0: - save_state(model, optimizer, epoch_idx, args.save_ckpt_path, callbacks) - - if not args.benchmark and args.eval_interval > 0 and (epoch_idx + 1) % args.eval_interval == 0: - evaluate(model, val_dataloader, callbacks, args) - model.train() - - for callback in callbacks: - callback.on_validation_end(epoch_idx) - - if args.save_ckpt_path is not None and not args.benchmark: - save_state(model, optimizer, args.epochs, args.save_ckpt_path, callbacks) - - for callback in callbacks: - callback.on_fit_end() - - -def print_parameters_count(model): - num_params_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad) - logging.info(f'Number of trainable parameters: {num_params_trainable}') - - -if __name__ == '__main__': - is_distributed = init_distributed() - local_rank = get_local_rank() - args = PARSER.parse_args() - - logging.getLogger().setLevel(logging.CRITICAL if local_rank != 0 or args.silent else logging.INFO) - - logging.info('====== SE(3)-Transformer ======') - logging.info('| Training procedure |') - logging.info('===============================') - - if args.seed is not None: - logging.info(f'Using seed {args.seed}') - seed_everything(args.seed) - - logger = LoggerCollection([ - DLLogger(save_dir=args.log_dir, filename=args.dllogger_name), - WandbLogger(name=f'QM9({args.task})', save_dir=args.log_dir, project='se3-transformer') - ]) - - datamodule = QM9DataModule(**vars(args)) - model = SE3TransformerPooled( - fiber_in=Fiber({0: datamodule.NODE_FEATURE_DIM}), - fiber_out=Fiber({0: args.num_degrees * args.num_channels}), - fiber_edge=Fiber({0: datamodule.EDGE_FEATURE_DIM}), - output_dim=1, - tensor_cores=using_tensor_cores(args.amp), # use Tensor Cores more effectively - **vars(args) - ) 
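One detail of `train_epoch` above worth isolating is its mixed-precision gradient-accumulation step (note that `train_epoch` reads `model` from the enclosing scope rather than receiving it as an argument). A self-contained sketch of that update, with hypothetical parameter names mirroring the script's `args`:

```python
# Sketch of the AMP + gradient-accumulation update used in train_epoch above.
# Parameter names mirror args.accumulate_grad_batches / args.gradient_clip;
# this is an illustration, not a drop-in replacement.
import torch

def accumulation_step(model, optimizer, grad_scaler, loss,
                      batch_idx, num_batches,
                      accumulate_grad_batches=4, gradient_clip=1.0):
    # Scale the loss so k micro-batches contribute one effective batch.
    grad_scaler.scale(loss / accumulate_grad_batches).backward()
    last = (batch_idx + 1) == num_batches
    if (batch_idx + 1) % accumulate_grad_batches == 0 or last:
        if gradient_clip:
            grad_scaler.unscale_(optimizer)  # clip in unscaled (fp32) units
            torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
        grad_scaler.step(optimizer)  # skips the step if gradients overflowed
        grad_scaler.update()
        optimizer.zero_grad()
```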
- loss_fn = nn.L1Loss() - - if args.benchmark: - logging.info('Running benchmark mode') - world_size = dist.get_world_size() if dist.is_initialized() else 1 - callbacks = [PerformanceCallback(logger, args.batch_size * world_size)] - else: - callbacks = [QM9MetricCallback(logger, targets_std=datamodule.targets_std, prefix='validation'), - QM9LRSchedulerCallback(logger, epochs=args.epochs)] - - if is_distributed: - gpu_affinity.set_affinity(gpu_id=get_local_rank(), nproc_per_node=torch.cuda.device_count()) - - print_parameters_count(model) - logger.log_hyperparams(vars(args)) - increase_l2_fetch_granularity() - train(model, - loss_fn, - datamodule.train_dataloader(), - datamodule.val_dataloader(), - callbacks, - logger, - args) - - logging.info('Training finished successfully') diff --git a/spaces/merve/Grounding_DINO_demo/groundingdino/models/GroundingDINO/utils.py b/spaces/merve/Grounding_DINO_demo/groundingdino/models/GroundingDINO/utils.py deleted file mode 100644 index 5bd18f70225e12b2e27fdb4eabcde91d959f8e31..0000000000000000000000000000000000000000 --- a/spaces/merve/Grounding_DINO_demo/groundingdino/models/GroundingDINO/utils.py +++ /dev/null @@ -1,268 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ - -import copy -import math - -import torch -import torch.nn.functional as F -from torch import Tensor, nn - - -def _get_clones(module, N, layer_share=False): - # import ipdb; ipdb.set_trace() - if layer_share: - return nn.ModuleList([module for i in range(N)]) - else: - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -def get_sine_pos_embed( - pos_tensor: torch.Tensor, - num_pos_feats: int = 128, - temperature: int = 10000, - exchange_xy: bool = True, -): - """generate sine position embedding from a position tensor - Args: - pos_tensor (torch.Tensor): shape: [..., n]. - num_pos_feats (int): projected shape for each float in the tensor. - temperature (int): temperature in the sine/cosine function. - exchange_xy (bool, optional): exchange pos x and pos y. \ - For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. Defaults to True. - Returns: - pos_embed (torch.Tensor): shape: [..., n*num_pos_feats]. 
- """ - scale = 2 * math.pi - dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device) - dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats) - - def sine_func(x: torch.Tensor): - sin_x = x * scale / dim_t - sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2) - return sin_x - - pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)] - if exchange_xy: - pos_res[0], pos_res[1] = pos_res[1], pos_res[0] - pos_res = torch.cat(pos_res, dim=-1) - return pos_res - - -def gen_encoder_output_proposals( - memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None -): - """ - Input: - - memory: bs, \sum{hw}, d_model - - memory_padding_mask: bs, \sum{hw} - - spatial_shapes: nlevel, 2 - - learnedwh: 2 - Output: - - output_memory: bs, \sum{hw}, d_model - - output_proposals: bs, \sum{hw}, 4 - """ - N_, S_, C_ = memory.shape - proposals = [] - _cur = 0 - for lvl, (H_, W_) in enumerate(spatial_shapes): - mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1) - valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) - valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) - - # import ipdb; ipdb.set_trace() - - grid_y, grid_x = torch.meshgrid( - torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device), - torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device), - ) - grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2 - - scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2) - grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale - - if learnedwh is not None: - # import ipdb; ipdb.set_trace() - wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl) - else: - wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) - - # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1) - # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale - # wh = torch.ones_like(grid) / scale - proposal = torch.cat((grid, wh), -1).view(N_, -1, 4) - proposals.append(proposal) - _cur += H_ * W_ - # import ipdb; ipdb.set_trace() - output_proposals = torch.cat(proposals, 1) - output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all( - -1, keepdim=True - ) - output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid - output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf")) - output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf")) - - output_memory = memory - output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0)) - output_memory = output_memory.masked_fill(~output_proposals_valid, float(0)) - - # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf')) - # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf')) - - return output_memory, output_proposals - - -class RandomBoxPerturber: - def __init__( - self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2 - ) -> None: - self.noise_scale = torch.Tensor( - [x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale] - ) - - def __call__(self, refanchors: Tensor) -> Tensor: - nq, bs, query_dim = refanchors.shape - device = refanchors.device - - noise_raw = torch.rand_like(refanchors) - noise_scale = 
self.noise_scale.to(device)[:query_dim] - - new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale) - return new_refanchors.clamp_(0, 1) - - -def sigmoid_focal_loss( - inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False -): - """ - Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. - Returns: - Loss tensor - """ - prob = inputs.sigmoid() - ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") - p_t = prob * targets + (1 - prob) * (1 - targets) - loss = ce_loss * ((1 - p_t) ** gamma) - - if alpha >= 0: - alpha_t = alpha * targets + (1 - alpha) * (1 - targets) - loss = alpha_t * loss - - if no_reduction: - return loss - - return loss.mean(1).sum() / num_boxes - - -class MLP(nn.Module): - """Very simple multi-layer perceptron (also called FFN)""" - - def __init__(self, input_dim, hidden_dim, output_dim, num_layers): - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList( - nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) - ) - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - return x - - -def _get_activation_fn(activation, d_model=256, batch_dim=0): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - if activation == "prelu": - return nn.PReLU() - if activation == "selu": - return F.selu - - raise RuntimeError(f"activation should be relu/gelu, not {activation}.") - - -def gen_sineembed_for_position(pos_tensor): - # n_query, bs, _ = pos_tensor.size() - # sineembed_tensor = torch.zeros(n_query, bs, 256) - scale = 2 * math.pi - dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device) - dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode='floor')) / 128) - x_embed = pos_tensor[:, :, 0] * scale - y_embed = pos_tensor[:, :, 1] * scale - pos_x = x_embed[:, :, None] / dim_t - pos_y = y_embed[:, :, None] / dim_t - pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) - pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2) - if pos_tensor.size(-1) == 2: - pos = torch.cat((pos_y, pos_x), dim=2) - elif pos_tensor.size(-1) == 4: - w_embed = pos_tensor[:, :, 2] * scale - pos_w = w_embed[:, :, None] / dim_t - pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2) - - h_embed = pos_tensor[:, :, 3] * scale - pos_h = h_embed[:, :, None] / dim_t - pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2) - - pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2) - else: - raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1))) - return pos - - -class ContrastiveEmbed(nn.Module): - def __init__(self, max_text_len=256): - """ - Args: - max_text_len: max length of text. 
- """ - super().__init__() - self.max_text_len = max_text_len - - def forward(self, x, text_dict): - """_summary_ - - Args: - x (_type_): _description_ - text_dict (_type_): _description_ - { - 'encoded_text': encoded_text, # bs, 195, d_model - 'text_token_mask': text_token_mask, # bs, 195 - # True for used tokens. False for padding tokens - } - Returns: - _type_: _description_ - """ - assert isinstance(text_dict, dict) - - y = text_dict["encoded_text"] - text_token_mask = text_dict["text_token_mask"] - - res = x @ y.transpose(-1, -2) - res.masked_fill_(~text_token_mask[:, None, :], float("-inf")) - - # padding to max_text_len - new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device) - new_res[..., : res.shape[-1]] = res - - return new_res diff --git a/spaces/merve/data-leak/source/anonymization/make-gs.js b/spaces/merve/data-leak/source/anonymization/make-gs.js deleted file mode 100644 index 4eb1aaeffeb2a69e726a9d452d7eea7b3352b318..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/source/anonymization/make-gs.js +++ /dev/null @@ -1,105 +0,0 @@ -window.makeGS = function(){ - var prevSlideIndex = -1 - function updateSlide(i){ - var slide = slides[i] - if (!slide) return - - d3.select('.tooltip').classed('tooltip-hidden', true) - - var dur = 500 - - sel.student.transition('xKey').duration(dur).delay(dur ? slide.circleDelayFn : 0) - .translate(d => (d.isAdditionalStudent && slide.xKey != 'plagerizedShifted') ? [0,0]: d.pos[slide.xKey]) - - - if (sel.rectAt[slide.xKey]){ - sel.uniqueBox.transition('at').duration(dur) - .delay(d => dur ? slide.circleDelayFn(d.d0) : 0) - .at(sel.rectAt[slide.xKey]) - .translate(d => d.d0.group[slide.xKey].pos) - } - - sel.uniqueBox.transition().duration(dur) - .st({opacity: slide.showUniqueBox ? 1 : 0}) - - sel.uniqueSeasonBox.transition() - .delay((d, i) => slide.showUniqueSeasonBox ? dur*2 + i*40 : 0).duration(slide.showUniqueSeasonBox ? 0 : dur) - .st({opacity: slide.showUniqueSeasonBox ? 1 : 0}) - - - if (sliders.headsProb != slide.headsProbTarget && slide.animateHeadsProbSlider != -1){ - var headI = d3.interpolate(sliders.headsProb, slide.headsProbTarget) - if (window.headSliderTimer) window.headSliderTimer.stop() - window.headSliderTimer = d3.timer(ms => { - var dur = slide.animateHeadsProbSlider ? 2000 : 1 - var t = d3.easeCubicInOut(d3.clamp(0, ms/dur, 1)) - sliders.updateHeadsProb(headI(t)) - if (t == 1) headSliderTimer.stop() - }) - } - - if (sliders.population != slide.populationTarget){ - var popI = d3.interpolate(sliders.population, slide.populationTarget) - if (window.popSliderTimer) window.popSliderTimer.stop() - window.popSliderTimer = d3.timer(ms => { - var dur = slide.animatePopulationSlider ? 2000 : 1 - var t = d3.easeCubicInOut(d3.clamp(0, ms/dur, 1)) - sliders.updatePopulation(Math.round(popI(t)/2)*2) - if (t == 1) popSliderTimer.stop() - }) - } - - axii.stateAxis.transition().duration(dur/2) - .st({opacity: slide.showStateAxis ? 1 : 0}) - axii.ageAxis.transition().duration(dur/2) - .st({opacity: slide.showAgeAxis ? 1 : 0}) - axii.seasonAxis.transition().duration(dur/2) - .st({opacity: slide.showSeasonAxis ? 1 : 0}) - axii.headAxis.transition().duration(dur/2) - .st({opacity: slide.showHeadAxis ? 1 : 0}) - axii.headCaptionAxis.transition().duration(dur/2) - .st({opacity: slide.showHeadCaptionAxis ? 1 : 0}) - estimates.axisSel.transition().delay(dur).duration(dur/2) - .st({opacity: slide.showHistogramAxis ? 
1 : 0}) - estimates.activeSel.transition().delay(dur).duration(dur/2) - .st({opacity: slide.showHistogramAxis ? 1 : 0}) - // axii.estimateAxis.transition().delay(dur).duration(dur/2) - // .st({opacity: slide.showEstimate && !slide.enterHistogram ? 1 : 0}) - // axii.plagerizedAxis.transition().delay(dur).duration(dur/2) - // .st({opacity: slide.showPlagerizedAxis ? 1 : 0}) - - - annotationSel.transition().duration(dur/2) - .st({opacity: d => i == d.slide ? 1 : 0}) - - estimates.containerSel.transition('xKey').duration(dur/2) - .st({opacity: slide.showHistogram ? 1 : 0}) - - if (slide.enterHistogram){ - estimates.render(true) - } else { - window.flipAllCoinsTimer._time = Infinity - } - if (slide.enterHistogram === 0) estimates.estimateSel.classed('active', 1) - - - // Display the default coin flip state if the histogram is not visible. - sel.flipCircle.transition().duration(dur) - .at({transform: d => { - return slide.showFlipCircle && d.coinVals[estimates.active.index] < sliders.headsProb ? 'scale(1)' : 'scale(.1)'}}) - - prevSlideIndex = i - slides.curSlide = slide - } - - var gs = d3.graphScroll() - .container(d3.select('.container-1')) - .graph(d3.selectAll('container-1 #graph')) - .eventId('uniqueId1') - .sections(d3.selectAll('.container-1 #sections > div')) - .offset(300) - .on('active', updateSlide) -} - - -if (window.init) window.init() diff --git a/spaces/merve/hidden-bias/public/uncertainty-calibration/util.js b/spaces/merve/hidden-bias/public/uncertainty-calibration/util.js deleted file mode 100644 index a0ce5b12a2a642f1186cc4004e90b046a89611f8..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/public/uncertainty-calibration/util.js +++ /dev/null @@ -1,38 +0,0 @@ -window.initUtil = function(){ - function addAxisLabel(c, xText, yText, xOffset=40, yOffset=-40){ - c.svg.select('.x').append('g') - .translate([c.width/2, xOffset]) - .append('text.axis-label') - .text(xText) - .at({textAnchor: 'middle'}) - .st({fill: '#000', fontSize: 14, fontFamily: 'sans-serif'}) - - c.svg.select('.y') - .append('g') - .translate([yOffset, c.height/2]) - .append('text.axis-label') - .text(yText) - .at({textAnchor: 'middle', transform: 'rotate(-90)'}) - .st({fill: '#000', fontSize: 14, fontFamily: 'sans-serif'}) - } - - function ggPlotBg(c, isBlack=true){ - if (isBlack){ - c.svg.append('rect.bg-rect') - .at({width: c.width, height: c.height, fill: '#eee'}) - .lower() - } - - c.svg.selectAll('.tick').selectAll('line').remove() - c.svg.selectAll('.y .tick') - .append('path').at({d: 'M 0 0 H ' + c.width, stroke: '#fff', strokeWidth: 1}) - c.svg.selectAll('.y text').at({x: -3}) - c.svg.selectAll('.x .tick') - .append('path').at({d: 'M 0 0 V -' + c.height, stroke: '#fff', strokeWidth: 1}) - } - - - return {addAxisLabel, ggPlotBg} -} - -if (window.init) window.init() \ No newline at end of file diff --git a/spaces/merve/measuring-fairness/source/anonymization/style-graph-scroll.css b/spaces/merve/measuring-fairness/source/anonymization/style-graph-scroll.css deleted file mode 100644 index 7680e8c43222b6993d2bedfe43a682236680541e..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/source/anonymization/style-graph-scroll.css +++ /dev/null @@ -1,160 +0,0 @@ -/** { border: 1px solid #f00; }*/ - - -#container{ - position: relative; - width: auto; - margin-left: -25px; - /*margin-bottom: 100px;*/ -} - -#sections{ - width: 330px; - pointer-events: none; -} - -#sections > div{ - background: white; - opacity: .2; - margin-bottom: 400px; - line-height: 1.4em; - 
transition: opacity .2s; - pointer-events: all; -} -#sections > div:last-child{ - height: 480px; - margin-bottom: 0px; -} -#sections > div.graph-scroll-active{ - opacity: 1; -} - -#graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 580px; -} - -.slider-outer { - display: block; - max-width: 300px; -} - -@media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - #graph{ - width: 100%; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > div{ - position: relative; - left:12px; - } - - #sections{ - width: auto; - position: relative; - margin: 0px auto; - } - - #sections > div{ - background: rgba(255,255,255,.8); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -260px; - } - - #sections > div:last-child{ - height: auto; - } - - #sections h3{ - margin-top: .5em; - } - - /* Adjust buttons for mobile. */ - - .button-container{ - text-align: center; - left:0px; - } - - /* Adjust sliders for mobile. */ - input[type="range" i] { - width: 280px; - } - .slider-label-container{ - width: 145px; - /* display: inline-block; */ - } - - .slide-container-heads-prob, .slide-container-population { - text-align: center; - } - - .slider-container { - margin-bottom: 5px; - text-align: center; - width: 300px; - /* display:inline-block; */ - } - - .slider-outer { - text-align: center; - display: flex; - max-width: 300px; - } - - .headsProb, .population { - margin-left: 15px; - } - - .slide-container-population { - margin-bottom: -10px; - } - - .pointer div { - left: 10px; - top: 37px; - } - - /* Adjust post summary test for mobile. */ - .post-summary{ - margin-left: 8px; - margin-bottom: 60px; - margin-top: 40px; - } - -} - -#graph > div{ - margin: 20 35px; -} - - -#end{ - height: 15vh; -} - diff --git a/spaces/merve/uncertainty-calibration/public/anonymization/make-slides.js b/spaces/merve/uncertainty-calibration/public/anonymization/make-slides.js deleted file mode 100644 index 3feff55ba9248cee61cd7ec881fade8ef661e67c..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/anonymization/make-slides.js +++ /dev/null @@ -1,98 +0,0 @@ -window.makeSlides = function(){ - var slides = [ - { - xKey: 'grid', - circleDelayFn: d => axii.ageScale(d.age), - showFlipRect: 0, - populationTarget: 144, - headsProbTarget: .5, - }, - { - xKey: 'age', - showAgeAxis: 1, - }, - { - xKey: 'ageState', - showStateAxis: 1, - }, - { - showUniqueBox: 1 - }, - { - xKey: 'ageStateSeason', - showUniqueBox: 1, - showUniqueSeasonBox: 1, - showSeasonAxis: 1, - }, - { - xKey: 'heads', - showUniqueBox: 0, - showUniqueSeasonBox: 0, - showSeasonAxis: 0, - showAgeAxis: 0, - showStateAxis: 0, - showHeadAxis: 1, - }, - { - showFlipCircle: 1, - showHeadCaptionAxis: 1, - }, - - // Flip coin - { - xKey: 'plagerizedShifted', - showHeadAxis: 0, - showHeadCaptionAxis: 0, - showHistogramAxis: 1, - }, - - // Exactly how far off can these estimates be after adding noise? Flip more coins to see the distribution. - { - enterHistogram: 1, - showHistogram: 1, - // showPlagerizedAxis: 0, - showEstimate: 1, - }, - - // Reducing the random noise increases our point estimate, but risks leaking information about students. 
- { - animateHeadsProbSlider: 1, - animatePopulationSlider: 1, - enterHistogram: 0, - name: 'noise', - headsProbTarget: .35, - }, - - // If we collect information from lots of people, we can have high accuracy and protect everyone's privacy. - { - showEstimate: 0, - showAllStudents: 1, - name: 'population', - animateHeadsProbSlider: -1, - animatePopulationSlider: 1, - populationTarget: 400, - }, - - ] - - var keys = [] - slides.forEach((d, i) => { - keys = keys.concat(d3.keys(d)) - d.index = i - }) - _.uniq(keys).forEach(str => { - var prev = null - slides.forEach(d => { - if (typeof(d[str]) === 'undefined'){ - d[str] = prev - } - prev = d[str] - }) - }) - - return slides -} - - - -if (window.init) window.init() diff --git a/spaces/ml6team/logo-generator/dalle/models/stage2/transformer.py b/spaces/ml6team/logo-generator/dalle/models/stage2/transformer.py deleted file mode 100644 index b330fbcf5b50c7621642603e96d0c8172b1fedfe..0000000000000000000000000000000000000000 --- a/spaces/ml6team/logo-generator/dalle/models/stage2/transformer.py +++ /dev/null @@ -1,255 +0,0 @@ -# ------------------------------------------------------------------------------------ -# Minimal DALL-E -# Copyright (c) 2021 KakaoBrain. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------ -# Modified from minGPT (https://github.com/karpathy/minGPT) -# Copyright (c) 2020 Andrej Karpathy. All Rights Reserved. -# ------------------------------------------------------------------------------------ - -import torch -import torch.nn as nn -from typing import Optional, Tuple, List -from torch.cuda.amp import autocast -from omegaconf import OmegaConf -from .layers import Block - - -class Transformer1d(nn.Module): - - def __init__(self, - vocab_size_txt: int, - vocab_size_img: int, - hparams: OmegaConf) -> None: - super().__init__() - assert hparams.n_layers == hparams.n_dense_layers - - # input embedding for image and text - self.tok_emb_img = nn.Embedding(vocab_size_img, hparams.embed_dim) - self.tok_emb_txt = nn.Embedding(vocab_size_txt, hparams.embed_dim) - - self.pos_emb_img = nn.Embedding(hparams.ctx_len_img, hparams.embed_dim) - self.pos_emb_txt = nn.Embedding(hparams.ctx_len_txt, hparams.embed_dim) - - self.drop = nn.Dropout(hparams.embd_pdrop) - - # transformer blocks - self.blocks = [Block(ctx_len=hparams.ctx_len_img + hparams.ctx_len_txt, - embed_dim=hparams.embed_dim, - n_heads=hparams.n_heads, - mlp_bias=hparams.mlp_bias, - attn_bias=hparams.attn_bias, - resid_pdrop=hparams.resid_pdrop, - attn_pdrop=hparams.attn_pdrop, - gelu_use_approx=hparams.gelu_use_approx) for i in range(1, hparams.n_layers+1)] - self.blocks = nn.Sequential(*self.blocks) - - # heads for image and text - self.ln_f = nn.LayerNorm(hparams.embed_dim) - self.head_img = nn.Linear(hparams.embed_dim, vocab_size_img, bias=False) - self.head_txt = nn.Linear(hparams.embed_dim, vocab_size_txt, bias=False) - - self.ctx_len_img = hparams.ctx_len_img - self.ctx_len_txt = hparams.ctx_len_txt - self.n_layers = hparams.n_layers - - self.apply(self._init_weights) - - def _init_weights(self, module: nn.Module) -> None: - if isinstance(module, (nn.Linear, nn.Embedding)): - module.weight.data.normal_(mean=0.0, std=0.02) - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def forward(self, - images: 
torch.LongTensor, - texts: torch.LongTensor, - pos_images: torch.LongTensor, - pos_texts: torch.LongTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor]: - B, T = images.shape - _, N = texts.shape - - assert T <= self.ctx_len_img, "Already reached the maximum context length (image)." - assert N == self.ctx_len_txt, "Already reached the maximum context length (text)." - - texts = self.tok_emb_txt(texts) - images = self.tok_emb_img(images) - - texts = texts + self.pos_emb_txt(pos_texts) - images = images + self.pos_emb_img(pos_images) - - x = torch.cat([texts, images], axis=1).contiguous() - x = self.drop(x) - x = self.blocks(x) - x = self.ln_f(x) - - texts = x[:, :N-1].contiguous() - images = x[:, N-1:-1].contiguous() - - logits_txt = self.head_txt(texts) - logits_img = self.head_img(images) - return logits_img, logits_txt - - @torch.no_grad() - def sampling(self, - images: torch.LongTensor, - texts: torch.LongTensor, - pos_images: torch.LongTensor, - pos_texts: torch.LongTensor, - use_fp16: bool = True, - past: Optional[List[torch.Tensor]] = None) -> Tuple[torch.FloatTensor, List[torch.FloatTensor]]: - _, N = texts.shape - assert N == self.ctx_len_txt, "Already reached the maximum context length (text)." - - with autocast(enabled=use_fp16): - if images is None: - assert past is None - - texts = self.tok_emb_txt(texts) - x = texts + self.pos_emb_txt(pos_texts) - x = self.drop(x) - - presents = [] - for i, block in enumerate(self.blocks): - x, present = block.sample(x, layer_past=None) - presents.append(present) - x = self.ln_f(x) - x = x[:, N-1].contiguous() - logits = self.head_img(x) - else: - if past is None: - texts = self.tok_emb_txt(texts) - images = self.tok_emb_img(images) - texts = texts + self.pos_emb_txt(pos_texts) - images = images + self.pos_emb_img(pos_images) - x = torch.cat([texts, images], axis=1).contiguous() - else: - images = self.tok_emb_img(images) - x = images + self.pos_emb_img(pos_images) - x = self.drop(x) - - if past is not None: - past = torch.cat(past, dim=-2) - presents = [] - for i, block in enumerate(self.blocks): - x, present = block.sample(x, layer_past=None if past is None else past[i]) - presents.append(present) - x = self.ln_f(x) - x = x[:, -1].contiguous() - logits = self.head_img(x) - return logits, presents - - def from_ckpt(self, path: str) -> None: - ckpt = torch.load(path, map_location='cpu')['state_dict'] - self.load_state_dict(ckpt, strict=True) - print(f'{path} succesfully restored..') - - -class iGPT(nn.Module): - def __init__(self, - vocab_size_img: int, - use_cls_cond: bool, - hparams: OmegaConf) -> None: - super().__init__() - self.use_cls_cond = use_cls_cond - - # sos token embedding - if self.use_cls_cond: - self.sos = nn.Embedding(hparams.n_classes, hparams.embed_dim) - else: - self.sos = nn.Parameter(torch.randn(1, 1, hparams.embed_dim)) - - # input embedding - self.tok_emb_img = nn.Embedding(vocab_size_img, hparams.embed_dim) - self.pos_emb_img = nn.Embedding(hparams.ctx_len_img, hparams.embed_dim) - - self.drop = nn.Dropout(hparams.embd_pdrop) - - # transformer blocks - self.blocks = [Block(ctx_len=hparams.ctx_len_img + 1, - embed_dim=hparams.embed_dim, - n_heads=hparams.n_heads, - mlp_bias=hparams.mlp_bias, - attn_bias=hparams.attn_bias, - resid_pdrop=hparams.resid_pdrop, - attn_pdrop=hparams.attn_pdrop, - gelu_use_approx=hparams.gelu_use_approx) for i in range(1, hparams.n_layers+1)] - self.blocks = nn.Sequential(*self.blocks) - - # head - self.ln_f = nn.LayerNorm(hparams.embed_dim) - self.head = nn.Linear(hparams.embed_dim, 
vocab_size_img, bias=False) - - self.ctx_len_img = hparams.ctx_len_img - self.n_layers = hparams.n_layers - - self.apply(self._init_weights) - - def _init_weights(self, module: nn.Module) -> None: - if isinstance(module, (nn.Linear, nn.Embedding)): - module.weight.data.normal_(mean=0.0, std=0.02) - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - @torch.no_grad() - def sampling(self, - sos: torch.FloatTensor, - codes: torch.LongTensor, - pos_codes: torch.LongTensor, - n_samples: int = 16, - use_fp16: bool = True, - past: Optional[torch.Tensor] = None) -> Tuple[torch.FloatTensor, List[torch.FloatTensor]]: - with autocast(enabled=use_fp16): - if codes is None: - assert past is None - xs = self.drop(sos) - presents = [] - for i, block in enumerate(self.blocks): - xs, present = block.sample(xs, layer_past=None) - presents.append(present) - xs = self.ln_f(xs) - logits = self.head(xs)[:, -1] - else: - if past is None: - xs = self.tok_emb_img(codes) + self.pos_emb_img(pos_codes) - xs = torch.cat([sos, xs], dim=1) - else: - xs = self.tok_emb_img(codes) + self.pos_emb_img(pos_codes) - xs = self.drop(xs) - - past = torch.cat(past, dim=-2) if past is not None else past - presents = [] - for i, block in enumerate(self.blocks): - xs, present = block.sample(xs, layer_past=None if past is None else past[i]) - presents.append(present) - - xs = self.ln_f(xs) - logits = self.head(xs)[:, -1] - return logits, presents - - def forward(self, - codes: torch.LongTensor, - labels: Optional[torch.LongTensor] = None) -> torch.FloatTensor: - B, T = codes.shape - xps = torch.arange(T, device=codes.device).repeat((B, 1)) - sos = self.sos.repeat((B, 1, 1)) if labels is None else self.sos(labels).unsqueeze(1) - - h = self.tok_emb_img(codes) + self.pos_emb_img(xps) - h = torch.cat([sos, h[:, :-1]], dim=1).contiguous() - - h = self.drop(h) - h = self.blocks(h) - h = self.ln_f(h) - logits = self.head(h) - return logits - - def from_ckpt(self, path: str, strict: bool = True) -> None: - ckpt = torch.load(path, map_location='cpu')['state_dict'] - self.load_state_dict(ckpt, strict=strict) - print(f'{path} successfully restored..') diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/third_party/CLIP/model-card.md b/spaces/mmlab-ntu/Segment-Any-RGBD/third_party/CLIP/model-card.md deleted file mode 100644 index 2d22e25bea89fdbccdaa2809fbeb83e0a7cfaa07..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/Segment-Any-RGBD/third_party/CLIP/model-card.md +++ /dev/null @@ -1,120 +0,0 @@ -# Model Card: CLIP - -Inspired by [Model Cards for Model Reporting (Mitchell et al.)](https://arxiv.org/abs/1810.03993) and [Lessons from Archives (Jo & Gebru)](https://arxiv.org/pdf/1912.10389.pdf), we’re providing some accompanying information about the multimodal model. - -## Model Details - -The CLIP model was developed by researchers at OpenAI to learn about what contributes to robustness in computer vision tasks. The model was also developed to test the ability of models to generalize to arbitrary image classification tasks in a zero-shot manner. It was not developed for general model deployment - to deploy models like CLIP, researchers will first need to carefully study their capabilities in relation to the specific context they’re being deployed within. 
- -### Model Date - -January 2021 - -### Model Type - -The base model uses a ResNet50 with several modifications as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss. There is also a variant of the model where the ResNet image encoder is replaced with a Vision Transformer. - -### Model Versions - -Initially, we’ve released one CLIP model based on the Vision Transformer architecture equivalent to ViT-B/32, along with the RN50 model, using the architecture equivalent to ResNet-50. - -As part of the staged release process, we have also released the RN101 model, as well as RN50x4, a RN50 scaled up 4x according to the [EfficientNet](https://arxiv.org/abs/1905.11946) scaling rule. In July 2021, we additionally released the RN50x16 and ViT-B/16 models. - -Please see the paper linked below for further details about their specification. - -### Documents - -- [Blog Post](https://openai.com/blog/clip/) -- [CLIP Paper](https://arxiv.org/abs/2103.00020) - - - -## Model Use - -### Intended Use - -The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models - the CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. - -#### Primary intended uses - -The primary intended users of these models are AI researchers. - -We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models. - -### Out-of-Scope Use Cases - -**Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task specific testing especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful. - -Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use. - -Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases. - - - -## Data - -The model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet which tend to skew towards more developed nations, and younger, male users. 
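As an aside on the Model Type section above: the symmetric contrastive objective it describes — maximizing the similarity of matched (image, text) pairs against in-batch negatives — can be sketched in a few lines. This illustrates the technique only, not the released training code; the temperature value is an assumption.

```python
# Illustrative CLIP-style symmetric contrastive loss over a batch of
# paired image/text embeddings; matched pairs lie on the diagonal.
import torch
import torch.nn.functional as F

def contrastive_loss(image_emb, text_emb, temperature=0.07):
    image_emb = F.normalize(image_emb, dim=-1)  # cosine similarity via dot product
    text_emb = F.normalize(text_emb, dim=-1)
    logits = image_emb @ text_emb.t() / temperature        # [B, B]
    targets = torch.arange(logits.size(0), device=logits.device)
    # Cross-entropy in both directions: image->text and text->image.
    return (F.cross_entropy(logits, targets) +
            F.cross_entropy(logits.t(), targets)) / 2
```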
- -### Data Mission Statement - -Our goal with building this dataset was to test out robustness and generalizability in computer vision tasks. As a result, the focus was on gathering large quantities of data from different publicly-available internet data sources. The data was gathered in a mostly non-interventionist manner. However, we only crawled websites that had policies against excessively violent and adult images and allowed us to filter out such content. We do not intend for this dataset to be used as the basis for any commercial or deployed model and will not be releasing the dataset. - - - -## Performance and Limitations - -### Performance - -We have evaluated the performance of CLIP on a wide range of benchmarks across a variety of computer vision datasets such as OCR to texture recognition to fine-grained classification. The paper describes model performance on the following datasets: - -- Food101 -- CIFAR10 -- CIFAR100 -- Birdsnap -- SUN397 -- Stanford Cars -- FGVC Aircraft -- VOC2007 -- DTD -- Oxford-IIIT Pet dataset -- Caltech101 -- Flowers102 -- MNIST -- SVHN -- IIIT5K -- Hateful Memes -- SST-2 -- UCF101 -- Kinetics700 -- Country211 -- CLEVR Counting -- KITTI Distance -- STL-10 -- RareAct -- Flickr30 -- MSCOCO -- ImageNet -- ImageNet-A -- ImageNet-R -- ImageNet Sketch -- ObjectNet (ImageNet Overlap) -- Youtube-BB -- ImageNet-Vid - -## Limitations - -CLIP and our analysis of it have a number of limitations. CLIP currently struggles with respect to certain tasks such as fine grained classification and counting objects. CLIP also poses issues with regards to fairness and bias which we discuss in the paper and briefly in the next section. Additionally, our approach to testing CLIP also has an important limitation- in many cases we have used linear probes to evaluate the performance of CLIP and there is evidence suggesting that linear probes can underestimate model performance. - -### Bias and Fairness - -We find that the performance of CLIP - and the specific biases it exhibits - can depend significantly on class design and the choices one makes for categories to include and exclude. We tested the risk of certain kinds of denigration with CLIP by classifying images of people from [Fairface](https://arxiv.org/abs/1908.04913) into crime-related and non-human animal categories. We found significant disparities with respect to race and gender. Additionally, we found that these disparities could shift based on how the classes were constructed. (Details captured in the Broader Impacts Section in the paper). - -We also tested the performance of CLIP on gender, race and age classification using the Fairface dataset (We default to using race categories as they are constructed in the Fairface dataset.) in order to assess quality of performance across different demographics. We found accuracy >96% across all races for gender classification with ‘Middle Eastern’ having the highest accuracy (98.4%) and ‘White’ having the lowest (96.5%). Additionally, CLIP averaged ~93% for racial classification and ~63% for age classification. Our use of evaluations to test for gender, race and age classification as well as denigration harms is simply to evaluate performance of the model across people and surface potential risks and not to demonstrate an endorsement/enthusiasm for such tasks. 
- - - -## Feedback - -### Where to send questions or comments about the model - -Please use [this Google Form](https://forms.gle/Uv7afRH5dvY34ZEs9) diff --git a/spaces/mrfakename/tts/app.py b/spaces/mrfakename/tts/app.py deleted file mode 100644 index 39753d351e702d51a12fc02bee2905efbb985c15..0000000000000000000000000000000000000000 --- a/spaces/mrfakename/tts/app.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -Copyright (c) 2022 mrfakename. All rights reserved. -Unauthorized reproduction of this program is strictly prohibited. -HuggingFace: https://huggingface.co/spaces/mrfakename/tts -GitHub: https://github.com/fakerybakery/streamlit-coqui - -Version 2.6.1 (December 6, 2022) -""" -import tempfile - -import streamlit as st - -from neon_tts_plugin_coqui import CoquiTTS - - -LANGUAGES = list(CoquiTTS.langs.keys()) -default_lang = "en" - - - -title = "Neural Text-to-Speech Online" -description = "Convert your text to speech free using Coqui TTS!" -info = """ -## About - -Ever wanted to generate realistic-sounding text to speech quickly? Well now you can with this new online tool! Generate neural text to speech quickly using Coqui TTS! - -Created by [mrfakename](https://mrfake.name/). - -## License - -Feel free to use generated audio freely (in videos, audio messages, etc). - -The code is copyrighted. Please view app.py for license details. - -## Credits & Disclaimer - -This text to speech tool was influenced by [this](https://huggingface.co/spaces/Gradio-Blocks/neon-tts-plugin-coqui) and uses [this](https://github.com/NeonGeckoCom/neon-tts-plugin-coqui) and [this](https://github.com/coqui-ai/TTS. - -DISCLAIMER: mrfakename is in no way affiliated with Coqui TTS, HuggingFace, GitHub, or NeonAI. -""" - - - -coquiTTS = CoquiTTS() - - -def tts(text: str, language: str): - print(text, language) - # return output - with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp: - coquiTTS.get_tts(text, fp, speaker = {"language" : language}) - return fp.name - -st.title(title) -st.subheader(description) -st.markdown(info) -lang = st.selectbox('Please select a language:', (LANGUAGES)) -txt = st.text_area("Text") -if st.button('Submit'): - with st.spinner('Please wait...'): - audio_file = open(tts(txt, lang), 'rb') - audio_bytes = audio_file.read() - st.balloons() - st.success("Yay! Check out your TTS! P.S. Why not follow my GitHub, I'm @fakerybakery on GitHub!") - st.audio(audio_bytes, format='audio/wav') \ No newline at end of file diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/latent_depth/latent_depth_src/multilingual_translation_latent_depth.py b/spaces/mshukor/UnIVAL/fairseq/examples/latent_depth/latent_depth_src/multilingual_translation_latent_depth.py deleted file mode 100644 index 8cc2a7174b765b7ad8808489196e12082a91a2d7..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/latent_depth/latent_depth_src/multilingual_translation_latent_depth.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from fairseq.tasks import register_task -from fairseq.tasks.multilingual_translation import MultilingualTranslationTask -from fairseq.utils import safe_hasattr - -from .loss.latent_depth import LatentLayersKLLoss, LatentLayersSparsityLoss - - -@register_task("multilingual_translation_latent_depth") -class MultilingualTranslationTaskLatentDepth(MultilingualTranslationTask): - """A task for multiple translation with latent depth. - - See `"Deep Transformer with Latent Depth" - (Li et al., 2020) `_. - """ - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - # fmt: off - MultilingualTranslationTask.add_args(parser) - parser.add_argument('--encoder-latent-layer', action='store_true', help='latent layer selection in encoder') - parser.add_argument('--decoder-latent-layer', action='store_true', help='latent layer selection in decoder') - parser.add_argument('--target-layers', default=-1, type=int, - help='number of effective layers to learn; -1 means no constraint') - parser.add_argument('--sparsity-weight', default=0.0, type=float, - help='weight for sparsity loss') - parser.add_argument('--share-weight', default=0.0, type=float, - help='weight for sharing loss') - parser.add_argument('--soft-update', default=1, type=int, - help='number of updates with soft sampling') - parser.add_argument('--anneal-updates', default=1, type=int, - help='number of updates to anneal the KL loss weight') - parser.add_argument('--prior', default="uniform", type=str, - help='prior used for computing KL loss') - # fmt: on - - def __init__(self, args, dicts, training): - super().__init__(args, dicts, training) - self.src_langs, self.tgt_langs = zip( - *[(lang.split("-")[0], lang.split("-")[1]) for lang in args.lang_pairs] - ) - if self.training and self.encoder_latent_layer: - assert self.args.share_encoders - if self.training and self.decoder_latent_layer: - assert self.args.share_decoders - if training or self.encoder_latent_layer or self.decoder_latent_layer: - self.lang_pairs = args.lang_pairs - else: - self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)] - self.eval_lang_pairs = self.lang_pairs - self.model_lang_pairs = self.lang_pairs - if self.training and (self.encoder_latent_layer or self.decoder_latent_layer): - self.kl_loss = LatentLayersKLLoss(self.args) - self.sparsity_loss = LatentLayersSparsityLoss(self.args) - - def _per_lang_pair_train_loss( - self, lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad - ): - src, tgt = lang_pair.split("-") - if self.encoder_latent_layer: - src_lang_idx = self.src_lang_idx_dict[src] - model.models[lang_pair].encoder.set_lang_idx(src_lang_idx) - model.models[lang_pair].encoder.layer_select.hard_select = ( - update_num > self.args.soft_update - ) - if self.decoder_latent_layer: - tgt_lang_idx = self.tgt_lang_idx_dict[tgt] - model.models[lang_pair].decoder.set_lang_idx(tgt_lang_idx) - model.models[lang_pair].decoder.layer_select.hard_select = ( - update_num > self.args.soft_update - ) - - loss, sample_size, logging_output = criterion( - model.models[lang_pair], sample[lang_pair] - ) - if self.encoder_latent_layer: - none_samples = sum( - 1 if x is None else 0 - for x in model.models[lang_pair].encoder.layer_select.layer_samples - ) - if none_samples == 0 or self.args.prior != "agged_posterior": - loss += self.kl_loss( - model.models[lang_pair].encoder.layer_select.layer_samples, - src_lang_idx, - update_num, - sample_size, - ) - if self.decoder_latent_layer: - none_samples = sum( - 1 if x is 
None else 0 - for x in model.models[lang_pair].decoder.layer_select.layer_samples - ) - if none_samples == 0 or self.args.prior != "agged_posterior": - loss += self.kl_loss( - model.models[lang_pair].decoder.layer_select.layer_samples, - tgt_lang_idx, - update_num, - sample_size, - ) - if ignore_grad: - loss *= 0 - - if hasattr(self, "sparsity_loss") and self.sparsity_loss.is_valid(update_num): - # need to retain the graph if sparsity loss needs to be added - loss.backward(retain_graph=True) - else: - optimizer.backward(loss) - - return loss, sample_size, logging_output - - def train_step( - self, sample, model, criterion, optimizer, update_num, ignore_grad=False - ): - agg_loss, agg_sample_size, agg_logging_output = super().train_step( - sample, model, criterion, optimizer, update_num, ignore_grad - ) - # compute auxiliary loss from layere sparsity, based on all samples from all languages - if hasattr(self, "sparsity_loss") and self.sparsity_loss.is_valid(update_num): - sparsity_loss = 0 - if self.encoder_latent_layer: - sparsity_loss += self.sparsity_loss( - next( - iter(model.models.values()) - ).encoder.layer_select.layer_samples, - update_num, - agg_sample_size, - ) - if self.decoder_latent_layer: - sparsity_loss += self.sparsity_loss( - next( - iter(model.models.values()) - ).decoder.layer_select.layer_samples, - update_num, - agg_sample_size, - ) - if sparsity_loss > 0: - optimizer.backward(sparsity_loss) - return agg_loss, agg_sample_size, agg_logging_output - - def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample): - src, tgt = lang_pair.split("-") - if self.encoder_latent_layer: - src_lang_idx = self.src_lang_idx_dict[src] - model.models[lang_pair].encoder.set_lang_idx(src_lang_idx) - if self.decoder_latent_layer: - tgt_lang_idx = self.tgt_lang_idx_dict[tgt] - model.models[lang_pair].decoder.set_lang_idx(tgt_lang_idx) - loss, sample_size, logging_output = criterion( - model.models[lang_pair], sample[lang_pair] - ) - return loss, sample_size, logging_output - - def inference_step( - self, generator, models, sample, prefix_tokens=None, constraints=None - ): - if self.encoder_latent_layer or self.decoder_latent_layer: - for model in models: - if self.encoder_latent_layer: - assert model.encoder.layer_select is not None - src_lang_idx = self.src_lang_idx_dict[self.args.source_lang] - model.encoder.set_lang_idx(src_lang_idx) - if self.decoder_latent_layer: - assert model.decoder.layer_select is not None - tgt_lang_idx = self.tgt_lang_idx_dict[self.args.target_lang] - model.decoder.set_lang_idx(tgt_lang_idx) - return super().inference_step( - generator, models, sample, prefix_tokens, constraints - ) - - @property - def encoder_latent_layer(self): - return ( - safe_hasattr(self.args, "encoder_latent_layer") - and self.args.encoder_latent_layer - ) - - @property - def decoder_latent_layer(self): - return ( - safe_hasattr(self.args, "decoder_latent_layer") - and self.args.decoder_latent_layer - ) - - @property - def src_lang_idx_dict(self): - return {lang: lang_idx for lang_idx, lang in enumerate(self.src_langs)} - - @property - def tgt_lang_idx_dict(self): - return {lang: lang_idx for lang_idx, lang in enumerate(self.tgt_langs)} diff --git a/spaces/multimodalart/TAV-poli-2/inference.py b/spaces/multimodalart/TAV-poli-2/inference.py deleted file mode 100644 index 65ea5c55ce0fdacb37cf6945699824368bb5ff70..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/TAV-poli-2/inference.py +++ /dev/null @@ -1,109 +0,0 @@ -from __future__ import annotations - 
-import gc -import pathlib -import sys -import tempfile - -import gradio as gr -import imageio -import PIL.Image -import torch -from diffusers.utils.import_utils import is_xformers_available -from einops import rearrange -from huggingface_hub import ModelCard - -sys.path.append('Tune-A-Video') - -from tuneavideo.models.unet import UNet3DConditionModel -from tuneavideo.pipelines.pipeline_tuneavideo import TuneAVideoPipeline - - -class InferencePipeline: - def __init__(self, hf_token: str | None = None): - self.hf_token = hf_token - self.pipe = None - self.device = torch.device( - 'cuda:0' if torch.cuda.is_available() else 'cpu') - self.model_id = None - - def clear(self) -> None: - self.model_id = None - del self.pipe - self.pipe = None - torch.cuda.empty_cache() - gc.collect() - - @staticmethod - def check_if_model_is_local(model_id: str) -> bool: - return pathlib.Path(model_id).exists() - - @staticmethod - def get_model_card(model_id: str, - hf_token: str | None = None) -> ModelCard: - if InferencePipeline.check_if_model_is_local(model_id): - card_path = (pathlib.Path(model_id) / 'README.md').as_posix() - else: - card_path = model_id - return ModelCard.load(card_path, token=hf_token) - - @staticmethod - def get_base_model_info(model_id: str, hf_token: str | None = None) -> str: - card = InferencePipeline.get_model_card(model_id, hf_token) - return card.data.base_model - - def load_pipe(self, model_id: str) -> None: - if model_id == self.model_id: - return - base_model_id = self.get_base_model_info(model_id, self.hf_token) - unet = UNet3DConditionModel.from_pretrained( - model_id, - subfolder='unet', - torch_dtype=torch.float16, - use_auth_token=self.hf_token) - pipe = TuneAVideoPipeline.from_pretrained(base_model_id, - unet=unet, - torch_dtype=torch.float16, - use_auth_token=self.hf_token) - pipe = pipe.to(self.device) - if is_xformers_available(): - pipe.unet.enable_xformers_memory_efficient_attention() - self.pipe = pipe - self.model_id = model_id # type: ignore - - def run( - self, - model_id: str, - prompt: str, - video_length: int, - fps: int, - seed: int, - n_steps: int, - guidance_scale: float, - ) -> PIL.Image.Image: - if not torch.cuda.is_available(): - raise gr.Error('CUDA is not available.') - - self.load_pipe(model_id) - - generator = torch.Generator(device=self.device).manual_seed(seed) - out = self.pipe( - prompt, - video_length=video_length, - width=512, - height=512, - num_inference_steps=n_steps, - guidance_scale=guidance_scale, - generator=generator, - ) # type: ignore - - frames = rearrange(out.videos[0], 'c t h w -> t h w c') - frames = (frames * 255).to(torch.uint8).numpy() - - out_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) - writer = imageio.get_writer(out_file.name, fps=fps) - for frame in frames: - writer.append_data(frame) - writer.close() - - return out_file.name diff --git a/spaces/multimodalart/Tune-A-Video-Training-UI-poli/inference.py b/spaces/multimodalart/Tune-A-Video-Training-UI-poli/inference.py deleted file mode 100644 index 65ea5c55ce0fdacb37cf6945699824368bb5ff70..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/Tune-A-Video-Training-UI-poli/inference.py +++ /dev/null @@ -1,109 +0,0 @@ -from __future__ import annotations - -import gc -import pathlib -import sys -import tempfile - -import gradio as gr -import imageio -import PIL.Image -import torch -from diffusers.utils.import_utils import is_xformers_available -from einops import rearrange -from huggingface_hub import ModelCard - -sys.path.append('Tune-A-Video') 
- -from tuneavideo.models.unet import UNet3DConditionModel -from tuneavideo.pipelines.pipeline_tuneavideo import TuneAVideoPipeline - - -class InferencePipeline: - def __init__(self, hf_token: str | None = None): - self.hf_token = hf_token - self.pipe = None - self.device = torch.device( - 'cuda:0' if torch.cuda.is_available() else 'cpu') - self.model_id = None - - def clear(self) -> None: - self.model_id = None - del self.pipe - self.pipe = None - torch.cuda.empty_cache() - gc.collect() - - @staticmethod - def check_if_model_is_local(model_id: str) -> bool: - return pathlib.Path(model_id).exists() - - @staticmethod - def get_model_card(model_id: str, - hf_token: str | None = None) -> ModelCard: - if InferencePipeline.check_if_model_is_local(model_id): - card_path = (pathlib.Path(model_id) / 'README.md').as_posix() - else: - card_path = model_id - return ModelCard.load(card_path, token=hf_token) - - @staticmethod - def get_base_model_info(model_id: str, hf_token: str | None = None) -> str: - card = InferencePipeline.get_model_card(model_id, hf_token) - return card.data.base_model - - def load_pipe(self, model_id: str) -> None: - if model_id == self.model_id: - return - base_model_id = self.get_base_model_info(model_id, self.hf_token) - unet = UNet3DConditionModel.from_pretrained( - model_id, - subfolder='unet', - torch_dtype=torch.float16, - use_auth_token=self.hf_token) - pipe = TuneAVideoPipeline.from_pretrained(base_model_id, - unet=unet, - torch_dtype=torch.float16, - use_auth_token=self.hf_token) - pipe = pipe.to(self.device) - if is_xformers_available(): - pipe.unet.enable_xformers_memory_efficient_attention() - self.pipe = pipe - self.model_id = model_id # type: ignore - - def run( - self, - model_id: str, - prompt: str, - video_length: int, - fps: int, - seed: int, - n_steps: int, - guidance_scale: float, - ) -> PIL.Image.Image: - if not torch.cuda.is_available(): - raise gr.Error('CUDA is not available.') - - self.load_pipe(model_id) - - generator = torch.Generator(device=self.device).manual_seed(seed) - out = self.pipe( - prompt, - video_length=video_length, - width=512, - height=512, - num_inference_steps=n_steps, - guidance_scale=guidance_scale, - generator=generator, - ) # type: ignore - - frames = rearrange(out.videos[0], 'c t h w -> t h w c') - frames = (frames * 255).to(torch.uint8).numpy() - - out_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) - writer = imageio.get_writer(out_file.name, fps=fps) - for frame in frames: - writer.append_data(frame) - writer.close() - - return out_file.name diff --git a/spaces/mya-mya/SentenceMixer/dummymixer.py b/spaces/mya-mya/SentenceMixer/dummymixer.py deleted file mode 100644 index 66b93da405c7ac641699e6feaca251965b859d8c..0000000000000000000000000000000000000000 --- a/spaces/mya-mya/SentenceMixer/dummymixer.py +++ /dev/null @@ -1,6 +0,0 @@ -from mixer import Mixer - - -class DummyMixer(Mixer): - def mix_sentences(self, sentence_A: str, sentence_B: str, A_ratio: float, max_n_tokens: int = 140): - return f"This is a DummyMixer. 
{sentence_A=}, {sentence_B=} at {A_ratio=}, {max_n_tokens=}" diff --git a/spaces/nanom/to_passive_voice/css/style.css b/spaces/nanom/to_passive_voice/css/style.css deleted file mode 100644 index 2824040b802a76ef1a4cf3bc6fc13fcdda895714..0000000000000000000000000000000000000000 --- a/spaces/nanom/to_passive_voice/css/style.css +++ /dev/null @@ -1,228 +0,0 @@ -.container { - max-width: 90%; - margin: auto; -} - -h1, h2, h3, h4, h5, h6 { - margin-top: 0; - margin-bottom: 0.5rem; -} - -h1, h2, h3, h4, h5, h6, -.h1, .h2, .h3, .h4, .h5, .h6 { - margin-bottom: 0.5rem; - font-weight: 500; - line-height: 1.2; -} - -h1, .h1 { - font-size: 2.5rem; -} - -h2, .h2 { - font-size: 2rem; -} - -h3, .h3 { - font-size: 1.75rem; -} - -h4, .h4 { - font-size: 1.5rem; -} - -h5, .h5 { - font-size: 1.25rem; -} - -h6, .h6 { - font-size: 1rem; -} - -.badge { - display: inline-block; - padding: 0.25em 0.4em; - font-size: 75%; - font-weight: 700; - line-height: 1; - text-align: center; - white-space: nowrap; - vertical-align: baseline; - border-radius: 0.25rem; - transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; -} - -.badge-pill { - padding-right: 0.6em; - padding-left: 0.6em; - border-radius: 10rem; -} - -.badge-primary { - color: #fff; - background-color: #007bff; -} - -.badge-secondary { - color: #fff; - background-color: #6c757d; -} - -.badge-success { - color: #fff; - background-color: #28a745; -} - -.badge-info { - color: #fff; - background-color: #17a2b8; -} - -.badge-warning { - color: #212529; - background-color: #ffc107; -} - -.badge-danger { - color: #fff; - background-color: #dc3545; -} - -.badge-light { - color: #212529; - background-color: #f8f9fa; -} - -.badge-dark { - color: #fff; - background-color: #343a40; -} - -.alert { - position: relative; - padding: 0.75rem 1.25rem; - margin-bottom: 1rem; - border: 1px solid transparent; - border-radius: 0.25rem; -} - -.alert-primary { - color: #004085; - background-color: #cce5ff; - border-color: #b8daff; -} - -.alert-secondary { - color: #383d41; - background-color: #e2e3e5; - border-color: #d6d8db; -} - -.alert-success { - color: #155724; - background-color: #d4edda; - border-color: #c3e6cb; -} - -.alert-info { - color: #0c5460; - background-color: #d1ecf1; - border-color: #bee5eb; -} - -.alert-warning { - color: #856404; - background-color: #fff3cd; - border-color: #ffeeba; -} - -.alert-danger { - color: #721c24; - background-color: #f8d7da; - border-color: #f5c6cb; -} - -.alert-light { - color: #818182; - background-color: #fefefe; - border-color: #fdfdfe; -} - -.alert-dark { - color: #1b1e21; - background-color: #d6d8d9; - border-color: #c6c8ca; -} - -.btn { - display: inline-block; - font-weight: 400; - color: #212529; - text-align: center; - vertical-align: middle; - -webkit-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; - background-color: transparent; - border: 1px solid transparent; - padding: 0.375rem 0.75rem; - font-size: 1rem; - line-height: 1.5; - border-radius: 0.25rem; - transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; -} - -.btn-primary { - color: #fff; - background-color: #007bff; - border-color: #007bff; -} - -.btn-secondary { - color: #fff; - background-color: #6c757d; - border-color: #6c757d; -} - -.btn-success { - color: #fff; - background-color: #28a745; - border-color: #28a745; -} - -.btn-info { - color: #fff; - 
background-color: #17a2b8; - border-color: #17a2b8; -} - -.btn-warning { - color: #212529; - background-color: #ffc107; - border-color: #ffc107; -} - -.btn-danger { - color: #fff; - background-color: #dc3545; - border-color: #dc3545; -} - -.btn-light { - color: #212529; - background-color: #f8f9fa; - border-color: #f8f9fa; -} - -.btn-dark { - color: #fff; - background-color: #343a40; - border-color: #343a40; -} - -.btn-sm, .btn-group-sm > .btn { - padding: 0.25rem 0.5rem; - font-size: 0.875rem; - border-radius: 0.2rem; -} \ No newline at end of file diff --git a/spaces/nbiish/ghostDance/README.md b/spaces/nbiish/ghostDance/README.md deleted file mode 100644 index df4405db6caf1dfa9ee43d8ddc7e140ca5ba7241..0000000000000000000000000000000000000000 --- a/spaces/nbiish/ghostDance/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: GhostDance -emoji: -colorFrom: gray -colorTo: indigo -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/How To Write A Password Cracking Program In Java.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/How To Write A Password Cracking Program In Java.md deleted file mode 100644 index 01d96974f591c0d2a2682903d0a68335b28c8356..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/How To Write A Password Cracking Program In Java.md +++ /dev/null @@ -1,84 +0,0 @@ - -

How To Write A Password Cracking Program In Java

-

Password cracking is the process of finding out the original password from a hashed or encrypted one. Password cracking can be done for various purposes, such as testing the security of a system, recovering lost passwords, or hacking into someone else's account. In this article, we will show you how to write a simple password cracking program in Java using recursion and brute force.

-

What is recursion?

-

      Recursion is a technique in programming where a function calls itself until a base case is reached. A base case is a condition that stops the recursion and returns a value directly. Recursion suits problems that can be broken down into smaller, simpler subproblems of the same form.
      
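      As a quick illustration, here is the idea as a short Python sketch (Python only to keep it compact; the full program later in this article is written in Java):

      ```python
      # Recursive factorial: the base case stops the recursion,
      # every other call reduces the problem to a smaller subproblem.
      def factorial(n: int) -> int:
          if n <= 1:                   # base case
              return 1
          return n * factorial(n - 1)  # recursive case

      print(factorial(5))  # 120
      ```
      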

-

      



-

What is brute force?

-

Brute force is a method of solving problems by trying all possible combinations of inputs until a solution is found. Brute force is usually not efficient or elegant, but it can be effective for some problems that have no better algorithm. Brute force can also be used to test the strength of a password by trying all possible passwords until the correct one is found.
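      Here is a tiny Python sketch of that idea (the real cracking program below is in Java): simply enumerate every candidate string until one matches.

      ```python
      # Brute-force search over all 3-character strings of digits and upper-case letters.
      from itertools import product
      import string

      CHARSET = string.digits + string.ascii_uppercase
      secret = "AB1"  # illustrative target

      for candidate in product(CHARSET, repeat=len(secret)):
          guess = "".join(candidate)
          if guess == secret:
              print("found:", guess)
              break
      ```
      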

-

How to write a password cracking program in Java?

-

To write a password cracking program in Java, we need to do the following steps:

-
    -
  1. Create a class called Password that generates a random password of a given length using letters and digits.
  2. -
  3. Create a method called isPassword that compares a given string to the password and returns true if they are equal and false otherwise.
  4. -
  5. Create a recursive method called findPassword that takes a Password object and an integer length as parameters and returns the password if it is found or an empty string otherwise.
  6. -
  7. In the findPassword method, use a loop to iterate through all possible characters from 0 to Z and append them to a string.
  8. -
  9. Call the findPassword method recursively with the updated string and length until the length reaches zero or the password is found.
  10. -
  11. Print the result of the findPassword method.
  12. -
-

Here is an example code for the password cracking program in Java:

      -
      ```java
      import java.util.Random;

      public class PasswordCracker {

          // A class that generates a random password of a given length
          public static class Password {
              private static final String CHARSET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
              private String _password = "";

              // Constructor that generates a password from digits and upper-case letters
              public Password(int length) {
                  Random generator = new Random();
                  for (int i = 0; i < length; ++i) {
                      this._password += CHARSET.charAt(generator.nextInt(CHARSET.length()));
                  }
              }

              // Method that compares a given string to the password
              public boolean isPassword(String st) {
                  return st.equals(this._password);
              }
          }

          // The recursive method described in the steps above: start from an empty guess.
          public static String findPassword(Password p, int length) {
              return findPassword(p, length, "");
          }

          // Extend the partial guess with every allowed character until it reaches
          // the target length, then test the complete guess against the password.
          public static String findPassword(Password p, int length, String pswd) {
              if (pswd.length() == length) {
                  return p.isPassword(pswd) ? pswd : "";
              }
              for (int i = 0; i < Password.CHARSET.length(); ++i) {
                  String found = findPassword(p, length, pswd + Password.CHARSET.charAt(i));
                  if (!found.isEmpty()) {
                      return found;
                  }
              }
              return "";
          }

          public static void main(String[] args) {
              int length = 3;
              Password p = new Password(length);
              System.out.println("Password found: " + findPassword(p, length));
          }
      }
      ```
      
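      Note that this search tries up to 36^length combinations, so it is only practical for very short passwords. That exponential blow-up is exactly why longer passwords are so much harder to crack.
      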

-

      
-
-
\ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Powermta 4 5 Nulled 22.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Powermta 4 5 Nulled 22.md deleted file mode 100644 index 53ac8396c9d5d5c8b21ef88de7e28280d51f68b7..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Powermta 4 5 Nulled 22.md +++ /dev/null @@ -1,67 +0,0 @@ -
- - -
-

PowerMTA 4.5 Nulled 22: What Is It And Why You Need It

-

If you are looking for a powerful and reliable email marketing solution, you might have heard of PowerMTA. PowerMTA is an enterprise-grade email message transfer agent (MTA) that can send high-volume and mission critical messages with unsurpassed delivery speed and accuracy. PowerMTA is used by some of the world's largest email service providers, marketers, and enterprises.

-

But what does it mean to use a nulled version of PowerMTA? And what is special about version 22? In this article, we will explain what PowerMTA is, what nulled means, and what version 22 offers. We will also compare the pros and cons of using a nulled version of PowerMTA versus a licensed one. Finally, we will provide a step-by-step guide on how to install and use PowerMTA 4.5 nulled 22.

-

Powermta 4 5 Nulled 22


      



-

PowerMTA Features And Benefits

-

PowerMTA is a software that can handle the complex and dynamic challenges of modern email delivery. It offers a range of features and benefits that make it stand out from other email marketing solutions. Here are some of them:

-

High-Volume And Mission Critical Email Delivery

-

PowerMTA can send millions of emails per hour with minimal hardware and network resources. It can also ensure that your emails reach the inbox of your recipients with high deliverability and low bounce rates. PowerMTA uses advanced algorithms and protocols to avoid spam traps, blacklists, and reputation issues. It also supports authentication standards such as SPF, DKIM, and DMARC to enhance your sender reputation and trustworthiness.

-

Granular Connection Controls And Throttling

-

PowerMTA can optimize your email delivery rates and avoid spam filters by adjusting the connection settings for each domain, ISP, or recipient. You can control the number of concurrent connections, the message rate, the retry interval, the delivery window, and other parameters to match the requirements and preferences of each destination. PowerMTA can also throttle your email sending based on feedback loops, bounce messages, and SMTP responses to prevent overloading or upsetting the receiving servers.

-

Advanced Queue Management And Routing

-

PowerMTA can prioritize and route your emails based on various criteria such as sender, recipient, subject, content, time, etc. You can create custom rules and policies to segment your email traffic and assign different delivery modes and settings for each segment. PowerMTA can also balance the load among multiple IP addresses and domains to improve your performance and avoid bottlenecks.

-

Real-Time Monitoring And Reporting

-

PowerMTA can provide live feedback and analytics on your email performance. You can monitor the status of your email campaigns, track the delivery rates, open rates, click rates, bounce rates, unsubscribe rates, etc. You can also access detailed logs and reports on each email transaction, including the SMTP responses, the delivery time, the IP address, the domain, etc. PowerMTA can also integrate with third-party tools and platforms such as Google Analytics, Mailchimp, Sendgrid, etc. to enhance your visibility and insights.

-

Flexible Configuration And Integration

-

PowerMTA can be customized and integrated with various platforms and tools to suit your needs and preferences. You can configure PowerMTA using a simple text file or a web-based interface. You can also use APIs and scripts to automate and control PowerMTA remotely. PowerMTA can work with any email marketing software or application that supports SMTP protocol. You can also use PowerMTA with cloud services such as Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), etc. to scale up or down your email infrastructure as needed.
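      Because PowerMTA accepts mail over standard SMTP, handing it messages from an application takes only a few lines of code. A minimal Python sketch, in which the host, port, and addresses are placeholder assumptions:

      ```python
      # Send one message through an SMTP relay such as PowerMTA (placeholder host and addresses).
      import smtplib
      from email.message import EmailMessage

      msg = EmailMessage()
      msg["From"] = "news@example.com"
      msg["To"] = "user@example.org"
      msg["Subject"] = "Hello"
      msg.set_content("A test message routed through the local MTA.")

      with smtplib.SMTP("localhost", 25) as smtp:
          smtp.send_message(msg)
      ```
      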

-

-

PowerMTA Nulled Vs Licensed

-

As you can see, PowerMTA is a powerful and reliable email marketing solution that can help you grow your business and reach your goals. But how much does it cost to use PowerMTA? And what are the advantages and disadvantages of using a nulled version of PowerMTA versus a licensed one?

-

A licensed version of PowerMTA costs $5,500 per year for one server with unlimited domains and IP addresses. This includes technical support, updates, bug fixes, security patches, etc. A nulled version of PowerMTA is a cracked or modified version of PowerMTA that bypasses the license verification process and allows you to use PowerMTA for free without paying for it.

-

Using a nulled version of PowerMTA might seem tempting if you want to save money or test PowerMTA before buying it. However, there are some risks and drawbacks associated with using a nulled version of PowerMTA that you should be aware of before making a decision.

-

Pros Of PowerMTA Nulled

-

Some of the advantages of using a nulled version of PowerMTA are:

-
    -
  • You can save money by not paying for the license fee.
  • -
  • You can use PowerMTA on unlimited servers, domains, and IP addresses without any restrictions.
  • -
  • You can access all the features and benefits of PowerMTA without any limitations.
  • -
  • You can test PowerMTA before buying it to see if it suits your needs and expectations.
  • -
-

Cons Of PowerMTA Nulled

-

      Some of the disadvantages of using a nulled version of PowerMTA are:
      

-
    -
  • You can expose your server and your email campaigns to security risks, such as malware, viruses, hackers, etc. A nulled version of PowerMTA might contain hidden code or backdoors that can compromise your data and your reputation.
  • -
  • You can face legal issues, such as lawsuits, fines, or penalties, for violating the intellectual property rights of the PowerMTA developers. Using a nulled version of PowerMTA is illegal and unethical, and you might get caught and sued by the authorities or the PowerMTA team.
  • -
  • You can miss out on the technical support, updates, bug fixes, security patches, etc. that come with a licensed version of PowerMTA. A nulled version of PowerMTA might not work properly or might stop working altogether due to compatibility issues or errors. You will not be able to get any help or assistance from the PowerMTA team or the community if you encounter any problems or challenges.
  • -
-

How To Install And Use PowerMTA 4.5 Nulled 22

-

If you still want to try PowerMTA 4.5 nulled 22 despite the risks and drawbacks, you will need to follow some steps to install and use it. Here is a step-by-step guide on how to install and use PowerMTA 4.5 nulled 22:

-

Download And Extract The Files

-

The first step is to download the files from a reliable source. You can find the download link for PowerMTA 4.5 nulled 22 on various websites and forums, but be careful and check the reviews and comments before downloading anything. You might end up downloading a fake or corrupted file that can harm your server or your email campaigns.

-

Once you have downloaded the file, you will need to extract it using a tool such as WinRAR or 7-Zip. You will get a folder with several files and subfolders inside. You will need these files for the next steps.

-

Install The Dependencies And The Software

-

The next step is to install the required dependencies and the software on your server. You can use either Linux or Windows as your operating system, but Linux is recommended for better performance and stability.

-

If you are using Linux, you will need to install some packages such as gcc, make, openssl, libssl-dev, etc. You can use the following command to install them:

-sudo apt-get update && sudo apt-get install gcc make openssl libssl-dev -

If you are using Windows, you will need to install some software such as Visual C++ Redistributable Packages for Visual Studio 2013, OpenSSL for Windows, etc. You can download them from their official websites and follow the instructions to install them.

-

After installing the dependencies, you will need to install the software itself. You can do this by running the installation script or the executable file depending on your operating system.

-

If you are using Linux, you will need to run the following command from the folder where you extracted the files:

-sudo ./install.sh -

If you are using Windows, you will need to run the following file from the folder where you extracted the files:

-PowerMTA-4.5r8-win-x64.exe -

The installation process will take some time and will ask you some questions along the way. You can answer them according to your preferences or leave them as default.

-

Configure The Settings And The License Key

-

The next step is to configure the settings and enter the license key for PowerMTA. You will need to edit the configuration file and enter the license key in the appropriate places.

-

The configuration file is located in /etc/pmta/config on Linux or C:\PMTA\config on Windows. You can use any text editor to open and edit it.

-

The configuration file contains various sections and parameters that control how PowerMTA works. You can customize them according to your needs and preferences, but be careful not to make any mistakes or typos that can cause errors or problems.

-

Some of the most important sections and parameters that you should pay attention to are:

-
    -
  • <source>: This section defines where PowerMTA gets its email messages from. You can specify an SMTP server, a local directory, a remote directory, etc.
  • -
  • <domain>: This section defines how PowerMTA handles each domain that it sends emails to. You can specify various settings such as connection limits, message rates, delivery modes, authentication methods, etc.
  • -
      • <virtual-mta>: This section defines a named virtual MTA, that is, a source IP address and host name that outgoing messages can be routed through. A small configuration sketch follows this list.
      
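      For orientation only, here is a tiny fragment in the PowerMTA configuration style. The directive names are quoted from memory of the public documentation, so treat them as assumptions and verify them against the manual for your version:

      ```
      # /etc/pmta/config (illustrative fragment, directive names to be verified)
      <domain gmail.com>
          max-smtp-out 20        # concurrent outbound connections to this domain
          max-msg-rate 500/h     # throttle the message rate
      </domain>

      <virtual-mta vmta1>
          smtp-source-host 192.0.2.10 mail1.example.com
      </virtual-mta>
      ```
      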

      
    -
    -
    \ No newline at end of file diff --git a/spaces/neuralmagic/sparse-mpt-7b-gsm8k/app.py b/spaces/neuralmagic/sparse-mpt-7b-gsm8k/app.py deleted file mode 100644 index 27ef52fc3f7105ae512d7024f12db497530f2472..0000000000000000000000000000000000000000 --- a/spaces/neuralmagic/sparse-mpt-7b-gsm8k/app.py +++ /dev/null @@ -1,228 +0,0 @@ -import deepsparse -import gradio as gr -from typing import Tuple, List - -deepsparse.cpu.print_hardware_capability() - -MODEL_ID = "hf:neuralmagic/mpt-7b-gsm8k-pruned60-quant" - -DESCRIPTION = f""" -# MPT Sparse Finetuned on GSM8k with DeepSparse -![NM Logo](https://files.slack.com/files-pri/T020WGRLR8A-F05TXD28BBK/neuralmagic-logo.png?pub_secret=54e8db19db) -Model ID: {MODEL_ID} - -🚀 **Experience the power of LLM mathematical reasoning** through [our MPT sparse finetuned](https://arxiv.org/abs/2310.06927) on the [GSM8K dataset](https://huggingface.co/datasets/gsm8k). -GSM8K, short for Grade School Math 8K, is a collection of 8.5K high-quality linguistically diverse grade school math word problems, designed to challenge question-answering systems with multi-step reasoning. -Observe the model's performance in deciphering complex math questions, such as "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May?" and offering detailed step-by-step solutions. -## Accelerated Inferenced on CPUs -The MPT model runs purely on CPU courtesy of [sparse software execution by DeepSparse](https://github.com/neuralmagic/deepsparse/tree/main/research/mpt). -DeepSparse provides accelerated inference by taking advantage of the MPT model's weight sparsity to deliver tokens fast! - -![Speedup](https://cdn-uploads.huggingface.co/production/uploads/60466e4b4f40b01b66151416/qMW-Uq8xAawhANTZYB7ZI.png) -""" - -MAX_MAX_NEW_TOKENS = 1024 -DEFAULT_MAX_NEW_TOKENS = 200 - -# Setup the engine -pipe = deepsparse.Pipeline.create( - task="text-generation", - model_path=MODEL_ID, - sequence_length=MAX_MAX_NEW_TOKENS, - prompt_sequence_length=16, -) - -def clear_and_save_textbox(message: str) -> Tuple[str, str]: - return "", message - - -def display_input( - message: str, history: List[Tuple[str, str]] -) -> List[Tuple[str, str]]: - history.append((message, "")) - return history - - -def delete_prev_fn(history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], str]: - try: - message, _ = history.pop() - except IndexError: - message = "" - return history, message or "" - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - gr.Markdown(DESCRIPTION) - with gr.Column(): - gr.Markdown("""### MPT GSM Sparse Finetuned Demo""") - - with gr.Group(): - chatbot = gr.Chatbot(label="Chatbot") - with gr.Row(): - textbox = gr.Textbox(container=False,placeholder="Type a message...",scale=10,) - submit_button = gr.Button("Submit", variant="primary", scale=1, min_width=0) - - with gr.Row(): - retry_button = gr.Button("🔄 Retry", variant="secondary") - undo_button = gr.Button("↩️ Undo", variant="secondary") - clear_button = gr.Button("🗑️ Clear", variant="secondary") - - saved_input = gr.State() - - gr.Examples(examples=[ - "James decides to run 3 sprints 3 times a week. He runs 60 meters each sprint. How many total meters does he run a week?", - "Claire makes a 3 egg omelet every morning for breakfast. How many dozens of eggs will she eat in 4 weeks?", - "Gretchen has 110 coins. There are 30 more gold coins than silver coins. 
How many gold coins does Gretchen have?",],inputs=[textbox],) - - max_new_tokens = gr.Slider( - label="Max new tokens", - value=DEFAULT_MAX_NEW_TOKENS, - minimum=0, - maximum=MAX_MAX_NEW_TOKENS, - step=1, - interactive=True, - info="The maximum numbers of new tokens",) - temperature = gr.Slider( - label="Temperature", - value=0.3, - minimum=0.05, - maximum=1.0, - step=0.05, - interactive=True, - info="Higher values produce more diverse outputs", - ) - top_p = gr.Slider( - label="Top-p (nucleus) sampling", - value=0.40, - minimum=0.0, - maximum=1, - step=0.05, - interactive=True, - info="Higher values sample more low-probability tokens", - ) - top_k = gr.Slider( - label="Top-k sampling", - value=20, - minimum=1, - maximum=100, - step=1, - interactive=True, - info="Sample from the top_k most likely tokens", - ) - repetition_penalty = gr.Slider( - label="Repetition penalty", - value=1.2, - minimum=1.0, - maximum=2.0, - step=0.05, - interactive=True, - info="Penalize repeated tokens", - ) - - # Generation inference - def generate( - message, - history, - max_new_tokens: int, - temperature: float, - top_p: float, - top_k: int, - repetition_penalty: float, - ): - generation_config = { "max_new_tokens": max_new_tokens,"temperature": temperature,"top_p": top_p,"top_k": top_k,"repetition_penalty": repetition_penalty,} - inference = pipe(sequences=message, streaming=True, **generation_config) - history[-1][1] += message - for token in inference: - history[-1][1] += token.generations[0].text - yield history - print(pipe.timer_manager) - textbox.submit( - fn=clear_and_save_textbox, - inputs=textbox, - outputs=[textbox, saved_input], - api_name=False, - queue=False, - ).then( - fn=display_input, - inputs=[saved_input, chatbot], - outputs=chatbot, - api_name=False, - queue=False, - ).success( - generate, - inputs=[ - saved_input, - chatbot, - max_new_tokens, - temperature, - top_p, - top_k, - repetition_penalty, - ], - outputs=[chatbot], - api_name=False, - ) - - submit_button.click( - fn=clear_and_save_textbox, - inputs=textbox, - outputs=[textbox, saved_input], - api_name=False, - queue=False, - ).then( - fn=display_input, - inputs=[saved_input, chatbot], - outputs=chatbot, - api_name=False, - queue=False, - ).success( - generate, - inputs=[saved_input, chatbot, max_new_tokens, temperature], - outputs=[chatbot], - api_name=False, - ) - - retry_button.click( - fn=delete_prev_fn, - inputs=chatbot, - outputs=[chatbot, saved_input], - api_name=False, - queue=False, - ).then( - fn=display_input, - inputs=[saved_input, chatbot], - outputs=chatbot, - api_name=False, - queue=False, - ).then( - generate, - inputs=[saved_input, chatbot, max_new_tokens, temperature], - outputs=[chatbot], - api_name=False, - ) - undo_button.click( - fn=delete_prev_fn, - inputs=chatbot, - outputs=[chatbot, saved_input], - api_name=False, - queue=False, - ).then( - fn=lambda x: x, - inputs=[saved_input], - outputs=textbox, - api_name=False, - queue=False, - ) - clear_button.click( - fn=lambda: ([], ""), - outputs=[chatbot, saved_input], - queue=False, - api_name=False, - ) - - - - -demo.queue().launch() - \ No newline at end of file diff --git a/spaces/nickmuchi/Earnings-Call-Analysis-Whisperer/sentence-transformers/train.py b/spaces/nickmuchi/Earnings-Call-Analysis-Whisperer/sentence-transformers/train.py deleted file mode 100644 index 8cd6e0e644bf013f82b37f473debc5b73a91f690..0000000000000000000000000000000000000000 --- a/spaces/nickmuchi/Earnings-Call-Analysis-Whisperer/sentence-transformers/train.py +++ /dev/null @@ -1,195 +0,0 
@@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -import os -import time -import sys -import torch -import logging -import json -import numpy as np -import random -import pickle - -import torch.distributed as dist -from torch.utils.data import DataLoader, RandomSampler - -from src.options import Options -from src import data, beir_utils, slurm, dist_utils, utils -from src import moco, inbatch - - -logger = logging.getLogger(__name__) - - -def train(opt, model, optimizer, scheduler, step): - - run_stats = utils.WeightedAvgStats() - - tb_logger = utils.init_tb_logger(opt.output_dir) - - logger.info("Data loading") - if isinstance(model, torch.nn.parallel.DistributedDataParallel): - tokenizer = model.module.tokenizer - else: - tokenizer = model.tokenizer - collator = data.Collator(opt=opt) - train_dataset = data.load_data(opt, tokenizer) - logger.warning(f"Data loading finished for rank {dist_utils.get_rank()}") - - train_sampler = RandomSampler(train_dataset) - train_dataloader = DataLoader( - train_dataset, - sampler=train_sampler, - batch_size=opt.per_gpu_batch_size, - drop_last=True, - num_workers=opt.num_workers, - collate_fn=collator, - ) - - epoch = 1 - - model.train() - while step < opt.total_steps: - train_dataset.generate_offset() - - logger.info(f"Start epoch {epoch}") - for i, batch in enumerate(train_dataloader): - step += 1 - - batch = {key: value.cuda() if isinstance(value, torch.Tensor) else value for key, value in batch.items()} - train_loss, iter_stats = model(**batch, stats_prefix="train") - - train_loss.backward() - optimizer.step() - - scheduler.step() - model.zero_grad() - - run_stats.update(iter_stats) - - if step % opt.log_freq == 0: - log = f"{step} / {opt.total_steps}" - for k, v in sorted(run_stats.average_stats.items()): - log += f" | {k}: {v:.3f}" - if tb_logger: - tb_logger.add_scalar(k, v, step) - log += f" | lr: {scheduler.get_last_lr()[0]:0.3g}" - log += f" | Memory: {torch.cuda.max_memory_allocated()//1e9} GiB" - - logger.info(log) - run_stats.reset() - - if step % opt.eval_freq == 0: - if isinstance(model, torch.nn.parallel.DistributedDataParallel): - encoder = model.module.get_encoder() - else: - encoder = model.get_encoder() - eval_model( - opt, query_encoder=encoder, doc_encoder=encoder, tokenizer=tokenizer, tb_logger=tb_logger, step=step - ) - - if dist_utils.is_main(): - utils.save(model, optimizer, scheduler, step, opt, opt.output_dir, f"lastlog") - - model.train() - - if dist_utils.is_main() and step % opt.save_freq == 0: - utils.save(model, optimizer, scheduler, step, opt, opt.output_dir, f"step-{step}") - - if step > opt.total_steps: - break - epoch += 1 - - -def eval_model(opt, query_encoder, doc_encoder, tokenizer, tb_logger, step): - for datasetname in opt.eval_datasets: - metrics = beir_utils.evaluate_model( - query_encoder, - doc_encoder, - tokenizer, - dataset=datasetname, - batch_size=opt.per_gpu_eval_batch_size, - norm_doc=opt.norm_doc, - norm_query=opt.norm_query, - beir_dir=opt.eval_datasets_dir, - score_function=opt.score_function, - lower_case=opt.lower_case, - normalize_text=opt.eval_normalize_text, - ) - - message = [] - if dist_utils.is_main(): - for metric in ["NDCG@10", "Recall@10", "Recall@100"]: - message.append(f"{datasetname}/{metric}: {metrics[metric]:.2f}") - if tb_logger is not None: - tb_logger.add_scalar(f"{datasetname}/{metric}", metrics[metric], step) - logger.info(" | ".join(message)) - - -if __name__ == "__main__": - logger.info("Start") - - options = Options() - opt = options.parse() - - 
torch.manual_seed(opt.seed) - slurm.init_distributed_mode(opt) - slurm.init_signal_handler() - - directory_exists = os.path.isdir(opt.output_dir) - if dist.is_initialized(): - dist.barrier() - os.makedirs(opt.output_dir, exist_ok=True) - if not directory_exists and dist_utils.is_main(): - options.print_options(opt) - if dist.is_initialized(): - dist.barrier() - utils.init_logger(opt) - - os.environ["TOKENIZERS_PARALLELISM"] = "false" - - if opt.contrastive_mode == "moco": - model_class = moco.MoCo - elif opt.contrastive_mode == "inbatch": - model_class = inbatch.InBatch - else: - raise ValueError(f"contrastive mode: {opt.contrastive_mode} not recognised") - - if not directory_exists and opt.model_path == "none": - model = model_class(opt) - model = model.cuda() - optimizer, scheduler = utils.set_optim(opt, model) - step = 0 - elif directory_exists: - model_path = os.path.join(opt.output_dir, "checkpoint", "latest") - model, optimizer, scheduler, opt_checkpoint, step = utils.load( - model_class, - model_path, - opt, - reset_params=False, - ) - logger.info(f"Model loaded from {opt.output_dir}") - else: - model, optimizer, scheduler, opt_checkpoint, step = utils.load( - model_class, - opt.model_path, - opt, - reset_params=False if opt.continue_training else True, - ) - if not opt.continue_training: - step = 0 - logger.info(f"Model loaded from {opt.model_path}") - - logger.info(utils.get_parameters(model)) - - if dist.is_initialized(): - model = torch.nn.parallel.DistributedDataParallel( - model, - device_ids=[opt.local_rank], - output_device=opt.local_rank, - find_unused_parameters=False, - ) - dist.barrier() - - logger.info("Start training") - train(opt, model, optimizer, scheduler, step) diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/modeling/losses/registry.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/modeling/losses/registry.py deleted file mode 100644 index d9c8817a743e42b2aec382818f0cc1bb39a66004..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/modeling/losses/registry.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from detectron2.utils.registry import Registry - -DENSEPOSE_LOSS_REGISTRY = Registry("DENSEPOSE_LOSS") diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tests/tracking/test_hungarian_tracker.py b/spaces/nikitaPDL2023/assignment4/detectron2/tests/tracking/test_hungarian_tracker.py deleted file mode 100644 index 660c635990a3370945e7f14422dcd978320e4782..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/tests/tracking/test_hungarian_tracker.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import numpy as np -import unittest -from typing import Dict -import torch - -from detectron2.config import instantiate -from detectron2.structures import Boxes, Instances - - -class TestBaseHungarianTracker(unittest.TestCase): - def setUp(self): - self._img_size = np.array([600, 800]) - self._prev_boxes = np.array( - [ - [101, 101, 200, 200], - [301, 301, 450, 450], - ] - ).astype(np.float32) - self._prev_scores = np.array([0.9, 0.9]) - self._prev_classes = np.array([1, 1]) - self._prev_masks = np.ones((2, 600, 800)).astype("uint8") - self._curr_boxes = np.array( - [ - [302, 303, 451, 452], - [101, 102, 201, 203], - ] - ).astype(np.float32) - self._curr_scores = np.array([0.95, 0.85]) - self._curr_classes = np.array([1, 1]) - self._curr_masks = np.ones((2, 600, 800)).astype("uint8") - - self._prev_instances = { - "image_size": self._img_size, - "pred_boxes": self._prev_boxes, - "scores": self._prev_scores, - "pred_classes": self._prev_classes, - "pred_masks": self._prev_masks, - } - self._prev_instances = self._convertDictPredictionToInstance(self._prev_instances) - self._curr_instances = { - "image_size": self._img_size, - "pred_boxes": self._curr_boxes, - "scores": self._curr_scores, - "pred_classes": self._curr_classes, - "pred_masks": self._curr_masks, - } - self._curr_instances = self._convertDictPredictionToInstance(self._curr_instances) - - self._max_num_instances = 200 - self._max_lost_frame_count = 0 - self._min_box_rel_dim = 0.02 - self._min_instance_period = 1 - self._track_iou_threshold = 0.5 - - def _convertDictPredictionToInstance(self, prediction: Dict) -> Instances: - """ - convert prediction from Dict to D2 Instances format - """ - res = Instances( - image_size=torch.IntTensor(prediction["image_size"]), - pred_boxes=Boxes(torch.FloatTensor(prediction["pred_boxes"])), - pred_masks=torch.IntTensor(prediction["pred_masks"]), - pred_classes=torch.IntTensor(prediction["pred_classes"]), - scores=torch.FloatTensor(prediction["scores"]), - ) - return res - - def test_init(self): - cfg = { - "_target_": "detectron2.tracking.hungarian_tracker.BaseHungarianTracker", - "video_height": self._img_size[0], - "video_width": self._img_size[1], - "max_num_instances": self._max_num_instances, - "max_lost_frame_count": self._max_lost_frame_count, - "min_box_rel_dim": self._min_box_rel_dim, - "min_instance_period": self._min_instance_period, - "track_iou_threshold": self._track_iou_threshold, - } - tracker = instantiate(cfg) - self.assertTrue(tracker._video_height == self._img_size[0]) - - def test_initialize_extra_fields(self): - cfg = { - "_target_": "detectron2.tracking.hungarian_tracker.BaseHungarianTracker", - "video_height": self._img_size[0], - "video_width": self._img_size[1], - "max_num_instances": self._max_num_instances, - "max_lost_frame_count": self._max_lost_frame_count, - "min_box_rel_dim": self._min_box_rel_dim, - "min_instance_period": self._min_instance_period, - "track_iou_threshold": self._track_iou_threshold, - } - tracker = instantiate(cfg) - instances = tracker._initialize_extra_fields(self._curr_instances) - self.assertTrue(instances.has("ID")) - self.assertTrue(instances.has("ID_period")) - self.assertTrue(instances.has("lost_frame_count")) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/okriyan/README/README.md b/spaces/okriyan/README/README.md deleted file mode 100644 index 0ddce1e0580d4495d68e956f6a189643810f7a26..0000000000000000000000000000000000000000 --- a/spaces/okriyan/README/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: README 
-emoji: 🦀 -colorFrom: blue -colorTo: indigo -sdk: static -pinned: false ---- - -Edit this `README.md` markdown file to author your organization card. diff --git a/spaces/openpecha/chatbot_tibetan/chat.py b/spaces/openpecha/chatbot_tibetan/chat.py deleted file mode 100644 index 7f997e7f7683862dc06597fcff2a295c4fd7973d..0000000000000000000000000000000000000000 --- a/spaces/openpecha/chatbot_tibetan/chat.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -from typing import Tuple - -import openai - - -class ChatGpt: - def __init__(self, api_key, max_tokens=4096): - self.api_key = api_key - self.max_tokens = max_tokens - self.message_history = [] - self.total_tokens = 0 - - # Set up the OpenAI API client - openai.api_key = self.api_key - - def clear_history(self): - self.message_history = [] - self.total_tokens = 0 - - def add_message(self, role: str, content: str): - self.message_history.append({"role": role, "content": content}) - self._truncate_history() - - def add_system_message(self, content: str): - self.add_message("system", content) - - def generate_response(self, user_input: str) -> str: - self.add_message("user", user_input) - response = self._call_openai_api(self.message_history) - self.add_message("assistant", response) - - return response - - def _truncate_history(self): - while self.total_tokens > self.max_tokens: - if self.message_history[0]["role"] != "system": - self.message_history.pop(0) - else: - break - - def _call_openai_api(self, messages) -> str: - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", messages=messages - ) - self.total_tokens += response["usage"]["total_tokens"] - return response["choices"][0]["message"]["content"].strip() - - -if __name__ == "__main__": - chat = ChatGpt(os.getenv("OPENAI_API_KEY")) - - chat.add_system_message("The assistant can answer questions and tell jokes.") - user_input = "Tell me a joke." 
-    user_msg, bot_response = chat.generate_response(user_input)
-    assert user_msg == user_input
-    print("User:", user_msg)
-    print("Assistant:", bot_response)
-    print("Total Tokens:", chat.total_tokens)
-
-    user_input = "another one"
-    user_msg, bot_response = chat.generate_response(user_input)
-    assert user_msg == user_input
-    print("User:", user_msg)
-    print("Assistant:", bot_response)
-    print("Total Tokens:", chat.total_tokens)
diff --git a/spaces/osanseviero/hugging-gallery/app.py b/spaces/osanseviero/hugging-gallery/app.py
deleted file mode 100644
index 60dbdb66af835159ceff767c59bc5d59beb1adae..0000000000000000000000000000000000000000
--- a/spaces/osanseviero/hugging-gallery/app.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-import os
-from glob import glob
-import gradio as gr
-
-
-block = gr.Blocks(css=".container { max-width: 800px; margin: auto; }")
-
-
-def infer():
-    images = glob('*.png') + glob('*.jpg') + glob('*.jpeg') + glob('*.gif')
-    return images
-
-with block:
-    with gr.Group():
-        with gr.Box():
-            gallery = gr.Gallery(label="Hugging Gallery")
-    block.load(infer, inputs=None, outputs=gallery)
-
-block.launch()
-
-
-
-#import streamlit as st
-#from glob import glob
-#logos = glob('*.png') + glob('*.jpg') + glob('*.jpeg') + glob('*.gif')
-#
-#cols = st.columns(4)
-#for i, logo in enumerate(logos):
-#    cols[i%4].image(logo)
\ No newline at end of file
diff --git a/spaces/overlordx/starlight/toolbag/__init__.py b/spaces/overlordx/starlight/toolbag/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/p208p2002/chinese-sentence-checking/README.md b/spaces/p208p2002/chinese-sentence-checking/README.md
deleted file mode 100644
index 7efaa43683202f99a2268294336a95940640af43..0000000000000000000000000000000000000000
--- a/spaces/p208p2002/chinese-sentence-checking/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Chinese Sentence Checking
-emoji: 🧑‍⚖️
-colorFrom: green
-colorTo: red
-sdk: gradio
-sdk_version: 3.4
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/stable_diffusion.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/stable_diffusion.md
deleted file mode 100644
index 31d5f9dc6bb83e542e55fc9216e252660c06b854..0000000000000000000000000000000000000000
--- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/stable_diffusion.md
+++ /dev/null
@@ -1,260 +0,0 @@
-
-
-# Effective and efficient diffusion
-
-[[open-in-colab]]
-
-Getting the [`DiffusionPipeline`] to generate images in a certain style or include what you want can be tricky. Oftentimes, you have to run the [`DiffusionPipeline`] several times before you end up with an image you're happy with. But generating something out of nothing is a computationally intensive process, especially if you're running inference over and over again.
-
-This is why it's important to get the most *computational* (speed) and *memory* (GPU RAM) efficiency from the pipeline to reduce the time between inference cycles so you can iterate faster.
-
-This tutorial walks you through how to generate faster and better with the [`DiffusionPipeline`].
- -Begin by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model: - -```python -from diffusers import DiffusionPipeline - -model_id = "runwayml/stable-diffusion-v1-5" -pipeline = DiffusionPipeline.from_pretrained(model_id, use_safetensors=True) -``` - -The example prompt you'll use is a portrait of an old warrior chief, but feel free to use your own prompt: - -```python -prompt = "portrait photo of a old warrior chief" -``` - -## Speed - - - -💡 If you don't have access to a GPU, you can use one for free from a GPU provider like [Colab](https://colab.research.google.com/)! - - - -One of the simplest ways to speed up inference is to place the pipeline on a GPU the same way you would with any PyTorch module: - -```python -pipeline = pipeline.to("cuda") -``` - -To make sure you can use the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reproducibility): - -```python -import torch - -generator = torch.Generator("cuda").manual_seed(0) -``` - -Now you can generate an image: - -```python -image = pipeline(prompt, generator=generator).images[0] -image -``` - -
    - -This process took ~30 seconds on a T4 GPU (it might be faster if your allocated GPU is better than a T4). By default, the [`DiffusionPipeline`] runs inference with full `float32` precision for 50 inference steps. You can speed this up by switching to a lower precision like `float16` or running fewer inference steps. - -Let's start by loading the model in `float16` and generate an image: - -```python -import torch - -pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, use_safetensors=True) -pipeline = pipeline.to("cuda") -generator = torch.Generator("cuda").manual_seed(0) -image = pipeline(prompt, generator=generator).images[0] -image -``` - -
    - -This time, it only took ~11 seconds to generate the image, which is almost 3x faster than before! - - - -💡 We strongly suggest always running your pipelines in `float16`, and so far, we've rarely seen any degradation in output quality. - - - -Another option is to reduce the number of inference steps. Choosing a more efficient scheduler could help decrease the number of steps without sacrificing output quality. You can find which schedulers are compatible with the current model in the [`DiffusionPipeline`] by calling the `compatibles` method: - -```python -pipeline.scheduler.compatibles -[ - diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler, - diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler, - diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler, - diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler, - diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler, - diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler, - diffusers.schedulers.scheduling_ddpm.DDPMScheduler, - diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler, - diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler, - diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler, - diffusers.schedulers.scheduling_pndm.PNDMScheduler, - diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler, - diffusers.schedulers.scheduling_ddim.DDIMScheduler, -] -``` - -The Stable Diffusion model uses the [`PNDMScheduler`] by default which usually requires ~50 inference steps, but more performant schedulers like [`DPMSolverMultistepScheduler`], require only ~20 or 25 inference steps. Use the [`ConfigMixin.from_config`] method to load a new scheduler: - -```python -from diffusers import DPMSolverMultistepScheduler - -pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) -``` - -Now set the `num_inference_steps` to 20: - -```python -generator = torch.Generator("cuda").manual_seed(0) -image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0] -image -``` - -
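-If you want to double-check the timings quoted in this tutorial on your own hardware, a minimal sketch is shown below, reusing the `pipeline`, `prompt`, and `generator` defined above (the timing approach is our assumption; the absolute numbers depend on your GPU and will vary):
-
-```python
-import time
-
-torch.cuda.synchronize()  # wait for any pending GPU work before starting the clock
-start = time.time()
-image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
-torch.cuda.synchronize()  # CUDA calls are asynchronous, so block until generation finishes
-print(f"generation took {time.time() - start:.2f}s")
-```
-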
    - -Great, you've managed to cut the inference time to just 4 seconds! ⚡️ - -## Memory - -The other key to improving pipeline performance is consuming less memory, which indirectly implies more speed, since you're often trying to maximize the number of images generated per second. The easiest way to see how many images you can generate at once is to try out different batch sizes until you get an `OutOfMemoryError` (OOM). - -Create a function that'll generate a batch of images from a list of prompts and `Generators`. Make sure to assign each `Generator` a seed so you can reuse it if it produces a good result. - -```python -def get_inputs(batch_size=1): - generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)] - prompts = batch_size * [prompt] - num_inference_steps = 20 - - return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps} -``` - -Start with `batch_size=4` and see how much memory you've consumed: - -```python -from diffusers.utils import make_image_grid - -images = pipeline(**get_inputs(batch_size=4)).images -make_image_grid(images, 2, 2) -``` - -Unless you have a GPU with more RAM, the code above probably returned an `OOM` error! Most of the memory is taken up by the cross-attention layers. Instead of running this operation in a batch, you can run it sequentially to save a significant amount of memory. All you have to do is configure the pipeline to use the [`~DiffusionPipeline.enable_attention_slicing`] function: - -```python -pipeline.enable_attention_slicing() -``` - -Now try increasing the `batch_size` to 8! - -```python -images = pipeline(**get_inputs(batch_size=8)).images -make_image_grid(images, rows=2, cols=4) -``` - -
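-One way to see how much memory a given batch size actually consumes is PyTorch's peak-memory counters. The sketch below is an assumption about how you might measure it (the `report_peak_memory` helper name is ours, not part of the tutorial):
-
-```python
-def report_peak_memory(batch_size):
-    torch.cuda.reset_peak_memory_stats()  # clear the previous high-water mark
-    _ = pipeline(**get_inputs(batch_size=batch_size)).images
-    peak_gib = torch.cuda.max_memory_allocated() / 1024**3  # bytes -> GiB
-    print(f"batch_size={batch_size}: peak GPU memory {peak_gib:.2f} GiB")
-
-report_peak_memory(8)
-```
-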
-Whereas before you couldn't even generate a batch of 4 images, now you can generate a batch of 8 images at ~3.5 seconds per image! This is probably the fastest you can go on a T4 GPU without sacrificing quality.
-
-## Quality
-
-In the last two sections, you learned how to optimize the speed of your pipeline by using `fp16`, reducing the number of inference steps by using a more performant scheduler, and enabling attention slicing to reduce memory consumption. Now you're going to focus on how to improve the quality of generated images.
-
-### Better checkpoints
-
-The most obvious step is to use better checkpoints. The Stable Diffusion model is a good starting point, and since its official launch, several improved versions have also been released. However, using a newer version doesn't automatically mean you'll get better results. You'll still have to experiment with different checkpoints yourself, and do a little research (such as using [negative prompts](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/)) to get the best results.
-
-As the field grows, there are more and more high-quality checkpoints finetuned to produce certain styles. Try exploring the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) and [Diffusers Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) to find one you're interested in!
-
-### Better pipeline components
-
-You can also try replacing the current pipeline components with a newer version. Let's try loading the latest [autoencoder](https://huggingface.co/stabilityai/stable-diffusion-2-1/tree/main/vae) from Stability AI into the pipeline, and generate some images:
-
-```python
-from diffusers import AutoencoderKL
-
-vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda")
-pipeline.vae = vae
-images = pipeline(**get_inputs(batch_size=8)).images
-make_image_grid(images, rows=2, cols=4)
-```
-### Better prompt engineering
-
-The text prompt you use to generate an image is super important, so much so that it is called *prompt engineering*. Some considerations to keep in mind during prompt engineering are:
-
-- How are images similar to the one I want to generate stored on the internet?
-- What additional detail can I give that steers the model towards the style I want?
-
-With this in mind, let's improve the prompt to include color and higher quality details:
-
-```python
-prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes"
-prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta"
-```
-
-Generate a batch of images with the new prompt:
-
-```python
-images = pipeline(**get_inputs(batch_size=8)).images
-make_image_grid(images, rows=2, cols=4)
-```
    - -Pretty impressive! Let's tweak the second image - corresponding to the `Generator` with a seed of `1` - a bit more by adding some text about the age of the subject: - -```python -prompts = [ - "portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", - "portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", - "portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", - "portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", -] - -generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))] -images = pipeline(prompt=prompts, generator=generator, num_inference_steps=25).images -make_image_grid(images, 2, 2) -``` - -
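-Before moving on, here is a condensed sketch that stacks every optimization covered in this tutorial (fp16 weights, the `DPMSolverMultistepScheduler`, attention slicing, and the fine-tuned VAE) into one pipeline setup:
-
-```python
-import torch
-from diffusers import AutoencoderKL, DiffusionPipeline, DPMSolverMultistepScheduler
-
-pipeline = DiffusionPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
-).to("cuda")
-pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
-pipeline.vae = AutoencoderKL.from_pretrained(
-    "stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16
-).to("cuda")
-pipeline.enable_attention_slicing()
-
-generator = torch.Generator("cuda").manual_seed(0)
-image = pipeline("portrait photo of a old warrior chief", generator=generator, num_inference_steps=20).images[0]
-```
-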
    - -## Next steps - -In this tutorial, you learned how to optimize a [`DiffusionPipeline`] for computational and memory efficiency as well as improving the quality of generated outputs. If you're interested in making your pipeline even faster, take a look at the following resources: - -- Learn how [PyTorch 2.0](./optimization/torch2.0) and [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) can yield 5 - 300% faster inference speed. On an A100 GPU, inference can be up to 50% faster! -- If you can't use PyTorch 2, we recommend you install [xFormers](./optimization/xformers). Its memory-efficient attention mechanism works great with PyTorch 1.13.1 for faster speed and reduced memory consumption. -- Other optimization techniques, such as model offloading, are covered in [this guide](./optimization/fp16). diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/kandinsky/__init__.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/kandinsky/__init__.py deleted file mode 100644 index 63b34e16c95afc9f6430a49e19fdb5f3fa559c17..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/kandinsky/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -from typing import TYPE_CHECKING - -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - get_objects_from_module, - is_torch_available, - is_transformers_available, -) - - -_dummy_objects = {} -_import_structure = {} - -try: - if not (is_transformers_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 - - _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) -else: - _import_structure["pipeline_kandinsky"] = ["KandinskyPipeline"] - _import_structure["pipeline_kandinsky_combined"] = [ - "KandinskyCombinedPipeline", - "KandinskyImg2ImgCombinedPipeline", - "KandinskyInpaintCombinedPipeline", - ] - _import_structure["pipeline_kandinsky_img2img"] = ["KandinskyImg2ImgPipeline"] - _import_structure["pipeline_kandinsky_inpaint"] = ["KandinskyInpaintPipeline"] - _import_structure["pipeline_kandinsky_prior"] = ["KandinskyPriorPipeline", "KandinskyPriorPipelineOutput"] - _import_structure["text_encoder"] = ["MultilingualCLIP"] - - -if TYPE_CHECKING: - try: - if not (is_transformers_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * - - else: - from .pipeline_kandinsky import KandinskyPipeline - from .pipeline_kandinsky_combined import ( - KandinskyCombinedPipeline, - KandinskyImg2ImgCombinedPipeline, - KandinskyInpaintCombinedPipeline, - ) - from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline - from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline - from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput - from .text_encoder import MultilingualCLIP - -else: - import sys - - sys.modules[__name__] = _LazyModule( - __name__, - globals()["__file__"], - _import_structure, - module_spec=__spec__, - ) - - for name, value in _dummy_objects.items(): - setattr(sys.modules[__name__], name, value) diff --git a/spaces/parkyzh/bingo/src/lib/bots/bing/types.ts b/spaces/parkyzh/bingo/src/lib/bots/bing/types.ts deleted file mode 100644 index 
02cd5e8b01e3529642d28dc1539bf958f4ac420b..0000000000000000000000000000000000000000 --- a/spaces/parkyzh/bingo/src/lib/bots/bing/types.ts +++ /dev/null @@ -1,259 +0,0 @@ -export type Author = 'user' | 'system' | 'bot' - -export type BotId = 'bing' - -export enum BingConversationStyle { - Creative = 'Creative', - Balanced = 'Balanced', - Precise = 'Precise' -} - -export enum ErrorCode { - CONVERSATION_LIMIT = 'CONVERSATION_LIMIT', - BING_UNAUTHORIZED = 'BING_UNAUTHORIZED', - BING_FORBIDDEN = 'BING_FORBIDDEN', - BING_CAPTCHA = 'BING_CAPTCHA', - THROTTLE_LIMIT = 'THROTTLE_LIMIT', - NOTFOUND_ERROR = 'NOT_FOUND_ERROR', - UNKOWN_ERROR = 'UNKOWN_ERROR', - NETWORK_ERROR = 'NETWORK_ERROR', -} - -export class ChatError extends Error { - code: ErrorCode - constructor(message: string, code: ErrorCode) { - super(message) - this.code = code - } -} - -export type ChatMessageModel = { - id: string - author: Author - text: string - error?: ChatError - throttling?: Throttling - sourceAttributions?: SourceAttribution[] - suggestedResponses?: SuggestedResponse[] -} - -export interface ConversationModel { - messages: ChatMessageModel[] -} - -export type Event = - | { - type: 'UPDATE_ANSWER' - data: { - text: string - spokenText?: string - sourceAttributions?: SourceAttribution[] - suggestedResponses?: SuggestedResponse[] - throttling?: Throttling - } - } - | { - type: 'DONE' - } - | { - type: 'ERROR' - error: ChatError - } - -export interface SendMessageParams { - prompt: string - imageUrl?: string - options: T - onEvent: (event: Event) => void - signal?: AbortSignal -} - -export interface ConversationResponse { - conversationId: string - clientId: string - conversationSignature: string - result: { - value: string - message?: string - } -} - -export interface Telemetry { - metrics?: null - startTime: string -} - -export interface ChatUpdateArgument { - messages?: ChatResponseMessage[] - throttling?: Throttling - requestId: string - result: null -} - -export type ChatUpdateCompleteResponse = { - type: 2 - invocationId: string - item: ChatResponseItem -} | { - type: 1 - target: string - arguments: ChatUpdateArgument[] -} | { - type: 3 - invocationId: string -} | { - type: 6 | 7 -} - -export interface ChatRequestResult { - value: string - serviceVersion: string - error?: string -} - -export interface ChatResponseItem { - messages: ChatResponseMessage[] - firstNewMessageIndex: number - suggestedResponses: null - conversationId: string - requestId: string - conversationExpiryTime: string - telemetry: Telemetry - result: ChatRequestResult - throttling: Throttling -} -export enum InvocationEventType { - Invocation = 1, - StreamItem = 2, - Completion = 3, - StreamInvocation = 4, - CancelInvocation = 5, - Ping = 6, - Close = 7, -} - -// https://github.com/bytemate/bingchat-api/blob/main/src/lib.ts - -export interface ConversationInfo { - conversationId: string - clientId: string - conversationSignature: string - invocationId: number - conversationStyle: BingConversationStyle - prompt: string - imageUrl?: string -} - -export interface BingChatResponse { - conversationSignature: string - conversationId: string - clientId: string - invocationId: number - conversationExpiryTime: Date - response: string - details: ChatResponseMessage -} - -export interface Throttling { - maxNumLongDocSummaryUserMessagesInConversation: number - maxNumUserMessagesInConversation: number - numLongDocSummaryUserMessagesInConversation: number - numUserMessagesInConversation: number -} - -export interface ChatResponseMessage { - text: string - 
spokenText?: string - author: string - createdAt: Date - timestamp: Date - messageId: string - requestId: string - offense: string - adaptiveCards: AdaptiveCard[] - sourceAttributions: SourceAttribution[] - feedback: Feedback - contentOrigin: string - messageType?: string - contentType?: string - privacy: null - suggestedResponses: SuggestedResponse[] -} - -export interface AdaptiveCard { - type: string - version: string - body: Body[] -} - -export interface Body { - type: string - text: string - wrap: boolean - size?: string -} - -export interface Feedback { - tag: null - updatedOn: null - type: string -} - -export interface SourceAttribution { - providerDisplayName: string - seeMoreUrl: string - searchQuery: string -} - -export interface SuggestedResponse { - text: string - author?: Author - createdAt?: Date - timestamp?: Date - messageId?: string - messageType?: string - offense?: string - feedback?: Feedback - contentOrigin?: string - privacy?: null -} - -export interface KBlobRequest { - knowledgeRequest: KnowledgeRequestContext - imageBase64?: string -} - -export interface KBlobResponse { - blobId: string - processedBlobId?: string -} - -export interface KnowledgeRequestContext { - imageInfo: ImageInfo; - knowledgeRequest: KnowledgeRequest; -} - -export interface ImageInfo { - url?: string; -} - -export interface KnowledgeRequest { - invokedSkills: string[]; - subscriptionId: string; - invokedSkillsRequestData: InvokedSkillsRequestData; - convoData: ConvoData; -} - -export interface ConvoData { - convoid: string; - convotone: BingConversationStyle; -} - -export interface InvokedSkillsRequestData { - enableFaceBlur: boolean; -} - -export interface FileItem { - url: string; - status?: 'loading' | 'error' | 'loaded' -} diff --git a/spaces/peteralexandercharles/whisper-restore-punctuation/README.md b/spaces/peteralexandercharles/whisper-restore-punctuation/README.md deleted file mode 100644 index 6d303b27d545b186c1070a581332cc7a025f6d65..0000000000000000000000000000000000000000 --- a/spaces/peteralexandercharles/whisper-restore-punctuation/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Whisper Restore Punctuation -emoji: 👀 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: speechbox/whisper-restore-punctuation ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/phiyodr/dacl-challenge/app.py b/spaces/phiyodr/dacl-challenge/app.py deleted file mode 100644 index 626bfafee08ac6bef5314e03c925dc64facda537..0000000000000000000000000000000000000000 --- a/spaces/phiyodr/dacl-challenge/app.py +++ /dev/null @@ -1,258 +0,0 @@ -import gradio as gr -import torch -from PIL import Image -from torchvision import transforms -import numpy as np -from matplotlib import pyplot as plt -from torch import nn -from transformers import SegformerForSemanticSegmentation -import sys -import io -import pdb - -################### -# Setup label names -target_list = ['Crack', 'ACrack', 'Wetspot', 'Efflorescence', 'Rust', 'Rockpocket', 'Hollowareas', 'Cavity', - 'Spalling', 'Graffiti', 'Weathering', 'Restformwork', 'ExposedRebars', - 'Bearing', 'EJoint', 'Drainage', 'PEquipment', 'JTape', 'WConccor'] -target_list_all = ["All"] + target_list -classes, nclasses = target_list, len(target_list) -label2id = dict(zip(classes, range(nclasses))) -id2label = dict(zip(range(nclasses), classes)) - -############ -# Load model -device = torch.device('cpu') 
-segformer = SegformerForSemanticSegmentation.from_pretrained("nvidia/mit-b1",
-                                                             id2label=id2label,
-                                                             label2id=label2id)
-
-# SegModel
-class SegModel(nn.Module):
-    def __init__(self, segformer):
-        super(SegModel, self).__init__()
-        self.segformer = segformer
-        self.upsample = nn.Upsample(scale_factor=4, mode='nearest')
-
-    def forward(self, x):
-        return self.upsample(self.segformer(x).logits)
-
-model = SegModel(segformer)
-path = "runs/2023-08-31_rich-paper-12/best_model_cpu.pth"
-print(f"Load Segformer weights from {path}")
-#model = model.load_state_dict(torch.load(path, map_location=device))
-model = torch.load(path)
-model.eval()
-
-##################
-# Image preprocess
-##################
-
-to_tensor = transforms.ToTensor()
-to_array = transforms.ToPILImage()
-resize = transforms.Resize((512, 512))
-resize_small = transforms.Resize((369, 369))
-normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                 std=[0.229, 0.224, 0.225])
-
-def process_pil(img):
-    img = to_tensor(img)
-    img = resize(img)
-    img = normalize(img)
-    return img
-
-# the background of the image
-def resize_pil(img):
-    img = to_tensor(img)
-    img = resize_small(img)
-    img = to_array(img)
-    return img
-
-# combine the foreground (mask_all) and background (original image) to create one image
-def transparent(fg, bg, alpha_factor):
-
-    foreground = np.array(fg)
-    background = np.array(bg)
-
-    background = Image.fromarray(bg)
-    foreground = Image.fromarray(fg)
-    new_alpha_factor = int(255*alpha_factor)
-    foreground.putalpha(new_alpha_factor)
-    background.paste(foreground, (0, 0), foreground)
-
-    return background
-
-def show_img(all_imgs, dropdown, bg, alpha_factor):
-    idx = target_list_all.index(dropdown)
-    fg = all_imgs[idx]["name"]
-
-    foreground = Image.open(fg)
-    background = np.array(bg)
-
-    background = Image.fromarray(bg)
-    new_alpha_factor = int(255*alpha_factor)
-    foreground.putalpha(new_alpha_factor)
-    background.paste(foreground, (0, 0), foreground)
-
-    return background
-
-###########
-# Inference
-
-
-def inference(img, alpha_factor):
-    background = resize_pil(img)
-
-    img = process_pil(img)
-
-    mask = model(img.unsqueeze(0))  # we need a batch, hence we introduce an extra dimension at position 0 (unsqueeze)
-    mask = mask[0]
-
-    # Get probability values (logits to probs)
-    mask_probs = torch.sigmoid(mask)
-    mask_probs = mask_probs.detach().numpy()
-    mask_probs.shape
-
-    # Make binary mask
-    THRESHOLD = 0.5
-    mask_preds = mask_probs > THRESHOLD
-
-    # All combined
-    mask_all = mask_preds.sum(axis=0)
-    mask_all = np.expand_dims(mask_all, axis=0)
-    mask_all.shape
-
-    # Concat all combined with normal preds
-    mask_preds = np.concatenate((mask_all, mask_preds), axis=0)
-    labs = ["ALL"] + target_list
-
-    fig, axes = plt.subplots(5, 4, figsize=(10, 10))
-
-    # save all mask_preds in all_masks
-    all_masks = []
-
-    for i, ax in enumerate(axes.flat):
-        label = labs[i]
-
-        all_masks.append(mask_preds[i])
-
-        ax.imshow(mask_preds[i])
-        ax.set_title(label)
-
-    plt.tight_layout()
-
-    # plt to PIL
-    img_buf = io.BytesIO()
-    fig.savefig(img_buf, format='png')
-    im = Image.open(img_buf)
-
-    # Save all masks combined, with invisible x- and y-axes and without a white
-    # background.
- all_images = [] - for i in range(len(all_masks)): - plt.figure() - fig = plt.imshow(all_masks[i]) - plt.axis('off') - fig.axes.get_xaxis().set_visible(False) - fig.axes.get_yaxis().set_visible(False) - img_buf = io.BytesIO() - plt.savefig(img_buf, bbox_inches='tight', pad_inches = 0, format='png') - all_images.append(Image.open(img_buf)) - - return im, all_images, background - - - -examples=[ -["assets/dacl10k_v2_validation_0026.jpg", "dacl10k_v2_validation_0026.jpg"], -["assets/dacl10k_v2_validation_0037.jpg", "dacl10k_v2_validation_0037.jpg"], -["assets/dacl10k_v2_validation_0053.jpg", "dacl10k_v2_validation_0053.jpg"], -["assets/dacl10k_v2_validation_0068.jpg", "dacl10k_v2_validation_0068.jpg"], -["assets/dacl10k_v2_validation_0153.jpg", "dacl10k_v2_validation_0153.jpg"], -["assets/dacl10k_v2_validation_0263.jpg", "dacl10k_v2_validation_0263.jpg"], -["assets/dacl10k_v2_validation_0336.jpg", "dacl10k_v2_validation_0336.jpg"], -["assets/dacl10k_v2_validation_0500.jpg", "dacl10k_v2_validation_0500.jpg"], -["assets/dacl10k_v2_validation_0549.jpg", "dacl10k_v2_validation_0549.jpg"], -["assets/dacl10k_v2_validation_0609.jpg", "dacl10k_v2_validation_0609.jpg"] -] - - - -title = "dacl-challenge @ WACV2024" -description = """ -

    -

-# dacl-challenge @ WACV2024
-
-Twitter/X | WACV2024 | arXiv | Python Toolkit | voxel51.com | eval.ai | dacl.ai workshop page
-
-📛 The challenge uses the dacl10k dataset, which stands for damage classification 10k images and is a multi-label semantic segmentation dataset for 19 classes (13 damages and 6 objects) present on bridges.
-
-🏆 The dataset is used in the dacl-challenge associated with the "1st Workshop on Vision-Based Structural Inspections in Civil Engineering" at WACV2024.
-
-Civil engineering structures such as power plants, sewers, and bridges form essential components of the public infrastructure. It is mandatory to keep these structures in a safe and operational state. In order to ensure this, they are frequently inspected, where the recognition and documentation of defects and building components is currently mostly carried out manually. A failure of individual structures results in enormous costs. For example, the economic costs caused by the closure of a bridge due to congestion are many times the cost of the bridge itself and its maintenance.
-
-Recent advancements in hardware and software offer great potential for increasing the quality, traceability, and efficiency of the structural inspection process. In particular, methods from the field of computer vision play an important role. The new techniques support the inspection engineer at the building site, raising the quality and efficiency of the inspection. There is a high demand worldwide for the automation of structural inspections in the areas of building construction, bridge construction, tunnel construction, sewage plants, and other critical infrastructures.
-
-In the “1st Workshop on Vision-Based Structural Inspections in Civil Engineering,” approaches utilizing computer vision for analyzing and assessing civil engineering structures will be explored. The workshop will provide a platform for experts from both the academic and application communities. The core of the workshop is the “dacl-challenge,” which aims to find the best models for recognizing bridge defects and bridge components by means of semantic segmentation. The challenge is based on the “dacl10k” dataset, a novel, real-world, large-scale benchmark for multi-label semantic segmentation that distinguishes between 13 defect types and six building components. The workshop will take place at the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) 2024.
-
-Details:
-
-Workflow:
-* Upload an image or select one from "Examples".
-* Then click "1) Generate Masks".
-* Select a damage or object type in "Select Label" and choose an "Alpha Factor" for transparency.
-* Then click "2) Generate Transparent Mask (with Alpha Factor)".
    -""" - -article = "

    Github Repo

    " - - - -with gr.Blocks() as app: - with gr.Row(): - gr.Markdown(description) - with gr.Row(): - input_img = gr.inputs.Image(type="pil", label="Original Image") - gr.Examples(examples=examples, inputs=[input_img]) - with gr.Row(): - img = gr.outputs.Image(type="pil", label="All Masks") - transparent_img = gr.outputs.Image(type="pil", label="Transparent Image") - with gr.Row(): - dropdown = gr.Dropdown(choices=target_list_all, label="Select Label", value="All") - slider = gr.Slider(minimum=0, maximum=1, value=0.4, label="Alpha Factor") - - all_masks = gr.Gallery(visible=False) - background = gr.Image(visible=False) - - generate_mask_slider = gr.Button("1) Generate Masks") - generate_mask_slider.click(inference, inputs=[input_img], outputs=[img, all_masks, background]) - - submit_transparent_img = gr.Button("2) Generate Transparent Mask (with Alpha Factor)") - submit_transparent_img.click(show_img, inputs=[all_masks, dropdown, background, slider], outputs=[transparent_img]) - - -app.launch() \ No newline at end of file diff --git a/spaces/pikaduck/DungeonMaster/README.md b/spaces/pikaduck/DungeonMaster/README.md deleted file mode 100644 index 9baf8911b6cb7cad6482e219262aa259527eec8e..0000000000000000000000000000000000000000 --- a/spaces/pikaduck/DungeonMaster/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: DungeonMaster -emoji: 🏃 -colorFrom: green -colorTo: green -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pixiou/bingo/src/lib/hooks/use-bing.ts b/spaces/pixiou/bingo/src/lib/hooks/use-bing.ts deleted file mode 100644 index dcdb1667ced0cba299b0825c0e91c4732411308c..0000000000000000000000000000000000000000 --- a/spaces/pixiou/bingo/src/lib/hooks/use-bing.ts +++ /dev/null @@ -1,173 +0,0 @@ -'use client' - -import { useState, useCallback, useEffect, useMemo } from 'react' -import { useAtom, useAtomValue } from 'jotai' -import { chatFamily, bingConversationStyleAtom, GreetMessages, hashAtom, voiceAtom } from '@/state' -import { setConversationMessages } from './chat-history' -import { ChatMessageModel, BotId, FileItem } from '@/lib/bots/bing/types' -import { nanoid } from '../utils' -import { TTS } from '../bots/bing/tts' - -export function useBing(botId: BotId = 'bing') { - const chatAtom = useMemo(() => chatFamily({ botId, page: 'singleton' }), [botId]) - const [enableTTS] = useAtom(voiceAtom) - const speaker = useMemo(() => new TTS(), []) - const [hash, setHash] = useAtom(hashAtom) - const bingConversationStyle = useAtomValue(bingConversationStyleAtom) - const [chatState, setChatState] = useAtom(chatAtom) - const [input, setInput] = useState('') - const [attachmentList, setAttachmentList] = useState([]) - - const updateMessage = useCallback( - (messageId: string, updater: (message: ChatMessageModel) => void) => { - setChatState((draft) => { - const message = draft.messages.find((m) => m.id === messageId) - if (message) { - updater(message) - } - }) - }, - [setChatState], - ) - - const sendMessage = useCallback( - async (input: string, options = {}) => { - const botMessageId = nanoid() - const imageUrl = attachmentList?.[0]?.status === 'loaded' ? attachmentList[0].url : undefined - setChatState((draft) => { - const text = imageUrl ? 
`${input}\n\n![image](${imageUrl})` : input - draft.messages.push({ id: nanoid(), text, author: 'user' }, { id: botMessageId, text: '', author: 'bot' }) - setAttachmentList([]) - }) - const abortController = new AbortController() - setChatState((draft) => { - draft.generatingMessageId = botMessageId - draft.abortController = abortController - }) - speaker.reset() - await chatState.bot.sendMessage({ - prompt: input, - imageUrl: /\?bcid=([^&]+)/.test(imageUrl ?? '') ? `https://www.bing.com/images/blob?bcid=${RegExp.$1}` : imageUrl, - options: { - ...options, - bingConversationStyle, - }, - signal: abortController.signal, - onEvent(event) { - if (event.type === 'UPDATE_ANSWER') { - updateMessage(botMessageId, (message) => { - if (event.data.text.length > message.text.length) { - message.text = event.data.text - } - - if (event.data.spokenText && enableTTS) { - speaker.speak(event.data.spokenText) - } - - message.throttling = event.data.throttling || message.throttling - message.sourceAttributions = event.data.sourceAttributions || message.sourceAttributions - message.suggestedResponses = event.data.suggestedResponses || message.suggestedResponses - }) - } else if (event.type === 'ERROR') { - updateMessage(botMessageId, (message) => { - message.error = event.error - }) - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - }) - } else if (event.type === 'DONE') { - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - }) - } - }, - }) - }, - [botId, attachmentList, chatState.bot, setChatState, updateMessage], - ) - - const uploadImage = useCallback(async (imgUrl: string) => { - setAttachmentList([{ url: imgUrl, status: 'loading' }]) - const response = await chatState.bot.uploadImage(imgUrl, bingConversationStyle) - if (response?.blobId) { - setAttachmentList([{ url: `/api/blob?bcid=${response.blobId}`, status: 'loaded' }]) - } else { - setAttachmentList([{ url: imgUrl, status: 'error' }]) - } - }, [chatState.bot]) - - const resetConversation = useCallback(() => { - chatState.bot.resetConversation() - speaker.abort() - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - draft.messages = [{ author: 'bot', text: GreetMessages[Math.floor(GreetMessages.length * Math.random())], id: nanoid() }] - draft.conversationId = nanoid() - }) - }, [chatState.bot, setChatState]) - - const stopGenerating = useCallback(() => { - chatState.abortController?.abort() - if (chatState.generatingMessageId) { - updateMessage(chatState.generatingMessageId, (message) => { - if (!message.text && !message.error) { - message.text = 'Cancelled' - } - }) - } - setChatState((draft) => { - draft.generatingMessageId = '' - }) - }, [chatState.abortController, chatState.generatingMessageId, setChatState, updateMessage]) - - useEffect(() => { - if (chatState.messages.length) { - setConversationMessages(botId, chatState.conversationId, chatState.messages) - } - }, [botId, chatState.conversationId, chatState.messages]) - - useEffect(() => { - if (hash === 'reset') { - resetConversation() - setHash('') - } - }, [hash, setHash]) - - const chat = useMemo( - () => ({ - botId, - bot: chatState.bot, - isSpeaking: speaker.isSpeaking, - messages: chatState.messages, - sendMessage, - setInput, - input, - resetConversation, - generating: !!chatState.generatingMessageId, - stopGenerating, - uploadImage, - setAttachmentList, - attachmentList, - }), - [ - botId, - bingConversationStyle, - chatState.bot, - 
chatState.generatingMessageId, - chatState.messages, - speaker.isSpeaking, - setInput, - input, - setAttachmentList, - attachmentList, - resetConversation, - sendMessage, - stopGenerating, - ], - ) - - return chat -} diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py deleted file mode 100644 index 593bff23edecd3c517c96e119ee777bd4ee1d9d0..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py +++ /dev/null @@ -1,55 +0,0 @@ -import importlib.metadata -from typing import Any, Optional, Protocol, cast - - -class BadMetadata(ValueError): - def __init__(self, dist: importlib.metadata.Distribution, *, reason: str) -> None: - self.dist = dist - self.reason = reason - - def __str__(self) -> str: - return f"Bad metadata in {self.dist} ({self.reason})" - - -class BasePath(Protocol): - """A protocol that various path objects conform. - - This exists because importlib.metadata uses both ``pathlib.Path`` and - ``zipfile.Path``, and we need a common base for type hints (Union does not - work well since ``zipfile.Path`` is too new for our linter setup). - - This does not mean to be exhaustive, but only contains things that present - in both classes *that we need*. - """ - - @property - def name(self) -> str: - raise NotImplementedError() - - @property - def parent(self) -> "BasePath": - raise NotImplementedError() - - -def get_info_location(d: importlib.metadata.Distribution) -> Optional[BasePath]: - """Find the path to the distribution's metadata directory. - - HACK: This relies on importlib.metadata's private ``_path`` attribute. Not - all distributions exist on disk, so importlib.metadata is correct to not - expose the attribute as public. But pip's code base is old and not as clean, - so we do this to avoid having to rewrite too many things. Hopefully we can - eliminate this some day. - """ - return getattr(d, "_path", None) - - -def get_dist_name(dist: importlib.metadata.Distribution) -> str: - """Get the distribution's project name. - - The ``name`` attribute is only available in Python 3.10 or later. We are - targeting exactly that, but Mypy does not know this. - """ - name = cast(Any, dist).name - if not isinstance(name, str): - raise BadMetadata(dist, reason="invalid metadata entry 'name'") - return name diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/request.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/request.py deleted file mode 100644 index 398386a5b9f61c13be314e256e671a37d28e3623..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/request.py +++ /dev/null @@ -1,170 +0,0 @@ -from __future__ import absolute_import - -from .filepost import encode_multipart_formdata -from .packages.six.moves.urllib.parse import urlencode - -__all__ = ["RequestMethods"] - - -class RequestMethods(object): - """ - Convenience mixin for classes who implement a :meth:`urlopen` method, such - as :class:`urllib3.HTTPConnectionPool` and - :class:`urllib3.PoolManager`. - - Provides behavior for making common types of HTTP request methods and - decides which type of request field encoding to use. 
- - Specifically, - - :meth:`.request_encode_url` is for sending requests whose fields are - encoded in the URL (such as GET, HEAD, DELETE). - - :meth:`.request_encode_body` is for sending requests whose fields are - encoded in the *body* of the request using multipart or www-form-urlencoded - (such as for POST, PUT, PATCH). - - :meth:`.request` is for making any kind of request, it will look up the - appropriate encoding format and use one of the above two methods to make - the request. - - Initializer parameters: - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - """ - - _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"} - - def __init__(self, headers=None): - self.headers = headers or {} - - def urlopen( - self, - method, - url, - body=None, - headers=None, - encode_multipart=True, - multipart_boundary=None, - **kw - ): # Abstract - raise NotImplementedError( - "Classes extending RequestMethods must implement " - "their own ``urlopen`` method." - ) - - def request(self, method, url, fields=None, headers=None, **urlopen_kw): - """ - Make a request using :meth:`urlopen` with the appropriate encoding of - ``fields`` based on the ``method`` used. - - This is a convenience method that requires the least amount of manual - effort. It can be used in most situations, while still having the - option to drop down to more specific methods when necessary, such as - :meth:`request_encode_url`, :meth:`request_encode_body`, - or even the lowest level :meth:`urlopen`. - """ - method = method.upper() - - urlopen_kw["request_url"] = url - - if method in self._encode_url_methods: - return self.request_encode_url( - method, url, fields=fields, headers=headers, **urlopen_kw - ) - else: - return self.request_encode_body( - method, url, fields=fields, headers=headers, **urlopen_kw - ) - - def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw): - """ - Make a request using :meth:`urlopen` with the ``fields`` encoded in - the url. This is useful for request methods like GET, HEAD, DELETE, etc. - """ - if headers is None: - headers = self.headers - - extra_kw = {"headers": headers} - extra_kw.update(urlopen_kw) - - if fields: - url += "?" + urlencode(fields) - - return self.urlopen(method, url, **extra_kw) - - def request_encode_body( - self, - method, - url, - fields=None, - headers=None, - encode_multipart=True, - multipart_boundary=None, - **urlopen_kw - ): - """ - Make a request using :meth:`urlopen` with the ``fields`` encoded in - the body. This is useful for request methods like POST, PUT, PATCH, etc. - - When ``encode_multipart=True`` (default), then - :func:`urllib3.encode_multipart_formdata` is used to encode - the payload with the appropriate content type. Otherwise - :func:`urllib.parse.urlencode` is used with the - 'application/x-www-form-urlencoded' content type. - - Multipart encoding must be used when posting files, and it's reasonably - safe to use it in other times too. However, it may break request - signing, such as with OAuth. - - Supports an optional ``fields`` parameter of key/value strings AND - key/filetuple. A filetuple is a (filename, data, MIME type) tuple where - the MIME type is optional. 
For example:: - - fields = { - 'foo': 'bar', - 'fakefile': ('foofile.txt', 'contents of foofile'), - 'realfile': ('barfile.txt', open('realfile').read()), - 'typedfile': ('bazfile.bin', open('bazfile').read(), - 'image/jpeg'), - 'nonamefile': 'contents of nonamefile field', - } - - When uploading a file, providing a filename (the first parameter of the - tuple) is optional but recommended to best mimic behavior of browsers. - - Note that if ``headers`` are supplied, the 'Content-Type' header will - be overwritten because it depends on the dynamic random boundary string - which is used to compose the body of the request. The random boundary - string can be explicitly set with the ``multipart_boundary`` parameter. - """ - if headers is None: - headers = self.headers - - extra_kw = {"headers": {}} - - if fields: - if "body" in urlopen_kw: - raise TypeError( - "request got values for both 'fields' and 'body', can only specify one." - ) - - if encode_multipart: - body, content_type = encode_multipart_formdata( - fields, boundary=multipart_boundary - ) - else: - body, content_type = ( - urlencode(fields), - "application/x-www-form-urlencoded", - ) - - extra_kw["body"] = body - extra_kw["headers"] = {"Content-Type": content_type} - - extra_kw["headers"].update(headers) - extra_kw.update(urlopen_kw) - - return self.urlopen(method, url, **extra_kw) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/webencodings/tests.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/webencodings/tests.py deleted file mode 100644 index e12c10d033026f09cf97b81d29555e12aae8c762..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/webencodings/tests.py +++ /dev/null @@ -1,153 +0,0 @@ -# coding: utf-8 -""" - - webencodings.tests - ~~~~~~~~~~~~~~~~~~ - - A basic test suite for Encoding. - - :copyright: Copyright 2012 by Simon Sapin - :license: BSD, see LICENSE for details. - -""" - -from __future__ import unicode_literals - -from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode, - IncrementalDecoder, IncrementalEncoder, UTF8) - - -def assert_raises(exception, function, *args, **kwargs): - try: - function(*args, **kwargs) - except exception: - return - else: # pragma: no cover - raise AssertionError('Did not raise %s.' % exception) - - -def test_labels(): - assert lookup('utf-8').name == 'utf-8' - assert lookup('Utf-8').name == 'utf-8' - assert lookup('UTF-8').name == 'utf-8' - assert lookup('utf8').name == 'utf-8' - assert lookup('utf8').name == 'utf-8' - assert lookup('utf8 ').name == 'utf-8' - assert lookup(' \r\nutf8\t').name == 'utf-8' - assert lookup('u8') is None # Python label. - assert lookup('utf-8 ') is None # Non-ASCII white space. - - assert lookup('US-ASCII').name == 'windows-1252' - assert lookup('iso-8859-1').name == 'windows-1252' - assert lookup('latin1').name == 'windows-1252' - assert lookup('LATIN1').name == 'windows-1252' - assert lookup('latin-1') is None - assert lookup('LATİN1') is None # ASCII-only case insensitivity. 
- - -def test_all_labels(): - for label in LABELS: - assert decode(b'', label) == ('', lookup(label)) - assert encode('', label) == b'' - for repeat in [0, 1, 12]: - output, _ = iter_decode([b''] * repeat, label) - assert list(output) == [] - assert list(iter_encode([''] * repeat, label)) == [] - decoder = IncrementalDecoder(label) - assert decoder.decode(b'') == '' - assert decoder.decode(b'', final=True) == '' - encoder = IncrementalEncoder(label) - assert encoder.encode('') == b'' - assert encoder.encode('', final=True) == b'' - # All encoding names are valid labels too: - for name in set(LABELS.values()): - assert lookup(name).name == name - - -def test_invalid_label(): - assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid') - assert_raises(LookupError, encode, 'é', 'invalid') - assert_raises(LookupError, iter_decode, [], 'invalid') - assert_raises(LookupError, iter_encode, [], 'invalid') - assert_raises(LookupError, IncrementalDecoder, 'invalid') - assert_raises(LookupError, IncrementalEncoder, 'invalid') - - -def test_decode(): - assert decode(b'\x80', 'latin1') == ('€', lookup('latin1')) - assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1')) - assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8')) - assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8')) - assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii')) - assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM - - assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM - assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM - assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be')) - assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le')) - - assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be')) - assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le')) - assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le')) - - assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be')) - assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le')) - assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le')) - - -def test_encode(): - assert encode('é', 'latin1') == b'\xe9' - assert encode('é', 'utf8') == b'\xc3\xa9' - assert encode('é', 'utf8') == b'\xc3\xa9' - assert encode('é', 'utf-16') == b'\xe9\x00' - assert encode('é', 'utf-16le') == b'\xe9\x00' - assert encode('é', 'utf-16be') == b'\x00\xe9' - - -def test_iter_decode(): - def iter_decode_to_string(input, fallback_encoding): - output, _encoding = iter_decode(input, fallback_encoding) - return ''.join(output) - assert iter_decode_to_string([], 'latin1') == '' - assert iter_decode_to_string([b''], 'latin1') == '' - assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é' - assert iter_decode_to_string([b'hello'], 'latin1') == 'hello' - assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello' - assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello' - assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é' - assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é' - assert iter_decode_to_string([ - b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é' - assert iter_decode_to_string([ - b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD' - assert iter_decode_to_string([ - b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é' - assert 
iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == '' - assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»' - assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é' - assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é' - assert iter_decode_to_string([ - b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é' - assert iter_decode_to_string([ - b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo' - - -def test_iter_encode(): - assert b''.join(iter_encode([], 'latin1')) == b'' - assert b''.join(iter_encode([''], 'latin1')) == b'' - assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9' - assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9' - assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00' - assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00' - assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9' - assert b''.join(iter_encode([ - '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo' - - -def test_x_user_defined(): - encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca' - decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca' - encoded = b'aa' - decoded = 'aa' - assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined')) - assert encode(decoded, 'x-user-defined') == encoded diff --git a/spaces/poiiii/clefourrier-graphormer-base-pcqm4mv1/README.md b/spaces/poiiii/clefourrier-graphormer-base-pcqm4mv1/README.md deleted file mode 100644 index beaa2adf3366031566c6312883ab74ec71020881..0000000000000000000000000000000000000000 --- a/spaces/poiiii/clefourrier-graphormer-base-pcqm4mv1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Clefourrier Graphormer Base Pcqm4mv1 -emoji: 👀 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/power2/JoJoGan-powerhow2/e4e/editings/latent_editor.py b/spaces/power2/JoJoGan-powerhow2/e4e/editings/latent_editor.py deleted file mode 100644 index 4bebca2f5c86f71b58fa1f30d24bfcb0da06d88f..0000000000000000000000000000000000000000 --- a/spaces/power2/JoJoGan-powerhow2/e4e/editings/latent_editor.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch -import sys -sys.path.append(".") -sys.path.append("..") -from editings import ganspace, sefa -from utils.common import tensor2im - - -class LatentEditor(object): - def __init__(self, stylegan_generator, is_cars=False): - self.generator = stylegan_generator - self.is_cars = is_cars # Since the cars StyleGAN output is 384x512, there is a need to crop the 512x512 output. - - def apply_ganspace(self, latent, ganspace_pca, edit_directions): - edit_latents = ganspace.edit(latent, ganspace_pca, edit_directions) - return self._latents_to_image(edit_latents) - - def apply_interfacegan(self, latent, direction, factor=1, factor_range=None): - edit_latents = [] - if factor_range is not None: # Apply a range of editing factors. 
for example, (-5, 5) - for f in range(*factor_range): - edit_latent = latent + f * direction - edit_latents.append(edit_latent) - edit_latents = torch.cat(edit_latents) - else: - edit_latents = latent + factor * direction - return self._latents_to_image(edit_latents) - - def apply_sefa(self, latent, indices=[2, 3, 4, 5], **kwargs): - edit_latents = sefa.edit(self.generator, latent, indices, **kwargs) - return self._latents_to_image(edit_latents) - - # Currently, in order to apply StyleFlow editings, one should run inference, - # save the latent codes and load them form the official StyleFlow repository. - # def apply_styleflow(self): - # pass - - def _latents_to_image(self, latents): - with torch.no_grad(): - images, _ = self.generator([latents], randomize_noise=False, input_is_latent=True) - if self.is_cars: - images = images[:, :, 64:448, :] # 512x512 -> 384x512 - horizontal_concat_image = torch.cat(list(images), 2) - final_image = tensor2im(horizontal_concat_image) - return final_image diff --git a/spaces/pragnakalp/bert_based_ner/app.py b/spaces/pragnakalp/bert_based_ner/app.py deleted file mode 100644 index aa27978cc429f42065552378c2bb7b4c391c9c0f..0000000000000000000000000000000000000000 --- a/spaces/pragnakalp/bert_based_ner/app.py +++ /dev/null @@ -1,167 +0,0 @@ -import json -import gradio as gr -import datetime -from datetime import date -import smtplib -import csv -from email.mime.text import MIMEText -import requests -import os -import numpy as np -import json -from tqdm import trange -import gc -import torch -import torch.nn.functional as F -from bert_ner_model_loader import Ner -import pandas as pd -from huggingface_hub import Repository -import huggingface_hub -import socket -from urllib.request import urlopen -import re as r -from transformers import AutoTokenizer, AutoModelWithLMHead - -HF_TOKEN = os.environ.get("HF_TOKEN") -DATASET_NAME = "bert_based_ner_dataset" -DATASET_REPO_URL = f"https://huggingface.co/datasets/pragnakalp/{DATASET_NAME}" -DATA_FILENAME = "bert_base_ner_logs.csv" -DATA_FILE = os.path.join("bert_base_ner_logs", DATA_FILENAME) -DATASET_REPO_ID = "pragnakalp/bert_based_ner_dataset" -print("is none?", HF_TOKEN is None) -input_value = "The U.S. 
President Donald Trump came to visit Ahmedabad first time at Motera Stadium with our Prime Minister Narendra Modi in February 2020" -try: - hf_hub_download( - repo_id=DATASET_REPO_ID, - filename=DATA_FILENAME, - cache_dir=DATA_DIRNAME, - force_filename=DATA_FILENAME - ) - -except: - print("file not found") - -repo = Repository( - local_dir="bert_base_ner_logs", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -) - -cwd = os.getcwd() -bert_ner_model = os.path.join(cwd) -Entities_Found =[] -Entity_Types = [] -k = 0 - - -def getIP(): - d = str(urlopen('http://checkip.dyndns.com/') - .read()) - - return r.compile(r'Address: (\d+\.\d+\.\d+\.\d+)').search(d).group(1) - -def get_location(ip_addr): - ip=ip_addr - - req_data={ - "ip":ip, - "token":"pkml123" - } - url = "https://demos.pragnakalp.com/get-ip-location" - - # req_data=json.dumps(req_data) - # print("req_data",req_data) - headers = {'Content-Type': 'application/json'} - - response = requests.request("POST", url, headers=headers, data=json.dumps(req_data)) - response = response.json() - print("response======>>",response) - return response - -def generate_ner(article): - result = {'Entities Found':[], 'Entity Types':[]} - if article.strip(): - text = "Input sentence: " - text += article - - model_ner = Ner(bert_ner_model) - - output = model_ner.predict(text) - print(output) - k = 0 - Entities_Found.clear() - Entity_Types.clear() - save_data_and_sendmail(article,output) - for i in output: - for j in i: - if k == 0: - Entities_Found.append(j) - k += 1 - else: - Entity_Types.append(j) - k = 0 - result = {'Entities Found':Entities_Found, 'Entity Types':Entity_Types} - return pd.DataFrame(result) - else: - raise gr.Error("Please enter text in inputbox!!!!") - - -def save_data_and_sendmail(article,output): - try: - print("welcome") - ip_address = '' - - ip_address= getIP() - print(ip_address) - location = get_location(ip_address) - print(location) - add_csv = [article,output,ip_address,location] - with open(DATA_FILE, "a") as f: - writer = csv.writer(f) - # write the data - writer.writerow(add_csv) - commit_url = repo.push_to_hub() - print("commit data :",commit_url) - - url = 'https://pragnakalpdev33.pythonanywhere.com/HF_space_bert_base_ner' - myobj = {'article': article,'gen_text':output,'ip_addr':ip_address,"location":location} - x = requests.post(url, json = myobj) - - return "Successfully save data" - - except Exception as e: - print("error") - return "Error while sending mail" + str(e) - -input=gr.Textbox(lines=3, value=input_value, label="Input Text") -output = [gr.Dataframe(row_count = (2, "dynamic"), col_count=(2, "fixed"), headers=["Entities Found","Entity Types"], lable="Here is the result",wrap=True)] -# with gr.Blocks(css=".gradio-container {background-color: lightgray}") as demo: -# gr.Markdown("

    "+ "Named Entity Recognition Using BERT" + "



    ") -# with gr.Row(): -# with gr.Column(): -# input=gr.Textbox(lines=5, value=input_value, label="Input Text") -# sub_btn = gr.Button("Submit") -# output = gr.Dataframe(row_count = (3, "dynamic"), col_count=(2, "fixed"), headers=["Entities Found","Entity Types"]) -# gr.Markdown( -# """ -#

    Feel free to give us your feedback on this NER demo. -# For all your Named Entity Recognition related requirements, we are here to help you. -# Email us your requirement at letstalk@pragnakalp.com. -# And don't forget to check out more interesting NLP services we are offering. -# Developed by : Pragnakalp Techlabs
    -# """) - -# event = sub_btn.click(generate_emotion, inputs=input, outputs=output) -# demo.launch() - -demo = gr.Interface( - generate_ner, - input, - output, - title="Named Entity Recognition Using BERT", - css=".gradio-container {background-color: lightgray} #inp_div {background-color: #7FB3D5;", - article="""

    Feel free to give us your feedback on this NER demo. - For all your Named Entity Recognition related requirements, we are here to help you. Email us your requirement at - letstalk@pragnakalp.com. And don't forget to check out more interesting - NLP services we are offering. - Developed by : Pragnakalp Techlabs
    """ -) -demo.launch() \ No newline at end of file diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_pyplot.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_pyplot.py deleted file mode 100644 index 68a1de24a5618d65351b26d0369ebb53ffe9577d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_pyplot.py +++ /dev/null @@ -1,459 +0,0 @@ -import difflib - -import numpy as np -import sys -from pathlib import Path - -import pytest - -import matplotlib as mpl -from matplotlib.testing import subprocess_run_for_testing -from matplotlib import pyplot as plt - - -def test_pyplot_up_to_date(tmpdir): - pytest.importorskip("black") - - gen_script = Path(mpl.__file__).parents[2] / "tools/boilerplate.py" - if not gen_script.exists(): - pytest.skip("boilerplate.py not found") - orig_contents = Path(plt.__file__).read_text() - plt_file = tmpdir.join('pyplot.py') - plt_file.write_text(orig_contents, 'utf-8') - - subprocess_run_for_testing( - [sys.executable, str(gen_script), str(plt_file)], - check=True) - new_contents = plt_file.read_text('utf-8') - - if orig_contents != new_contents: - diff_msg = '\n'.join( - difflib.unified_diff( - orig_contents.split('\n'), new_contents.split('\n'), - fromfile='found pyplot.py', - tofile='expected pyplot.py', - n=0, lineterm='')) - pytest.fail( - "pyplot.py is not up-to-date. Please run " - "'python tools/boilerplate.py' to update pyplot.py. " - "This needs to be done from an environment where your " - "current working copy is installed (e.g. 'pip install -e'd). " - "Here is a diff of unexpected differences:\n%s" % diff_msg - ) - - -def test_copy_docstring_and_deprecators(recwarn): - @mpl._api.rename_parameter("(version)", "old", "new") - @mpl._api.make_keyword_only("(version)", "kwo") - def func(new, kwo=None): - pass - - @plt._copy_docstring_and_deprecators(func) - def wrapper_func(new, kwo=None): - pass - - wrapper_func(None) - wrapper_func(new=None) - wrapper_func(None, kwo=None) - wrapper_func(new=None, kwo=None) - assert not recwarn - with pytest.warns(mpl.MatplotlibDeprecationWarning): - wrapper_func(old=None) - with pytest.warns(mpl.MatplotlibDeprecationWarning): - wrapper_func(None, None) - - -def test_pyplot_box(): - fig, ax = plt.subplots() - plt.box(False) - assert not ax.get_frame_on() - plt.box(True) - assert ax.get_frame_on() - plt.box() - assert not ax.get_frame_on() - plt.box() - assert ax.get_frame_on() - - -def test_stackplot_smoke(): - # Small smoke test for stackplot (see #12405) - plt.stackplot([1, 2, 3], [1, 2, 3]) - - -def test_nrows_error(): - with pytest.raises(TypeError): - plt.subplot(nrows=1) - with pytest.raises(TypeError): - plt.subplot(ncols=1) - - -def test_ioff(): - plt.ion() - assert mpl.is_interactive() - with plt.ioff(): - assert not mpl.is_interactive() - assert mpl.is_interactive() - - plt.ioff() - assert not mpl.is_interactive() - with plt.ioff(): - assert not mpl.is_interactive() - assert not mpl.is_interactive() - - -def test_ion(): - plt.ioff() - assert not mpl.is_interactive() - with plt.ion(): - assert mpl.is_interactive() - assert not mpl.is_interactive() - - plt.ion() - assert mpl.is_interactive() - with plt.ion(): - assert mpl.is_interactive() - assert mpl.is_interactive() - - -def test_nested_ion_ioff(): - # initial state is interactive - plt.ion() - - # mixed ioff/ion - with plt.ioff(): - assert not mpl.is_interactive() - with plt.ion(): - assert 
mpl.is_interactive() - assert not mpl.is_interactive() - assert mpl.is_interactive() - - # redundant contexts - with plt.ioff(): - with plt.ioff(): - assert not mpl.is_interactive() - assert mpl.is_interactive() - - with plt.ion(): - plt.ioff() - assert mpl.is_interactive() - - # initial state is not interactive - plt.ioff() - - # mixed ioff/ion - with plt.ion(): - assert mpl.is_interactive() - with plt.ioff(): - assert not mpl.is_interactive() - assert mpl.is_interactive() - assert not mpl.is_interactive() - - # redundant contexts - with plt.ion(): - with plt.ion(): - assert mpl.is_interactive() - assert not mpl.is_interactive() - - with plt.ioff(): - plt.ion() - assert not mpl.is_interactive() - - -def test_close(): - try: - plt.close(1.1) - except TypeError as e: - assert str(e) == "close() argument must be a Figure, an int, " \ - "a string, or None, not <class 'float'>" - - -def test_subplot_reuse(): - ax1 = plt.subplot(121) - assert ax1 is plt.gca() - ax2 = plt.subplot(122) - assert ax2 is plt.gca() - ax3 = plt.subplot(121) - assert ax1 is plt.gca() - assert ax1 is ax3 - - -def test_axes_kwargs(): - # plt.axes() always creates new axes, even if axes kwargs differ. - plt.figure() - ax = plt.axes() - ax1 = plt.axes() - assert ax is not None - assert ax1 is not ax - plt.close() - - plt.figure() - ax = plt.axes(projection='polar') - ax1 = plt.axes(projection='polar') - assert ax is not None - assert ax1 is not ax - plt.close() - - plt.figure() - ax = plt.axes(projection='polar') - ax1 = plt.axes() - assert ax is not None - assert ax1.name == 'rectilinear' - assert ax1 is not ax - plt.close() - - -def test_subplot_replace_projection(): - # plt.subplot() searches for an axes with the same subplot spec; if one - # exists and the kwargs match, it is returned, otherwise a new one is created - fig = plt.figure() - ax = plt.subplot(1, 2, 1) - ax1 = plt.subplot(1, 2, 1) - ax2 = plt.subplot(1, 2, 2) - ax3 = plt.subplot(1, 2, 1, projection='polar') - ax4 = plt.subplot(1, 2, 1, projection='polar') - assert ax is not None - assert ax1 is ax - assert ax2 is not ax - assert ax3 is not ax - assert ax3 is ax4 - - assert ax in fig.axes - assert ax2 in fig.axes - assert ax3 in fig.axes - - assert ax.name == 'rectilinear' - assert ax2.name == 'rectilinear' - assert ax3.name == 'polar' - - -def test_subplot_kwarg_collision(): - ax1 = plt.subplot(projection='polar', theta_offset=0) - ax2 = plt.subplot(projection='polar', theta_offset=0) - assert ax1 is ax2 - ax1.remove() - ax3 = plt.subplot(projection='polar', theta_offset=1) - assert ax1 is not ax3 - assert ax1 not in plt.gcf().axes - - -def test_gca(): - # plt.gca() returns an existing axes, unless there were no axes.
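    # (Added note, not in the original test: when no Axes exists, plt.gca()
    # implicitly creates a Figure and an Axes rather than returning None, which
    # is why the assertions below can compare the two calls by identity.)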
- plt.figure() - ax = plt.gca() - ax1 = plt.gca() - assert ax is not None - assert ax1 is ax - plt.close() - - -def test_subplot_projection_reuse(): - # create an Axes - ax1 = plt.subplot(111) - # check that it is current - assert ax1 is plt.gca() - # make sure we get it back if we ask again - assert ax1 is plt.subplot(111) - # remove it - ax1.remove() - # create a polar plot - ax2 = plt.subplot(111, projection='polar') - assert ax2 is plt.gca() - # this should have deleted the first axes - assert ax1 not in plt.gcf().axes - # assert we get it back if no extra parameters passed - assert ax2 is plt.subplot(111) - ax2.remove() - # now check explicitly setting the projection to rectilinear - # makes a new axes - ax3 = plt.subplot(111, projection='rectilinear') - assert ax3 is plt.gca() - assert ax3 is not ax2 - assert ax2 not in plt.gcf().axes - - -def test_subplot_polar_normalization(): - ax1 = plt.subplot(111, projection='polar') - ax2 = plt.subplot(111, polar=True) - ax3 = plt.subplot(111, polar=True, projection='polar') - assert ax1 is ax2 - assert ax1 is ax3 - - with pytest.raises(ValueError, - match="polar=True, yet projection='3d'"): - ax2 = plt.subplot(111, polar=True, projection='3d') - - -def test_subplot_change_projection(): - created_axes = set() - ax = plt.subplot() - created_axes.add(ax) - projections = ('aitoff', 'hammer', 'lambert', 'mollweide', - 'polar', 'rectilinear', '3d') - for proj in projections: - ax.remove() - ax = plt.subplot(projection=proj) - assert ax is plt.subplot() - assert ax.name == proj - created_axes.add(ax) - # Check that each call created a new Axes. - assert len(created_axes) == 1 + len(projections) - - -def test_polar_second_call(): - # the first call creates the axes with polar projection - ln1, = plt.polar(0., 1., 'ro') - assert isinstance(ln1, mpl.lines.Line2D) - # the second call should reuse the existing axes - ln2, = plt.polar(1.57, .5, 'bo') - assert isinstance(ln2, mpl.lines.Line2D) - assert ln1.axes is ln2.axes - - -def test_fallback_position(): - # check that position kwarg works if rect not supplied - axref = plt.axes([0.2, 0.2, 0.5, 0.5]) - axtest = plt.axes(position=[0.2, 0.2, 0.5, 0.5]) - np.testing.assert_allclose(axtest.bbox.get_points(), - axref.bbox.get_points()) - - # check that position kwarg ignored if rect is supplied - axref = plt.axes([0.2, 0.2, 0.5, 0.5]) - axtest = plt.axes([0.2, 0.2, 0.5, 0.5], position=[0.1, 0.1, 0.8, 0.8]) - np.testing.assert_allclose(axtest.bbox.get_points(), - axref.bbox.get_points()) - - -def test_set_current_figure_via_subfigure(): - fig1 = plt.figure() - subfigs = fig1.subfigures(2) - - plt.figure() - assert plt.gcf() != fig1 - - current = plt.figure(subfigs[1]) - assert plt.gcf() == fig1 - assert current == fig1 - - -def test_set_current_axes_on_subfigure(): - fig = plt.figure() - subfigs = fig.subfigures(2) - - ax = subfigs[0].subplots(1, squeeze=True) - subfigs[1].subplots(1, squeeze=True) - - assert plt.gca() != ax - plt.sca(ax) - assert plt.gca() == ax - - -def test_pylab_integration(): - IPython = pytest.importorskip("IPython") - mpl.testing.subprocess_run_helper( - IPython.start_ipython, - "--pylab", - "-c", - ";".join(( - "import matplotlib.pyplot as plt", - "assert plt._REPL_DISPLAYHOOK == plt._ReplDisplayHook.IPYTHON", - )), - timeout=60, - ) - - -def test_doc_pyplot_summary(): - """Test that pyplot_summary lists all the plot functions.""" - pyplot_docs = Path(__file__).parent / '../../../doc/api/pyplot_summary.rst' - if not pyplot_docs.exists(): - pytest.skip("Documentation sources not 
available") - - def extract_documented_functions(lines): - """ - Return a list of all the functions that are mentioned in the - autosummary blocks contained in *lines*. - - An autosummary block looks like this:: - - .. autosummary:: - :toctree: _as_gen - :template: autosummary.rst - :nosignatures: - - plot - plot_date - - """ - functions = [] - in_autosummary = False - for line in lines: - if not in_autosummary: - if line.startswith(".. autosummary::"): - in_autosummary = True - else: - if not line or line.startswith(" :"): - # empty line or autosummary parameter - continue - if not line[0].isspace(): - # no more indentation: end of autosummary block - in_autosummary = False - continue - functions.append(line.strip()) - return functions - - lines = pyplot_docs.read_text().split("\n") - doc_functions = set(extract_documented_functions(lines)) - plot_commands = set(plt._get_pyplot_commands()) - missing = plot_commands.difference(doc_functions) - if missing: - raise AssertionError( - f"The following pyplot functions are not listed in the " - f"documentation. Please add them to doc/api/pyplot_summary.rst: " - f"{missing!r}") - extra = doc_functions.difference(plot_commands) - if extra: - raise AssertionError( - f"The following functions are listed in the pyplot documentation, " - f"but they do not exist in pyplot. " - f"Please remove them from doc/api/pyplot_summary.rst: {extra!r}") - - -def test_minor_ticks(): - plt.figure() - plt.plot(np.arange(1, 10)) - tick_pos, tick_labels = plt.xticks(minor=True) - assert np.all(tick_labels == np.array([], dtype=np.float64)) - assert tick_labels == [] - - plt.yticks(ticks=[3.5, 6.5], labels=["a", "b"], minor=True) - ax = plt.gca() - tick_pos = ax.get_yticks(minor=True) - tick_labels = ax.get_yticklabels(minor=True) - assert np.all(tick_pos == np.array([3.5, 6.5])) - assert [l.get_text() for l in tick_labels] == ['a', 'b'] - - -def test_switch_backend_no_close(): - plt.switch_backend('agg') - fig = plt.figure() - fig = plt.figure() - assert len(plt.get_fignums()) == 2 - plt.switch_backend('agg') - assert len(plt.get_fignums()) == 2 - with pytest.warns(mpl.MatplotlibDeprecationWarning): - plt.switch_backend('svg') - assert len(plt.get_fignums()) == 0 - - -def figure_hook_example(figure): - figure._test_was_here = True - - -def test_figure_hook(): - - test_rc = { - 'figure.hooks': ['matplotlib.tests.test_pyplot:figure_hook_example'] - } - with mpl.rc_context(test_rc): - fig = plt.figure() - - assert fig._test_was_here diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/units.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/units.py deleted file mode 100644 index e3480f228bb45cff45ed0566ceabdb1f3dcd53e1..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/units.py +++ /dev/null @@ -1,195 +0,0 @@ -""" -The classes here provide support for using custom classes with -Matplotlib, e.g., those that do not expose the array interface but know -how to convert themselves to arrays. It also supports classes with -units and units conversion. Use cases include converters for custom -objects, e.g., a list of datetime objects, as well as for objects that -are unit aware. We don't assume any particular units implementation; -rather a units implementation must register with the Registry converter -dictionary and provide a `ConversionInterface`. 
For example, -here is a complete implementation which supports plotting with native -datetime objects:: - - import matplotlib.units as units - import matplotlib.dates as dates - import matplotlib.ticker as ticker - import datetime - - class DateConverter(units.ConversionInterface): - - @staticmethod - def convert(value, unit, axis): - "Convert a datetime value to a scalar or array." - return dates.date2num(value) - - @staticmethod - def axisinfo(unit, axis): - "Return major and minor tick locators and formatters." - if unit != 'date': - return None - majloc = dates.AutoDateLocator() - majfmt = dates.AutoDateFormatter(majloc) - return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='date') - - @staticmethod - def default_units(x, axis): - "Return the default unit for x or None." - return 'date' - - # Finally we register our object type with the Matplotlib units registry. - units.registry[datetime.date] = DateConverter() -""" - -from decimal import Decimal -from numbers import Number - -import numpy as np -from numpy import ma - -from matplotlib import cbook - - -class ConversionError(TypeError): - pass - - -def _is_natively_supported(x): - """ - Return whether *x* is of a type that Matplotlib natively supports or an - array of objects of such types. - """ - # Matplotlib natively supports all number types except Decimal. - if np.iterable(x): - # Assume lists are homogeneous as other functions in unit system. - for thisx in x: - if thisx is ma.masked: - continue - return isinstance(thisx, Number) and not isinstance(thisx, Decimal) - else: - return isinstance(x, Number) and not isinstance(x, Decimal) - - -class AxisInfo: - """ - Information to support default axis labeling, tick labeling, and limits. - - An instance of this class must be returned by - `ConversionInterface.axisinfo`. - """ - def __init__(self, majloc=None, minloc=None, - majfmt=None, minfmt=None, label=None, - default_limits=None): - """ - Parameters - ---------- - majloc, minloc : Locator, optional - Tick locators for the major and minor ticks. - majfmt, minfmt : Formatter, optional - Tick formatters for the major and minor ticks. - label : str, optional - The default axis label. - default_limits : optional - The default min and max limits of the axis if no data has - been plotted. - - Notes - ----- - If any of the above are ``None``, the axis will simply use the - default value. - """ - self.majloc = majloc - self.minloc = minloc - self.majfmt = majfmt - self.minfmt = minfmt - self.label = label - self.default_limits = default_limits - - -class ConversionInterface: - """ - The minimal interface for a converter to take custom data types (or - sequences) and convert them to values Matplotlib can use. - """ - - @staticmethod - def axisinfo(unit, axis): - """Return an `.AxisInfo` for the axis with the specified units.""" - return None - - @staticmethod - def default_units(x, axis): - """Return the default unit for *x* or ``None`` for the given axis.""" - return None - - @staticmethod - def convert(obj, unit, axis): - """ - Convert *obj* using *unit* for the specified *axis*. - - If *obj* is a sequence, return the converted sequence. The output must - be a sequence of scalars that can be used by the numpy array layer. - """ - return obj - - -class DecimalConverter(ConversionInterface): - """Converter for decimal.Decimal data to float.""" - - @staticmethod - def convert(value, unit, axis): - """ - Convert Decimals to floats. - - The *unit* and *axis* arguments are not used. 
- - Parameters - ---------- - value : decimal.Decimal or iterable - Decimal or list of Decimal need to be converted - """ - if isinstance(value, Decimal): - return float(value) - # value is Iterable[Decimal] - elif isinstance(value, ma.MaskedArray): - return ma.asarray(value, dtype=float) - else: - return np.asarray(value, dtype=float) - - # axisinfo and default_units can be inherited as Decimals are Numbers. - - -class Registry(dict): - """Register types with conversion interface.""" - - def get_converter(self, x): - """Get the converter interface instance for *x*, or None.""" - # Unpack in case of e.g. Pandas or xarray object - x = cbook._unpack_to_numpy(x) - - if isinstance(x, np.ndarray): - # In case x in a masked array, access the underlying data (only its - # type matters). If x is a regular ndarray, getdata() just returns - # the array itself. - x = np.ma.getdata(x).ravel() - # If there are no elements in x, infer the units from its dtype - if not x.size: - return self.get_converter(np.array([0], dtype=x.dtype)) - for cls in type(x).__mro__: # Look up in the cache. - try: - return self[cls] - except KeyError: - pass - try: # If cache lookup fails, look up based on first element... - first = cbook._safe_first_finite(x) - except (TypeError, StopIteration): - pass - else: - # ... and avoid infinite recursion for pathological iterables for - # which indexing returns instances of the same iterable class. - if type(first) is not type(x): - return self.get_converter(first) - return None - - -registry = Registry() -registry[Decimal] = DecimalConverter() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/array_api/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/array_api/__init__.py deleted file mode 100644 index 964873faab20ccc835ec4e0d574a877a00974632..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/array_api/__init__.py +++ /dev/null @@ -1,387 +0,0 @@ -""" -A NumPy sub-namespace that conforms to the Python array API standard. - -This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It -is still considered experimental, and will issue a warning when imported. - -This is a proof-of-concept namespace that wraps the corresponding NumPy -functions to give a conforming implementation of the Python array API standard -(https://data-apis.github.io/array-api/latest/). The standard is currently in -an RFC phase and comments on it are both welcome and encouraged. Comments -should be made either at https://github.com/data-apis/array-api or at -https://github.com/data-apis/consortium-feedback/discussions. - -NumPy already follows the proposed spec for the most part, so this module -serves mostly as a thin wrapper around it. However, NumPy also implements a -lot of behavior that is not included in the spec, so this serves as a -restricted subset of the API. Only those functions that are part of the spec -are included in this namespace, and all functions are given with the exact -signature given in the spec, including the use of position-only arguments, and -omitting any extra keyword arguments implemented by NumPy but not part of the -spec. The behavior of some functions is also modified from the NumPy behavior -to conform to the standard. Note that the underlying array object itself is -wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule -is implemented in pure Python with no C extensions. 
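(An illustrative usage sketch, added here and not part of the original
docstring; it uses only names defined by this namespace:

    import numpy.array_api as xp

    a = xp.asarray([1.0, 2.0, 3.0])
    m = xp.mean(a)   # a 0-D Array, since the spec defines no scalar objects

Raw numpy.ndarray inputs are not supported by these functions; data must
enter through the creation functions such as asarray().)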
- -The array API spec is designed as a "minimal API subset" and explicitly allows -libraries to include behaviors not specified by it. But users of this module -that intend to write portable code should be aware that only those behaviors -that are listed in the spec are guaranteed to be implemented across libraries. -Consequently, the NumPy implementation was chosen to be both conforming and -minimal, so that users can use this implementation of the array API namespace -and be sure that behaviors that it defines will be available in conforming -namespaces from other libraries. - -A few notes about the current state of this submodule: - -- There is a test suite that tests modules against the array API standard at - https://github.com/data-apis/array-api-tests. The test suite is still a work - in progress, but the existing tests pass on this module, with a few - exceptions: - - - DLPack support (see https://github.com/data-apis/array-api/pull/106) is - not included here, as it requires a full implementation in NumPy proper - first. - - The test suite is not yet complete, and even the tests that exist are not - guaranteed to give a comprehensive coverage of the spec. Therefore, when - reviewing and using this submodule, you should refer to the standard - documents themselves. There are some tests in numpy.array_api.tests, but - they primarily focus on things that are not tested by the official array API - test suite. - -- There is a custom array object, numpy.array_api.Array, which is returned by - all functions in this module. All functions in the array API namespace - implicitly assume that they will only receive this object as input. The only - way to create instances of this object is to use one of the array creation - functions. It does not have a public constructor on the object itself. The - object is a small wrapper class around numpy.ndarray. The main purpose of it - is to restrict the namespace of the array object to only those dtypes and - only those methods that are required by the spec, as well as to limit/change - certain behavior that differs in the spec. In particular: - - - The array API namespace does not have scalar objects, only 0-D arrays. - Operations on Array that would create a scalar in NumPy create a 0-D - array. - - - Indexing: Only a subset of indices supported by NumPy are required by the - spec. The Array object restricts indexing to only allow those types of - indices that are required by the spec. See the docstring of the - numpy.array_api.Array._validate_indices helper function for more - information. - - - Type promotion: Some type promotion rules are different in the spec. In - particular, the spec does not have any value-based casting. The spec also - does not require cross-kind casting, like integer -> floating-point. Only - those promotions that are explicitly required by the array API - specification are allowed in this module. See NEP 47 for more info. - - - Functions do not automatically call asarray() on their input, and will not - work if the input type is not Array. The exception is array creation - functions, and Python operators on the Array object, which accept Python - scalars of the same type as the array dtype. - -- All functions include type annotations, corresponding to those given in the - spec (see _typing.py for definitions of some custom types). These do not - currently fully pass mypy due to some limitations in mypy. - -- Dtype objects are just the NumPy dtype objects, e.g., float64 = - np.dtype('float64'). 
The spec does not require any behavior on these dtype - objects other than that they be accessible by name and be comparable by - equality, but it was considered too much extra complexity to create custom - objects to represent dtypes. - -- All places where the implementations in this submodule are known to deviate - from their corresponding functions in NumPy are marked with "# Note:" - comments. - -Still TODO in this module are: - -- DLPack support for numpy.ndarray is still in progress. See - https://github.com/numpy/numpy/pull/19083. - -- The copy=False keyword argument to asarray() is not yet implemented. This - requires support in numpy.asarray() first. - -- Some functions are not yet fully tested in the array API test suite, and may - require updates that are not yet known until the tests are written. - -- The spec is still in an RFC phase and may still have minor updates, which - will need to be reflected here. - -- Complex number support in array API spec is planned but not yet finalized, - as are the fft extension and certain linear algebra functions such as eig - that require complex dtypes. - -""" - -import warnings - -warnings.warn( - "The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2 -) - -__array_api_version__ = "2022.12" - -__all__ = ["__array_api_version__"] - -from ._constants import e, inf, nan, pi - -__all__ += ["e", "inf", "nan", "pi"] - -from ._creation_functions import ( - asarray, - arange, - empty, - empty_like, - eye, - from_dlpack, - full, - full_like, - linspace, - meshgrid, - ones, - ones_like, - tril, - triu, - zeros, - zeros_like, -) - -__all__ += [ - "asarray", - "arange", - "empty", - "empty_like", - "eye", - "from_dlpack", - "full", - "full_like", - "linspace", - "meshgrid", - "ones", - "ones_like", - "tril", - "triu", - "zeros", - "zeros_like", -] - -from ._data_type_functions import ( - astype, - broadcast_arrays, - broadcast_to, - can_cast, - finfo, - isdtype, - iinfo, - result_type, -) - -__all__ += [ - "astype", - "broadcast_arrays", - "broadcast_to", - "can_cast", - "finfo", - "iinfo", - "result_type", -] - -from ._dtypes import ( - int8, - int16, - int32, - int64, - uint8, - uint16, - uint32, - uint64, - float32, - float64, - complex64, - complex128, - bool, -) - -__all__ += [ - "int8", - "int16", - "int32", - "int64", - "uint8", - "uint16", - "uint32", - "uint64", - "float32", - "float64", - "bool", -] - -from ._elementwise_functions import ( - abs, - acos, - acosh, - add, - asin, - asinh, - atan, - atan2, - atanh, - bitwise_and, - bitwise_left_shift, - bitwise_invert, - bitwise_or, - bitwise_right_shift, - bitwise_xor, - ceil, - conj, - cos, - cosh, - divide, - equal, - exp, - expm1, - floor, - floor_divide, - greater, - greater_equal, - imag, - isfinite, - isinf, - isnan, - less, - less_equal, - log, - log1p, - log2, - log10, - logaddexp, - logical_and, - logical_not, - logical_or, - logical_xor, - multiply, - negative, - not_equal, - positive, - pow, - real, - remainder, - round, - sign, - sin, - sinh, - square, - sqrt, - subtract, - tan, - tanh, - trunc, -) - -__all__ += [ - "abs", - "acos", - "acosh", - "add", - "asin", - "asinh", - "atan", - "atan2", - "atanh", - "bitwise_and", - "bitwise_left_shift", - "bitwise_invert", - "bitwise_or", - "bitwise_right_shift", - "bitwise_xor", - "ceil", - "cos", - "cosh", - "divide", - "equal", - "exp", - "expm1", - "floor", - "floor_divide", - "greater", - "greater_equal", - "isfinite", - "isinf", - "isnan", - "less", - "less_equal", - "log", - "log1p", - "log2", - "log10", - 
"logaddexp", - "logical_and", - "logical_not", - "logical_or", - "logical_xor", - "multiply", - "negative", - "not_equal", - "positive", - "pow", - "remainder", - "round", - "sign", - "sin", - "sinh", - "square", - "sqrt", - "subtract", - "tan", - "tanh", - "trunc", -] - -from ._indexing_functions import take - -__all__ += ["take"] - -# linalg is an extension in the array API spec, which is a sub-namespace. Only -# a subset of functions in it are imported into the top-level namespace. -from . import linalg - -__all__ += ["linalg"] - -from .linalg import matmul, tensordot, matrix_transpose, vecdot - -__all__ += ["matmul", "tensordot", "matrix_transpose", "vecdot"] - -from ._manipulation_functions import ( - concat, - expand_dims, - flip, - permute_dims, - reshape, - roll, - squeeze, - stack, -) - -__all__ += ["concat", "expand_dims", "flip", "permute_dims", "reshape", "roll", "squeeze", "stack"] - -from ._searching_functions import argmax, argmin, nonzero, where - -__all__ += ["argmax", "argmin", "nonzero", "where"] - -from ._set_functions import unique_all, unique_counts, unique_inverse, unique_values - -__all__ += ["unique_all", "unique_counts", "unique_inverse", "unique_values"] - -from ._sorting_functions import argsort, sort - -__all__ += ["argsort", "sort"] - -from ._statistical_functions import max, mean, min, prod, std, sum, var - -__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"] - -from ._utility_functions import all, any - -__all__ += ["all", "any"] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/arrays/string_arrow.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/arrays/string_arrow.py deleted file mode 100644 index 2eef240af53f8f6902de472c5951857bb4ab580f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/arrays/string_arrow.py +++ /dev/null @@ -1,629 +0,0 @@ -from __future__ import annotations - -from functools import partial -import re -from typing import ( - TYPE_CHECKING, - Callable, - Union, -) -import warnings - -import numpy as np - -from pandas._libs import ( - lib, - missing as libmissing, -) -from pandas.compat import pa_version_under7p0 -from pandas.util._exceptions import find_stack_level - -from pandas.core.dtypes.common import ( - is_bool_dtype, - is_integer_dtype, - is_object_dtype, - is_scalar, - is_string_dtype, - pandas_dtype, -) -from pandas.core.dtypes.missing import isna - -from pandas.core.arrays._arrow_string_mixins import ArrowStringArrayMixin -from pandas.core.arrays.arrow import ArrowExtensionArray -from pandas.core.arrays.boolean import BooleanDtype -from pandas.core.arrays.integer import Int64Dtype -from pandas.core.arrays.numeric import NumericDtype -from pandas.core.arrays.string_ import ( - BaseStringArray, - StringDtype, -) -from pandas.core.strings.object_array import ObjectStringArrayMixin - -if not pa_version_under7p0: - import pyarrow as pa - import pyarrow.compute as pc - - from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning - - -if TYPE_CHECKING: - from pandas._typing import ( - AxisInt, - Dtype, - Scalar, - npt, - ) - - -ArrowStringScalarOrNAT = Union[str, libmissing.NAType] - - -def _chk_pyarrow_available() -> None: - if pa_version_under7p0: - msg = "pyarrow>=7.0.0 is required for PyArrow backed ArrowExtensionArray." - raise ImportError(msg) - - -# TODO: Inherit directly from BaseStringArrayMethods. 
Currently we inherit from -# ObjectStringArrayMixin because we want to have the object-dtype based methods as -# fallback for the ones that pyarrow doesn't yet support - - -class ArrowStringArray(ObjectStringArrayMixin, ArrowExtensionArray, BaseStringArray): - """ - Extension array for string data in a ``pyarrow.ChunkedArray``. - - .. versionadded:: 1.2.0 - - .. warning:: - - ArrowStringArray is considered experimental. The implementation and - parts of the API may change without warning. - - Parameters - ---------- - values : pyarrow.Array or pyarrow.ChunkedArray - The array of data. - - Attributes - ---------- - None - - Methods - ------- - None - - See Also - -------- - :func:`pandas.array` - The recommended function for creating a ArrowStringArray. - Series.str - The string methods are available on Series backed by - a ArrowStringArray. - - Notes - ----- - ArrowStringArray returns a BooleanArray for comparison methods. - - Examples - -------- - >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string[pyarrow]") - - ['This is', 'some text', , 'data.'] - Length: 4, dtype: string - """ - - # error: Incompatible types in assignment (expression has type "StringDtype", - # base class "ArrowExtensionArray" defined the type as "ArrowDtype") - _dtype: StringDtype # type: ignore[assignment] - _storage = "pyarrow" - - def __init__(self, values) -> None: - super().__init__(values) - self._dtype = StringDtype(storage=self._storage) - - if not pa.types.is_string(self._pa_array.type) and not ( - pa.types.is_dictionary(self._pa_array.type) - and pa.types.is_string(self._pa_array.type.value_type) - ): - raise ValueError( - "ArrowStringArray requires a PyArrow (chunked) array of string type" - ) - - def __len__(self) -> int: - """ - Length of this array. - - Returns - ------- - length : int - """ - return len(self._pa_array) - - @classmethod - def _from_sequence(cls, scalars, dtype: Dtype | None = None, copy: bool = False): - from pandas.core.arrays.masked import BaseMaskedArray - - _chk_pyarrow_available() - - if dtype and not (isinstance(dtype, str) and dtype == "string"): - dtype = pandas_dtype(dtype) - assert isinstance(dtype, StringDtype) and dtype.storage in ( - "pyarrow", - "pyarrow_numpy", - ) - - if isinstance(scalars, BaseMaskedArray): - # avoid costly conversion to object dtype in ensure_string_array and - # numerical issues with Float32Dtype - na_values = scalars._mask - result = scalars._data - result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) - return cls(pa.array(result, mask=na_values, type=pa.string())) - elif isinstance(scalars, (pa.Array, pa.ChunkedArray)): - return cls(pc.cast(scalars, pa.string())) - - # convert non-na-likes to str - result = lib.ensure_string_array(scalars, copy=copy) - return cls(pa.array(result, type=pa.string(), from_pandas=True)) - - @classmethod - def _from_sequence_of_strings( - cls, strings, dtype: Dtype | None = None, copy: bool = False - ): - return cls._from_sequence(strings, dtype=dtype, copy=copy) - - @property - def dtype(self) -> StringDtype: # type: ignore[override] - """ - An instance of 'string[pyarrow]'. 
- """ - return self._dtype - - def insert(self, loc: int, item) -> ArrowStringArray: - if not isinstance(item, str) and item is not libmissing.NA: - raise TypeError("Scalar must be NA or str") - return super().insert(loc, item) - - @classmethod - def _result_converter(cls, values, na=None): - return BooleanDtype().__from_arrow__(values) - - def _maybe_convert_setitem_value(self, value): - """Maybe convert value to be pyarrow compatible.""" - if is_scalar(value): - if isna(value): - value = None - elif not isinstance(value, str): - raise TypeError("Scalar must be NA or str") - else: - value = np.array(value, dtype=object, copy=True) - value[isna(value)] = None - for v in value: - if not (v is None or isinstance(v, str)): - raise TypeError("Scalar must be NA or str") - return super()._maybe_convert_setitem_value(value) - - def isin(self, values) -> npt.NDArray[np.bool_]: - value_set = [ - pa_scalar.as_py() - for pa_scalar in [pa.scalar(value, from_pandas=True) for value in values] - if pa_scalar.type in (pa.string(), pa.null()) - ] - - # short-circuit to return all False array. - if not len(value_set): - return np.zeros(len(self), dtype=bool) - - result = pc.is_in(self._pa_array, value_set=pa.array(value_set)) - # pyarrow 2.0.0 returned nulls, so we explicily specify dtype to convert nulls - # to False - return np.array(result, dtype=np.bool_) - - def astype(self, dtype, copy: bool = True): - dtype = pandas_dtype(dtype) - - if dtype == self.dtype: - if copy: - return self.copy() - return self - elif isinstance(dtype, NumericDtype): - data = self._pa_array.cast(pa.from_numpy_dtype(dtype.numpy_dtype)) - return dtype.__from_arrow__(data) - elif isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.floating): - return self.to_numpy(dtype=dtype, na_value=np.nan) - - return super().astype(dtype, copy=copy) - - @property - def _data(self): - # dask accesses ._data directlys - warnings.warn( - f"{type(self).__name__}._data is a deprecated and will be removed " - "in a future version, use ._pa_array instead", - FutureWarning, - stacklevel=find_stack_level(), - ) - return self._pa_array - - # ------------------------------------------------------------------------ - # String methods interface - - # error: Incompatible types in assignment (expression has type "NAType", - # base class "ObjectStringArrayMixin" defined the type as "float") - _str_na_value = libmissing.NA # type: ignore[assignment] - - def _str_map( - self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True - ): - # TODO: de-duplicate with StringArray method. This method is moreless copy and - # paste. 
- - from pandas.arrays import ( - BooleanArray, - IntegerArray, - ) - - if dtype is None: - dtype = self.dtype - if na_value is None: - na_value = self.dtype.na_value - - mask = isna(self) - arr = np.asarray(self) - - if is_integer_dtype(dtype) or is_bool_dtype(dtype): - constructor: type[IntegerArray] | type[BooleanArray] - if is_integer_dtype(dtype): - constructor = IntegerArray - else: - constructor = BooleanArray - - na_value_is_na = isna(na_value) - if na_value_is_na: - na_value = 1 - result = lib.map_infer_mask( - arr, - f, - mask.view("uint8"), - convert=False, - na_value=na_value, - # error: Argument 1 to "dtype" has incompatible type - # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected - # "Type[object]" - dtype=np.dtype(dtype), # type: ignore[arg-type] - ) - - if not na_value_is_na: - mask[:] = False - - return constructor(result, mask) - - elif is_string_dtype(dtype) and not is_object_dtype(dtype): - # i.e. StringDtype - result = lib.map_infer_mask( - arr, f, mask.view("uint8"), convert=False, na_value=na_value - ) - result = pa.array(result, mask=mask, type=pa.string(), from_pandas=True) - return type(self)(result) - else: - # This is when the result type is object. We reach this when - # -> We know the result type is truly object (e.g. .encode returns bytes - # or .findall returns a list). - # -> We don't know the result type. E.g. `.get` can return anything. - return lib.map_infer_mask(arr, f, mask.view("uint8")) - - def _str_contains( - self, pat, case: bool = True, flags: int = 0, na=np.nan, regex: bool = True - ): - if flags: - fallback_performancewarning() - return super()._str_contains(pat, case, flags, na, regex) - - if regex: - result = pc.match_substring_regex(self._pa_array, pat, ignore_case=not case) - else: - result = pc.match_substring(self._pa_array, pat, ignore_case=not case) - result = self._result_converter(result, na=na) - if not isna(na): - result[isna(result)] = bool(na) - return result - - def _str_startswith(self, pat: str, na=None): - result = pc.starts_with(self._pa_array, pattern=pat) - if not isna(na): - result = result.fill_null(na) - result = self._result_converter(result) - if not isna(na): - result[isna(result)] = bool(na) - return result - - def _str_endswith(self, pat: str, na=None): - result = pc.ends_with(self._pa_array, pattern=pat) - if not isna(na): - result = result.fill_null(na) - result = self._result_converter(result) - if not isna(na): - result[isna(result)] = bool(na) - return result - - def _str_replace( - self, - pat: str | re.Pattern, - repl: str | Callable, - n: int = -1, - case: bool = True, - flags: int = 0, - regex: bool = True, - ): - if isinstance(pat, re.Pattern) or callable(repl) or not case or flags: - fallback_performancewarning() - return super()._str_replace(pat, repl, n, case, flags, regex) - - func = pc.replace_substring_regex if regex else pc.replace_substring - result = func(self._pa_array, pattern=pat, replacement=repl, max_replacements=n) - return type(self)(result) - - def _str_match( - self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None - ): - if not pat.startswith("^"): - pat = f"^{pat}" - return self._str_contains(pat, case, flags, na, regex=True) - - def _str_fullmatch( - self, pat, case: bool = True, flags: int = 0, na: Scalar | None = None - ): - if not pat.endswith("$") or pat.endswith("//$"): - pat = f"{pat}$" - return self._str_match(pat, case, flags, na) - - def _str_isalnum(self): - result = pc.utf8_is_alnum(self._pa_array) - return self._result_converter(result) 
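    # (Added note: the _str_is* predicates below all share one shape -- invoke
    # the matching pyarrow.compute utf8_is_* kernel on the backing ChunkedArray
    # and pass the boolean Arrow result through _result_converter. For example,
    # pc.utf8_is_alpha(pa.chunked_array([["abc", "a1"]])) yields [True, False].)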
- - def _str_isalpha(self): - result = pc.utf8_is_alpha(self._pa_array) - return self._result_converter(result) - - def _str_isdecimal(self): - result = pc.utf8_is_decimal(self._pa_array) - return self._result_converter(result) - - def _str_isdigit(self): - result = pc.utf8_is_digit(self._pa_array) - return self._result_converter(result) - - def _str_islower(self): - result = pc.utf8_is_lower(self._pa_array) - return self._result_converter(result) - - def _str_isnumeric(self): - result = pc.utf8_is_numeric(self._pa_array) - return self._result_converter(result) - - def _str_isspace(self): - result = pc.utf8_is_space(self._pa_array) - return self._result_converter(result) - - def _str_istitle(self): - result = pc.utf8_is_title(self._pa_array) - return self._result_converter(result) - - def _str_isupper(self): - result = pc.utf8_is_upper(self._pa_array) - return self._result_converter(result) - - def _str_len(self): - result = pc.utf8_length(self._pa_array) - return Int64Dtype().__from_arrow__(result) - - def _str_lower(self): - return type(self)(pc.utf8_lower(self._pa_array)) - - def _str_upper(self): - return type(self)(pc.utf8_upper(self._pa_array)) - - def _str_strip(self, to_strip=None): - if to_strip is None: - result = pc.utf8_trim_whitespace(self._pa_array) - else: - result = pc.utf8_trim(self._pa_array, characters=to_strip) - return type(self)(result) - - def _str_lstrip(self, to_strip=None): - if to_strip is None: - result = pc.utf8_ltrim_whitespace(self._pa_array) - else: - result = pc.utf8_ltrim(self._pa_array, characters=to_strip) - return type(self)(result) - - def _str_rstrip(self, to_strip=None): - if to_strip is None: - result = pc.utf8_rtrim_whitespace(self._pa_array) - else: - result = pc.utf8_rtrim(self._pa_array, characters=to_strip) - return type(self)(result) - - def _reduce( - self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs - ): - result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs) - if name in ("argmin", "argmax") and isinstance(result, pa.Array): - return self._convert_int_dtype(result) - elif isinstance(result, pa.Array): - return type(self)(result) - else: - return result - - def _convert_int_dtype(self, result): - return Int64Dtype().__from_arrow__(result) - - def _rank( - self, - *, - axis: AxisInt = 0, - method: str = "average", - na_option: str = "keep", - ascending: bool = True, - pct: bool = False, - ): - """ - See Series.rank.__doc__. 
- """ - return self._convert_int_dtype( - self._rank_calc( - axis=axis, - method=method, - na_option=na_option, - ascending=ascending, - pct=pct, - ) - ) - - -class ArrowStringArrayNumpySemantics(ArrowStringArray): - _storage = "pyarrow_numpy" - - def __init__(self, values) -> None: - _chk_pyarrow_available() - - if isinstance(values, (pa.Array, pa.ChunkedArray)) and pa.types.is_large_string( - values.type - ): - values = pc.cast(values, pa.string()) - super().__init__(values) - - @classmethod - def _result_converter(cls, values, na=None): - if not isna(na): - values = values.fill_null(bool(na)) - return ArrowExtensionArray(values).to_numpy(na_value=np.nan) - - def __getattribute__(self, item): - # ArrowStringArray and we both inherit from ArrowExtensionArray, which - # creates inheritance problems (Diamond inheritance) - if item in ArrowStringArrayMixin.__dict__ and item not in ( - "_pa_array", - "__dict__", - ): - return partial(getattr(ArrowStringArrayMixin, item), self) - return super().__getattribute__(item) - - def _str_map( - self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True - ): - if dtype is None: - dtype = self.dtype - if na_value is None: - na_value = self.dtype.na_value - - mask = isna(self) - arr = np.asarray(self) - - if is_integer_dtype(dtype) or is_bool_dtype(dtype): - if is_integer_dtype(dtype): - na_value = np.nan - else: - na_value = False - try: - result = lib.map_infer_mask( - arr, - f, - mask.view("uint8"), - convert=False, - na_value=na_value, - dtype=np.dtype(dtype), # type: ignore[arg-type] - ) - return result - - except ValueError: - result = lib.map_infer_mask( - arr, - f, - mask.view("uint8"), - convert=False, - na_value=na_value, - ) - if convert and result.dtype == object: - result = lib.maybe_convert_objects(result) - return result - - elif is_string_dtype(dtype) and not is_object_dtype(dtype): - # i.e. StringDtype - result = lib.map_infer_mask( - arr, f, mask.view("uint8"), convert=False, na_value=na_value - ) - result = pa.array(result, mask=mask, type=pa.string(), from_pandas=True) - return type(self)(result) - else: - # This is when the result type is object. We reach this when - # -> We know the result type is truly object (e.g. .encode returns bytes - # or .findall returns a list). - # -> We don't know the result type. E.g. `.get` can return anything. 
- return lib.map_infer_mask(arr, f, mask.view("uint8")) - - def _convert_int_dtype(self, result): - if isinstance(result, pa.Array): - result = result.to_numpy(zero_copy_only=False) - elif not isinstance(result, np.ndarray): - result = result.to_numpy() - if result.dtype == np.int32: - result = result.astype(np.int64) - return result - - def _str_count(self, pat: str, flags: int = 0): - if flags: - return super()._str_count(pat, flags) - result = pc.count_substring_regex(self._pa_array, pat).to_numpy() - return self._convert_int_dtype(result) - - def _str_len(self): - result = pc.utf8_length(self._pa_array).to_numpy() - return self._convert_int_dtype(result) - - def _str_find(self, sub: str, start: int = 0, end: int | None = None): - if start != 0 and end is not None: - slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end) - result = pc.find_substring(slices, sub) - not_found = pc.equal(result, -1) - offset_result = pc.add(result, end - start) - result = pc.if_else(not_found, result, offset_result) - elif start == 0 and end is None: - slices = self._pa_array - result = pc.find_substring(slices, sub) - else: - return super()._str_find(sub, start, end) - return self._convert_int_dtype(result.to_numpy()) - - def _cmp_method(self, other, op): - result = super()._cmp_method(other, op) - return result.to_numpy(np.bool_, na_value=False) - - def value_counts(self, dropna: bool = True): - from pandas import Series - - result = super().value_counts(dropna) - return Series( - result._values.to_numpy(), index=result.index, name=result.name, copy=False - ) - - def _reduce( - self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs - ): - if name in ["any", "all"]: - if not skipna and name == "all": - nas = pc.invert(pc.is_null(self._pa_array)) - arr = pc.and_kleene(nas, pc.not_equal(self._pa_array, "")) - else: - arr = pc.not_equal(self._pa_array, "") - return ArrowExtensionArray(arr)._reduce( - name, skipna=skipna, keepdims=keepdims, **kwargs - ) - else: - return super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) - - def insert(self, loc: int, item) -> ArrowStringArrayNumpySemantics: - if item is np.nan: - item = libmissing.NA - return super().insert(loc, item) # type: ignore[return-value] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/chardet/mbcsgroupprober.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/chardet/mbcsgroupprober.py deleted file mode 100644 index 530abe75e0c00cbfcb2a310d872866f320977d0a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/chardet/mbcsgroupprober.py +++ /dev/null @@ -1,54 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# Proofpoint, Inc. -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetgroupprober import CharSetGroupProber -from .utf8prober import UTF8Prober -from .sjisprober import SJISProber -from .eucjpprober import EUCJPProber -from .gb2312prober import GB2312Prober -from .euckrprober import EUCKRProber -from .cp949prober import CP949Prober -from .big5prober import Big5Prober -from .euctwprober import EUCTWProber - - -class MBCSGroupProber(CharSetGroupProber): - def __init__(self, lang_filter=None): - super(MBCSGroupProber, self).__init__(lang_filter=lang_filter) - self.probers = [ - UTF8Prober(), - SJISProber(), - EUCJPProber(), - GB2312Prober(), - EUCKRProber(), - CP949Prober(), - Big5Prober(), - EUCTWProber() - ] - self.reset() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/v1/generics.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/v1/generics.py deleted file mode 100644 index a75b6b987da7335f390945ee40195ea2f96c65e9..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/v1/generics.py +++ /dev/null @@ -1,400 +0,0 @@ -import sys -import types -import typing -from typing import ( - TYPE_CHECKING, - Any, - ClassVar, - Dict, - ForwardRef, - Generic, - Iterator, - List, - Mapping, - Optional, - Tuple, - Type, - TypeVar, - Union, - cast, -) -from weakref import WeakKeyDictionary, WeakValueDictionary - -from typing_extensions import Annotated, Literal as ExtLiteral - -from .class_validators import gather_all_validators -from .fields import DeferredType -from .main import BaseModel, create_model -from .types import JsonWrapper -from .typing import display_as_type, get_all_type_hints, get_args, get_origin, typing_base -from .utils import all_identical, lenient_issubclass - -if sys.version_info >= (3, 10): - from typing import _UnionGenericAlias -if sys.version_info >= (3, 8): - from typing import Literal - -GenericModelT = TypeVar('GenericModelT', bound='GenericModel') -TypeVarType = Any # since mypy doesn't allow the use of TypeVar as a type - -CacheKey = Tuple[Type[Any], Any, Tuple[Any, ...]] -Parametrization = Mapping[TypeVarType, Type[Any]] - -# weak dictionaries allow the dynamically created parametrized versions of generic models to get collected -# once they are no longer referenced by the caller. -if sys.version_info >= (3, 9): # Typing for weak dictionaries available at 3.9 - GenericTypesCache = WeakValueDictionary[CacheKey, Type[BaseModel]] - AssignedParameters = WeakKeyDictionary[Type[BaseModel], Parametrization] -else: - GenericTypesCache = WeakValueDictionary - AssignedParameters = WeakKeyDictionary - -# _generic_types_cache is a Mapping from __class_getitem__ arguments to the parametrized version of generic models. -# This ensures multiple calls of e.g. A[B] return always the same class. 
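# (Added note: both caches below are weak by design -- a parametrization such
# as Model[int] stays cached only while caller code holds a strong reference
# to the generated class; once that reference is dropped and collected, a
# later Model[int] transparently rebuilds an equivalent class.)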
-_generic_types_cache = GenericTypesCache() - -# _assigned_parameters is a Mapping from parametrized version of generic models to assigned types of parametrizations -# as captured during construction of the class (not instances). -# E.g., for generic model `Model[A, B]`, when parametrized model `Model[int, str]` is created, -# `Model[int, str]`: {A: int, B: str}` will be stored in `_assigned_parameters`. -# (This information is only otherwise available after creation from the class name string). -_assigned_parameters = AssignedParameters() - - -class GenericModel(BaseModel): - __slots__ = () - __concrete__: ClassVar[bool] = False - - if TYPE_CHECKING: - # Putting this in a TYPE_CHECKING block allows us to replace `if Generic not in cls.__bases__` with - # `not hasattr(cls, "__parameters__")`. This means we don't need to force non-concrete subclasses of - # `GenericModel` to also inherit from `Generic`, which would require changes to the use of `create_model` below. - __parameters__: ClassVar[Tuple[TypeVarType, ...]] - - # Setting the return type as Type[Any] instead of Type[BaseModel] prevents PyCharm warnings - def __class_getitem__(cls: Type[GenericModelT], params: Union[Type[Any], Tuple[Type[Any], ...]]) -> Type[Any]: - """Instantiates a new class from a generic class `cls` and type variables `params`. - - :param params: Tuple of types the class . Given a generic class - `Model` with 2 type variables and a concrete model `Model[str, int]`, - the value `(str, int)` would be passed to `params`. - :return: New model class inheriting from `cls` with instantiated - types described by `params`. If no parameters are given, `cls` is - returned as is. - - """ - - def _cache_key(_params: Any) -> CacheKey: - args = get_args(_params) - # python returns a list for Callables, which is not hashable - if len(args) == 2 and isinstance(args[0], list): - args = (tuple(args[0]), args[1]) - return cls, _params, args - - cached = _generic_types_cache.get(_cache_key(params)) - if cached is not None: - return cached - if cls.__concrete__ and Generic not in cls.__bases__: - raise TypeError('Cannot parameterize a concrete instantiation of a generic model') - if not isinstance(params, tuple): - params = (params,) - if cls is GenericModel and any(isinstance(param, TypeVar) for param in params): - raise TypeError('Type parameters should be placed on typing.Generic, not GenericModel') - if not hasattr(cls, '__parameters__'): - raise TypeError(f'Type {cls.__name__} must inherit from typing.Generic before being parameterized') - - check_parameters_count(cls, params) - # Build map from generic typevars to passed params - typevars_map: Dict[TypeVarType, Type[Any]] = dict(zip(cls.__parameters__, params)) - if all_identical(typevars_map.keys(), typevars_map.values()) and typevars_map: - return cls # if arguments are equal to parameters it's the same object - - # Create new model with original model as parent inserting fields with DeferredType. 
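        # (Added note: DeferredType is only a placeholder here; the concrete
        # annotations are filled in by _prepare_model_fields at the end of this
        # method, once replace_types has substituted the entries of
        # typevars_map into the inherited type hints.)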
- model_name = cls.__concrete_name__(params) - validators = gather_all_validators(cls) - - type_hints = get_all_type_hints(cls).items() - instance_type_hints = {k: v for k, v in type_hints if get_origin(v) is not ClassVar} - - fields = {k: (DeferredType(), cls.__fields__[k].field_info) for k in instance_type_hints if k in cls.__fields__} - - model_module, called_globally = get_caller_frame_info() - created_model = cast( - Type[GenericModel], # casting ensures mypy is aware of the __concrete__ and __parameters__ attributes - create_model( - model_name, - __module__=model_module or cls.__module__, - __base__=(cls,) + tuple(cls.__parameterized_bases__(typevars_map)), - __config__=None, - __validators__=validators, - __cls_kwargs__=None, - **fields, - ), - ) - - _assigned_parameters[created_model] = typevars_map - - if called_globally: # create global reference and therefore allow pickling - object_by_reference = None - reference_name = model_name - reference_module_globals = sys.modules[created_model.__module__].__dict__ - while object_by_reference is not created_model: - object_by_reference = reference_module_globals.setdefault(reference_name, created_model) - reference_name += '_' - - created_model.Config = cls.Config - - # Find any typevars that are still present in the model. - # If none are left, the model is fully "concrete", otherwise the new - # class is a generic class as well taking the found typevars as - # parameters. - new_params = tuple( - {param: None for param in iter_contained_typevars(typevars_map.values())} - ) # use dict as ordered set - created_model.__concrete__ = not new_params - if new_params: - created_model.__parameters__ = new_params - - # Save created model in cache so we don't end up creating duplicate - # models that should be identical. - _generic_types_cache[_cache_key(params)] = created_model - if len(params) == 1: - _generic_types_cache[_cache_key(params[0])] = created_model - - # Recursively walk class type hints and replace generic typevars - # with concrete types that were passed. - _prepare_model_fields(created_model, fields, instance_type_hints, typevars_map) - - return created_model - - @classmethod - def __concrete_name__(cls: Type[Any], params: Tuple[Type[Any], ...]) -> str: - """Compute class name for child classes. - - :param params: Tuple of types the class . Given a generic class - `Model` with 2 type variables and a concrete model `Model[str, int]`, - the value `(str, int)` would be passed to `params`. - :return: String representing a the new class where `params` are - passed to `cls` as type variables. - - This method can be overridden to achieve a custom naming scheme for GenericModels. - """ - param_names = [display_as_type(param) for param in params] - params_component = ', '.join(param_names) - return f'{cls.__name__}[{params_component}]' - - @classmethod - def __parameterized_bases__(cls, typevars_map: Parametrization) -> Iterator[Type[Any]]: - """ - Returns unbound bases of cls parameterised to given type variables - - :param typevars_map: Dictionary of type applications for binding subclasses. - Given a generic class `Model` with 2 type variables [S, T] - and a concrete model `Model[str, int]`, - the value `{S: str, T: int}` would be passed to `typevars_map`. - :return: an iterator of generic sub classes, parameterised by `typevars_map` - and other assigned parameters of `cls` - - e.g.: - ``` - class A(GenericModel, Generic[T]): - ... - - class B(A[V], Generic[V]): - ... 
- - assert A[int] in B.__parameterized_bases__({V: int}) - ``` - """ - - def build_base_model( - base_model: Type[GenericModel], mapped_types: Parametrization - ) -> Iterator[Type[GenericModel]]: - base_parameters = tuple(mapped_types[param] for param in base_model.__parameters__) - parameterized_base = base_model.__class_getitem__(base_parameters) - if parameterized_base is base_model or parameterized_base is cls: - # Avoid duplication in MRO - return - yield parameterized_base - - for base_model in cls.__bases__: - if not issubclass(base_model, GenericModel): - # not a class that can be meaningfully parameterized - continue - elif not getattr(base_model, '__parameters__', None): - # base_model is "GenericModel" (and has no __parameters__) - # or - # base_model is already concrete, and will be included transitively via cls. - continue - elif cls in _assigned_parameters: - if base_model in _assigned_parameters: - # cls is partially parameterised but not from base_model - # e.g. cls = B[S], base_model = A[S] - # B[S][int] should subclass A[int], (and will be transitively via B[int]) - # but it's not viable to consistently subclass types with arbitrary construction - # So don't attempt to include A[S][int] - continue - else: # base_model not in _assigned_parameters: - # cls is partially parameterized, base_model is original generic - # e.g. cls = B[str, T], base_model = B[S, T] - # Need to determine the mapping for the base_model parameters - mapped_types: Parametrization = { - key: typevars_map.get(value, value) for key, value in _assigned_parameters[cls].items() - } - yield from build_base_model(base_model, mapped_types) - else: - # cls is base generic, so base_class has a distinct base - # can construct the Parameterised base model using typevars_map directly - yield from build_base_model(base_model, typevars_map) - - -def replace_types(type_: Any, type_map: Mapping[Any, Any]) -> Any: - """Return type with all occurrences of `type_map` keys recursively replaced with their values. - - :param type_: Any type, class or generic alias - :param type_map: Mapping from `TypeVar` instance to concrete types. - :return: New type representing the basic structure of `type_` with all - `typevar_map` keys recursively replaced. - - >>> replace_types(Tuple[str, Union[List[str], float]], {str: int}) - Tuple[int, Union[List[int], float]] - - """ - if not type_map: - return type_ - - type_args = get_args(type_) - origin_type = get_origin(type_) - - if origin_type is Annotated: - annotated_type, *annotations = type_args - return Annotated[replace_types(annotated_type, type_map), tuple(annotations)] - - if (origin_type is ExtLiteral) or (sys.version_info >= (3, 8) and origin_type is Literal): - return type_map.get(type_, type_) - # Having type args is a good indicator that this is a typing module - # class instantiation or a generic alias of some sort. - if type_args: - resolved_type_args = tuple(replace_types(arg, type_map) for arg in type_args) - if all_identical(type_args, resolved_type_args): - # If all arguments are the same, there is no need to modify the - # type or create a new object at all - return type_ - if ( - origin_type is not None - and isinstance(type_, typing_base) - and not isinstance(origin_type, typing_base) - and getattr(type_, '_name', None) is not None - ): - # In python < 3.9 generic aliases don't exist so any of these like `list`, - # `type` or `collections.abc.Callable` need to be translated. 
- # See: https://www.python.org/dev/peps/pep-0585 - origin_type = getattr(typing, type_._name) - assert origin_type is not None - # PEP-604 syntax (Ex.: list | str) is represented with a types.UnionType object that does not have __getitem__. - # We also cannot use isinstance() since we have to compare types. - if sys.version_info >= (3, 10) and origin_type is types.UnionType: # noqa: E721 - return _UnionGenericAlias(origin_type, resolved_type_args) - return origin_type[resolved_type_args] - - # We handle pydantic generic models separately as they don't have the same - # semantics as "typing" classes or generic aliases - if not origin_type and lenient_issubclass(type_, GenericModel) and not type_.__concrete__: - type_args = type_.__parameters__ - resolved_type_args = tuple(replace_types(t, type_map) for t in type_args) - if all_identical(type_args, resolved_type_args): - return type_ - return type_[resolved_type_args] - - # Handle special case for typehints that can have lists as arguments. - # `typing.Callable[[int, str], int]` is an example for this. - if isinstance(type_, (List, list)): - resolved_list = list(replace_types(element, type_map) for element in type_) - if all_identical(type_, resolved_list): - return type_ - return resolved_list - - # For JsonWrapperValue, need to handle its inner type to allow correct parsing - # of generic Json arguments like Json[T] - if not origin_type and lenient_issubclass(type_, JsonWrapper): - type_.inner_type = replace_types(type_.inner_type, type_map) - return type_ - - # If all else fails, we try to resolve the type directly and otherwise just - # return the input with no modifications. - new_type = type_map.get(type_, type_) - # Convert string to ForwardRef - if isinstance(new_type, str): - return ForwardRef(new_type) - else: - return new_type - - -def check_parameters_count(cls: Type[GenericModel], parameters: Tuple[Any, ...]) -> None: - actual = len(parameters) - expected = len(cls.__parameters__) - if actual != expected: - description = 'many' if actual > expected else 'few' - raise TypeError(f'Too {description} parameters for {cls.__name__}; actual {actual}, expected {expected}') - - -DictValues: Type[Any] = {}.values().__class__ - - -def iter_contained_typevars(v: Any) -> Iterator[TypeVarType]: - """Recursively iterate through all subtypes and type args of `v` and yield any typevars that are found.""" - if isinstance(v, TypeVar): - yield v - elif hasattr(v, '__parameters__') and not get_origin(v) and lenient_issubclass(v, GenericModel): - yield from v.__parameters__ - elif isinstance(v, (DictValues, list)): - for var in v: - yield from iter_contained_typevars(var) - else: - args = get_args(v) - for arg in args: - yield from iter_contained_typevars(arg) - - -def get_caller_frame_info() -> Tuple[Optional[str], bool]: - """ - Used inside a function to check whether it was called globally - - Will only work against non-compiled code, therefore used only in pydantic.generics - - :returns Tuple[module_name, called_globally] - """ - try: - previous_caller_frame = sys._getframe(2) - except ValueError as e: - raise RuntimeError('This function must be used inside another function') from e - except AttributeError: # sys module does not have _getframe function, so there's nothing we can do about it - return None, False - frame_globals = previous_caller_frame.f_globals - return frame_globals.get('__name__'), previous_caller_frame.f_locals is frame_globals - - -def _prepare_model_fields( - created_model: Type[GenericModel], - fields: Mapping[str, Any], - 
instance_type_hints: Mapping[str, type], - typevars_map: Mapping[Any, type], -) -> None: - """ - Replace DeferredType fields with concrete type hints and prepare them. - """ - - for key, field in created_model.__fields__.items(): - if key not in fields: - assert field.type_.__class__ is not DeferredType - # https://github.com/nedbat/coveragepy/issues/198 - continue # pragma: no cover - - assert field.type_.__class__ is DeferredType, field.type_.__class__ - - field_type_hint = instance_type_hints[key] - concrete_type = replace_types(field_type_hint, typevars_map) - field.type_ = concrete_type - field.outer_type_ = concrete_type - field.prepare() - created_model.__annotations__[key] = concrete_type diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/web.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/web.py deleted file mode 100644 index 9e52653160fa810b4cab7af6e69298840bfdddcf..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/web.py +++ /dev/null @@ -1,23 +0,0 @@ -""" - pygments.lexers.web - ~~~~~~~~~~~~~~~~~~~ - - Just export previously exported lexers. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexers.html import HtmlLexer, DtdLexer, XmlLexer, XsltLexer, \ - HamlLexer, ScamlLexer, JadeLexer -from pygments.lexers.css import CssLexer, SassLexer, ScssLexer -from pygments.lexers.javascript import JavascriptLexer, LiveScriptLexer, \ - DartLexer, TypeScriptLexer, LassoLexer, ObjectiveJLexer, CoffeeScriptLexer -from pygments.lexers.actionscript import ActionScriptLexer, \ - ActionScript3Lexer, MxmlLexer -from pygments.lexers.php import PhpLexer -from pygments.lexers.webmisc import DuelLexer, XQueryLexer, SlimLexer, QmlLexer -from pygments.lexers.data import JsonLexer -JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5 - -__all__ = [] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/errors.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/errors.py deleted file mode 100644 index 8b93059e19faa9f821ffad1b8a298e7301fe8ab2..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/errors.py +++ /dev/null @@ -1,97 +0,0 @@ -"""distutils.errors - -Provides exceptions used by the Distutils modules. Note that Distutils -modules may raise standard exceptions; in particular, SystemExit is -usually raised for errors that are obviously the end-user's fault -(eg. bad command-line arguments). - -This module is safe to use in "from ... import *" mode; it only exports -symbols whose names start with "Distutils" and end with "Error".""" - -class DistutilsError (Exception): - """The root of all Distutils evil.""" - pass - -class DistutilsModuleError (DistutilsError): - """Unable to load an expected module, or to find an expected class - within some module (in particular, command modules and classes).""" - pass - -class DistutilsClassError (DistutilsError): - """Some command class (or possibly distribution class, if anyone - feels a need to subclass Distribution) is found not to be holding - up its end of the bargain, ie. 
implementing some part of the - "command "interface.""" - pass - -class DistutilsGetoptError (DistutilsError): - """The option table provided to 'fancy_getopt()' is bogus.""" - pass - -class DistutilsArgError (DistutilsError): - """Raised by fancy_getopt in response to getopt.error -- ie. an - error in the command line usage.""" - pass - -class DistutilsFileError (DistutilsError): - """Any problems in the filesystem: expected file not found, etc. - Typically this is for problems that we detect before OSError - could be raised.""" - pass - -class DistutilsOptionError (DistutilsError): - """Syntactic/semantic errors in command options, such as use of - mutually conflicting options, or inconsistent options, - badly-spelled values, etc. No distinction is made between option - values originating in the setup script, the command line, config - files, or what-have-you -- but if we *know* something originated in - the setup script, we'll raise DistutilsSetupError instead.""" - pass - -class DistutilsSetupError (DistutilsError): - """For errors that can be definitely blamed on the setup script, - such as invalid keyword arguments to 'setup()'.""" - pass - -class DistutilsPlatformError (DistutilsError): - """We don't know how to do something on the current platform (but - we do know how to do it on some platform) -- eg. trying to compile - C files on a platform not supported by a CCompiler subclass.""" - pass - -class DistutilsExecError (DistutilsError): - """Any problems executing an external program (such as the C - compiler, when compiling C files).""" - pass - -class DistutilsInternalError (DistutilsError): - """Internal inconsistencies or impossibilities (obviously, this - should never be seen if the code is working!).""" - pass - -class DistutilsTemplateError (DistutilsError): - """Syntax error in a file list template.""" - -class DistutilsByteCompileError(DistutilsError): - """Byte compile error.""" - -# Exception classes used by the CCompiler implementation classes -class CCompilerError (Exception): - """Some compile/link operation failed.""" - -class PreprocessError (CCompilerError): - """Failure to preprocess one or more C/C++ files.""" - -class CompileError (CCompilerError): - """Failure to compile one or more C/C++ source files.""" - -class LibError (CCompilerError): - """Failure to create a static library from one or more C/C++ object - files.""" - -class LinkError (CCompilerError): - """Failure to link one or more C/C++ object files into an executable - or shared library file.""" - -class UnknownFileError (CCompilerError): - """Attempt to process an unknown file type.""" diff --git a/spaces/projekt-rising-ai/Expert-Answer-Demo/azure_utils.py b/spaces/projekt-rising-ai/Expert-Answer-Demo/azure_utils.py deleted file mode 100644 index 4173eaa689abe9b7b6b66ed3fcf1ede591655a53..0000000000000000000000000000000000000000 --- a/spaces/projekt-rising-ai/Expert-Answer-Demo/azure_utils.py +++ /dev/null @@ -1,155 +0,0 @@ -# This class stores Azure voice data. Specifically, the class stores several records containing -# language, lang_code, gender, voice_id and engine. The class also has a method to return the -# voice_id, lang_code and engine given a language and gender. 
- -NEURAL_ENGINE = "neural" -STANDARD_ENGINE = "standard" - - -class AzureVoiceData: - def get_voice(self, language, gender): - for voice in self.voice_data: - if voice['language'] == language and voice['gender'] == gender: - return voice['azure_voice'] - return None - - def __init__(self): - self.voice_data = [ - {'language': 'Arabic', - 'azure_voice': 'ar-EG-ShakirNeural', - 'gender': 'Male'}, - {'language': 'Arabic (Gulf)', - 'azure_voice': 'ar-KW-FahedNeural', - 'gender': 'Male'}, - {'language': 'Catalan', - 'azure_voice': 'ca-ES-EnricNeural', - 'gender': 'Male'}, - {'language': 'Chinese (Cantonese)', - 'azure_voice': 'yue-CN-YunSongNeural', - 'gender': 'Male'}, - {'language': 'Chinese (Mandarin)', - 'azure_voice': 'zh-CN-YunxiNeural', - 'gender': 'Male'}, - {'language': 'Danish', - 'azure_voice': 'da-DK-JeppeNeural', - 'gender': 'Male'}, - {'language': 'Dutch', - 'azure_voice': 'nl-NL-MaartenNeural', - 'gender': 'Male'}, - {'language': 'English (Australian)', - 'azure_voice': 'en-AU-KenNeural', - 'gender': 'Male'}, - {'language': 'English (British)', - 'azure_voice': 'en-GB-RyanNeural', - 'gender': 'Male'}, - {'language': 'English (Indian)', - 'azure_voice': 'en-IN-PrabhatNeural', - 'gender': 'Male'}, - {'language': 'English (New Zealand)', - 'azure_voice': 'en-NZ-MitchellNeural', - 'gender': 'Male'}, - {'language': 'English (South African)', - 'azure_voice': 'en-ZA-LukeNeural', - 'gender': 'Male'}, - {'language': 'English (US)', - 'azure_voice': 'en-US-ChristopherNeural', - 'gender': 'Male'}, - {'language': 'English (Welsh)', - 'azure_voice': 'cy-GB-AledNeural', - 'gender': 'Male'}, - {'language': 'Finnish', - 'azure_voice': 'fi-FI-HarriNeural', - 'gender': 'Male'}, - {'language': 'French', - 'azure_voice': 'fr-FR-HenriNeural', - 'gender': 'Male'}, - {'language': 'French (Canadian)', - 'azure_voice': 'fr-CA-AntoineNeural', - 'gender': 'Male'}, - {'language': 'German', - 'azure_voice': 'de-DE-KlausNeural', - 'gender': 'Male'}, - {'language': 'German (Austrian)', - 'azure_voice': 'de-AT-JonasNeural', - 'gender': 'Male'}, - {'language': 'Hindi', - 'azure_voice': 'hi-IN-MadhurNeural', - 'gender': 'Male'}, - {'language': 'Icelandic', - 'azure_voice': 'is-IS-GunnarNeural', - 'gender': 'Male'}, - {'language': 'Italian', - 'azure_voice': 'it-IT-GianniNeural', - 'gender': 'Male'}, - {'language': 'Japanese', - 'azure_voice': 'ja-JP-KeitaNeural', - 'gender': 'Male'}, - {'language': 'Korean', - 'azure_voice': 'ko-KR-GookMinNeural', - 'gender': 'Male'}, - {'language': 'Norwegian', - 'azure_voice': 'nb-NO-FinnNeural', - 'gender': 'Male'}, - {'language': 'Polish', - 'azure_voice': 'pl-PL-MarekNeural', - 'gender': 'Male'}, - {'language': 'Portuguese (Brazilian)', - 'azure_voice': 'pt-BR-NicolauNeural', - 'gender': 'Male'}, - {'language': 'Portuguese (European)', - 'azure_voice': 'pt-PT-DuarteNeural', - 'gender': 'Male'}, - {'language': 'Romanian', - 'azure_voice': 'ro-RO-EmilNeural', - 'gender': 'Male'}, - {'language': 'Russian', - 'azure_voice': 'ru-RU-DmitryNeural', - 'gender': 'Male'}, - {'language': 'Spanish (European)', - 'azure_voice': 'es-ES-TeoNeural', - 'gender': 'Male'}, - {'language': 'Spanish (Mexican)', - 'azure_voice': 'es-MX-LibertoNeural', - 'gender': 'Male'}, - {'language': 'Spanish (US)', - 'azure_voice': 'es-US-AlonsoNeural"', - 'gender': 'Male'}, - {'language': 'Swedish', - 'azure_voice': 'sv-SE-MattiasNeural', - 'gender': 'Male'}, - {'language': 'Turkish', - 'azure_voice': 'tr-TR-AhmetNeural', - 'gender': 'Male'}, - {'language': 'Welsh', - 'azure_voice': 'cy-GB-AledNeural', - 
'gender': 'Male'}, - ] - - -# Run from the command-line -if __name__ == '__main__': - azure_voice_data = AzureVoiceData() - - azure_voice = azure_voice_data.get_voice('English (US)', 'Male') - print('English (US)', 'Male', azure_voice) - - azure_voice = azure_voice_data.get_voice('English (US)', 'Female') - print('English (US)', 'Female', azure_voice) - - azure_voice = azure_voice_data.get_voice('French', 'Female') - print('French', 'Female', azure_voice) - - azure_voice = azure_voice_data.get_voice('French', 'Male') - print('French', 'Male', azure_voice) - - azure_voice = azure_voice_data.get_voice('Japanese', 'Female') - print('Japanese', 'Female', azure_voice) - - azure_voice = azure_voice_data.get_voice('Japanese', 'Male') - print('Japanese', 'Male', azure_voice) - - azure_voice = azure_voice_data.get_voice('Hindi', 'Female') - print('Hindi', 'Female', azure_voice) - - azure_voice = azure_voice_data.get_voice('Hindi', 'Male') - print('Hindi', 'Male', azure_voice) diff --git a/spaces/pycui/RealChar/realtime_ai_character/audio/text_to_speech/__init__.py b/spaces/pycui/RealChar/realtime_ai_character/audio/text_to_speech/__init__.py deleted file mode 100644 index 238b644f0d4eb13d1aac6e3e8a82ecaf5ac91b0c..0000000000000000000000000000000000000000 --- a/spaces/pycui/RealChar/realtime_ai_character/audio/text_to_speech/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -import os - -from realtime_ai_character.audio.text_to_speech.base import TextToSpeech - - -def get_text_to_speech() -> TextToSpeech: - use = os.getenv('TEXT_TO_SPEECH_USE', 'ELEVEN_LABS') - if use == 'ELEVEN_LABS': - from realtime_ai_character.audio.text_to_speech.elevenlabs import ElevenLabs - ElevenLabs.initialize() - return ElevenLabs.get_instance() - else: - raise NotImplementedError(f'Unknown text to speech engine: {use}') diff --git a/spaces/quidiaMuxgu/Expedit-SAM/BadDayBetsytorrentFull.md b/spaces/quidiaMuxgu/Expedit-SAM/BadDayBetsytorrentFull.md deleted file mode 100644 index a7c03333565c530c86bcdbc04dcdab36194db69a..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/BadDayBetsytorrentFull.md +++ /dev/null @@ -1,39 +0,0 @@ - -

    BadDayBetsytorrentFull - A Game That Will Make You Laugh and Cry

    - -

    Have you ever had a bad day? A really bad day? A day that makes you want to scream, cry, or give up? If you have, then you might relate to Bad Day Betsy, a hilarious and challenging game that will test your skills and patience.

    -

    BadDayBetsytorrentFull


Download: https://geags.com/2uCrUx



    - -

    In this game, you play as Betsy, a young woman who is having the worst day ever. Everything that can go wrong does go wrong for her. She wakes up late, spills coffee on her dress, gets stuck in traffic, loses her job, breaks up with her boyfriend, and more. Your goal is to help her survive this terrible day and make it to the end.

    - -

    But it won't be easy. The game is full of obstacles, puzzles, mini-games, and surprises that will make you laugh and cry. You will have to deal with angry bosses, annoying coworkers, rude strangers, crazy exes, and even a zombie apocalypse. You will have to make choices that will affect the outcome of the game. Will you be nice or mean? Will you fight or run? Will you give up or keep going?

    - -

    Where can you download BadDayBetsytorrentFull for free?

    - -

    If you are interested in playing this game, you might be wondering where you can download it for free. There are many websites that offer free torrent downloads of this game. However, not all of them are safe and legal. Some of them may contain viruses, malware, or spam. Some of them may also violate the copyright laws and infringe the rights of the original developers and publishers.

    -

    - -

    Therefore, we recommend that you use only trusted and reliable sources to download this game. Here are some of them:

    - -
      -
• Facebook: You can find the official page of this game on Facebook and follow the link to download it for free. However, you will need a Facebook account to access it.
• Igg games: You can find this game on Igg games, a website that provides free games for PC. However, you will need a torrent client to download it.
• Slideserve: You can find a PowerPoint presentation of this game on Slideserve, a website that allows you to share and download presentations. However, you will need a Slideserve account to access it.
    - -

    What are some tips and tricks for playing BadDayBetsytorrentFull?

    - -

    If you want to enjoy this game and have a good time, here are some tips and tricks that might help you:

    - -
      -
• Save often: The game is full of unexpected events and consequences that can change the course of the story. You might want to save your progress often so that you can go back and try different options if you don't like the outcome.
• Explore everything: The game is full of hidden items, secrets, and Easter eggs that can enhance your experience. You might want to explore everything and interact with everything to find them.
• Have fun: The game is meant to be funny and entertaining. Don't take it too seriously or get frustrated by the difficulty. Just have fun and laugh at the absurdity of Betsy's bad day.
    - -

    We hope that you enjoyed this article and learned more about BadDayBetsytorrentFull. If you did, please share it with your friends and family who might also like this game. Thank you for reading!

    -


    -
    -
    \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Cartello Orario Di Apertura E Chiusura Da Stampare.md b/spaces/quidiaMuxgu/Expedit-SAM/Cartello Orario Di Apertura E Chiusura Da Stampare.md deleted file mode 100644 index aff62c249cf14967c3311322ed97fb467f432acd..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Cartello Orario Di Apertura E Chiusura Da Stampare.md +++ /dev/null @@ -1,30 +0,0 @@ - -

Cartello Orario Di Apertura E Chiusura Da Stampare: How to Create a Professional Opening-Hours Sign for Your Shop

    - -

If you run a shop, a business, or an office, you know how important it is to tell your customers your opening and closing hours. A well-made hours sign can make the difference between a good impression and a bad one, between a satisfied customer and a dissatisfied one.

    - -

But how do you create a printable opening and closing hours sign that is effective, elegant, and personalized? Don't worry: in this article we will give you some useful tips and show you how to use a free online service to create your hours sign in just a few minutes.

    -

    Cartello Orario Di Apertura E Chiusura Da Stampare


    Download Zip ✶✶✶ https://geags.com/2uCrBs



    - -

How to Choose the Format and Design of Your Hours Sign

    - -

The first step in creating your hours sign is choosing the format and design that best suit your shop. The format depends on the space you have available and on the kind of message you want to convey. You can opt for a small, simple sign to attach to the door or the shop window, or for a large, eye-catching sign to hang on the wall or from the ceiling.

    - -

The design of your hours sign should be consistent with your shop's image and with your target clientele. You can choose among different styles, colors, and fonts, depending on whether you want to give your sign a professional, modern, vintage, or creative look. What matters is that the sign is legible, clear, and attractive.

    - -

How to Add the Opening and Closing Hours to Your Sign

    - -

The second step is adding the opening and closing hours to your sign. This is the most important part, because it is what your customers will look at to find out when they can visit you. Be precise and keep the sign up to date, to avoid misunderstandings and complaints.

    - -

You can present the opening and closing hours in several ways, depending on the format and design of your sign. You can use a table with the days of the week and the time slots, or write the hours as plain text, separating the days with dashes or dots. You can also add icons or images to make your sign more visual and original.
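If you prefer to lay the table out programmatically before styling it, a tiny Python sketch like the one below prints a weekly grid you can copy onto your sign. The days and time slots here are placeholder examples only:

```python
# Placeholder hours - replace them with your shop's real opening times.
hours = {
    "Monday-Friday": "9:00-13:00 / 15:30-19:30",
    "Saturday": "9:00-13:00",
    "Sunday": "Closed",
}

print(f"{'Day':<16}Hours")
for day, slot in hours.items():
    print(f"{day:<16}{slot}")
```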

    - -

If you have special hours, such as lunch breaks, extraordinary openings, or holiday closures, spell them out on your sign. You can use symbols such as stars or hearts to highlight them, or set them apart with different colors or fonts.

    - -

How to Create Your Printable Opening and Closing Hours Sign with a Free Online Service

    - -

The third step is printing your sign. But before you do, you need to create it in digital form. How? Simple: just use a free online service that lets you create your printable opening and closing hours sign in a few minutes.

    -

    - -

One of these services is Cartelliorari.it, a website that offers you a wide choice of hours-sign templates.

    -
    -
    \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Danielsipperplaneacionycontroldelaproduccionpdf [REPACK].md b/spaces/quidiaMuxgu/Expedit-SAM/Danielsipperplaneacionycontroldelaproduccionpdf [REPACK].md deleted file mode 100644 index 05c8435b7ee3895d0c228b087e2bf86901c04c59..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Danielsipperplaneacionycontroldelaproduccionpdf [REPACK].md +++ /dev/null @@ -1,6 +0,0 @@ -

    danielsipperplaneacionycontroldelaproduccionpdf


Download File: https://geags.com/2uCrSG



    -
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Free Download LandscapePro Studio Full Software !!EXCLUSIVE!!.md b/spaces/quidiaMuxgu/Expedit-SAM/Free Download LandscapePro Studio Full Software !!EXCLUSIVE!!.md deleted file mode 100644 index dab43018d03285ad224eb53a6e98f4a2ba53be29..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Free Download LandscapePro Studio Full Software !!EXCLUSIVE!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Free Download LandscapePro Studio Full Software


Download: https://geags.com/2uCqQh



- -WebM for Internet Explorer is free software that enables support for playing ... ProRes supports transfer rates from 10 to 377 Mbps and a variety of SD and HD ... AC3, then you will need either Adobe Encore or Apple DVD Studio Pro – both run on ... file extension of the TurboFloorPlan 3D Home & Landscape Pro software.
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Lmd Tools Delphi Xe3 LINK Crack.md b/spaces/quidiaMuxgu/Expedit-SAM/Lmd Tools Delphi Xe3 LINK Crack.md deleted file mode 100644 index e12517586c5f03e7c651baf7ef31853d9de72a63..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Lmd Tools Delphi Xe3 LINK Crack.md +++ /dev/null @@ -1,19 +0,0 @@ -

    Lmd Tools Delphi Xe3 Crack


Download: https://geags.com/2uCrGo



    -
-Create professional software solutions with RAD Studio XE6 and LMD-Tools. Compatible IDE versions include Delphi XE2, Delphi XE2 Starter Edition, Delphi XE3, and Delphi XE3 Starter Edition.
    -
    -
    -

    diff --git a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/models/cnn/__init__.py b/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/models/cnn/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Assassin Creed Brotherhood Activation Wizard Serial Number.md b/spaces/raedeXanto/academic-chatgpt-beta/Assassin Creed Brotherhood Activation Wizard Serial Number.md deleted file mode 100644 index 62dafacff96b49beb44b3934b144791ea29bd276..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Assassin Creed Brotherhood Activation Wizard Serial Number.md +++ /dev/null @@ -1,136 +0,0 @@ -
    -

    Assassin's Creed Brotherhood Activation Wizard Serial Number

    -

    Assassin's Creed Brotherhood is an action-adventure video game developed by Ubisoft Montreal and published by Ubisoft in 2010. It is the third major installment in the Assassin's Creed series and a direct sequel to Assassin's Creed II. The game follows the historical adventures of Ezio Auditore da Firenze, a master assassin who fights against the Templars in Renaissance Italy.

    -

    To play Assassin's Creed Brotherhood on PC, you need to activate the game using an activation code that you can get from Steam, the online platform where you can buy and download games. However, some players have reported that they encounter problems with the activation wizard, such as:

    -

    assassin creed brotherhood activation wizard serial number


    Download Zip ⚙⚙⚙ https://tinourl.com/2uL5lp



    -
      -
• The activation code does not work or is invalid
• The activation code has already been used or activated
• The game does not launch or crashes after entering the activation code
    -

    If you are one of these players, don't worry. There are some possible solutions that you can try to fix the issue and enjoy playing Assassin's Creed Brotherhood. In this article, we will show you three solutions that may help you solve the problem with the activation wizard.

    -

    Solution 1: Delete uplay_install.manifest and uplay_install.state files

    -

    One of the possible causes of the issue is that some files related to Uplay, the Ubisoft launcher that runs Assassin's Creed Brotherhood, are corrupted or missing. To fix this, you can try deleting two files: uplay_install.manifest and uplay_install.state. These files are located in your game folder, which is usually in C:\Program Files (x86)\Steam\steamapps\common\Assassin's Creed Brotherhood. To delete these files, follow these steps:

    -
      -
1. Close Steam and Uplay if they are running.
2. Open your game folder in File Explorer.
3. Find and delete the uplay_install.manifest and uplay_install.state files.
4. Restart Steam and Uplay.
5. Launch Assassin's Creed Brotherhood from Steam.
    -

    This solution may fix the issue by forcing Uplay to reinstall some components that are needed for the game to run properly. If this solution works for you, you should be able to enter your activation code and play Assassin's Creed Brotherhood without any problems.
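If you would rather script step 3 than hunt for the files by hand, a minimal Python sketch along these lines would work. The folder below is the default Steam location mentioned above, so adjust GAME_DIR if your library lives elsewhere:

```python
from pathlib import Path

# Default Steam install location from the steps above - adjust if needed.
GAME_DIR = Path(r"C:\Program Files (x86)\Steam\steamapps\common\Assassin's Creed Brotherhood")

for name in ("uplay_install.manifest", "uplay_install.state"):
    target = GAME_DIR / name
    if target.exists():
        target.unlink()  # remove the file so Uplay recreates it on the next launch
        print(f"Deleted {target}")
    else:
        print(f"Not found (perhaps already removed): {target}")
```

Run it from an elevated prompt if the game folder is under Program Files, then restart Steam and Uplay as in steps 4 and 5.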

    -

    Solution 2: Contact Ubisoft support

    -

    If deleting the files does not work or you cannot find them in your game folder, another solution is to contact Ubisoft support and ask for their help. Ubisoft support is the official customer service of Ubisoft that can assist you with any issues related to their games. To contact Ubisoft support, you need to submit a support request on their website: https://support.ubi.com/. To submit a support request, follow these steps:

    -
      -
1. Go to https://support.ubi.com/ and click on Sign In in the top right corner.
2. Log in with your Ubisoft account or create one if you don't have one.
3. Click on Contact Customer Support in the top right corner.
4. Select Assassin's Creed Brotherhood as your product.
5. Select PC as your platform.
6. Select Technical Issues as your category.
7. Select Activation as your sub-category.
8. Enter a subject and a description of your issue.
9. Attach any screenshots or documents that may help Ubisoft support understand your issue better.
10. Click on Submit My Case at the bottom of the page.
    -

    After submitting your case, you should receive an email confirmation from Ubisoft support with your case number. You can check the status of your case on their website or reply to their email if you have any updates or questions. Ubisoft support may take some time to respond to your case depending on their workload and availability. They may also ask you for more information or suggest some troubleshooting steps before providing you with a solution.

    -

    Solution 3: Use a crack or a keygen

    -

    If none of the above solutions work for you or you don't want to wait for Ubisoft support to respond, there is another option that you can try at your own risk: using a crack or a keygen. A crack is a modified version of the game executable file that bypasses the activation wizard and lets you play Assassin's Creed Brotherhood without entering an activation code. A keygen is a program that generates random activation codes that may work for Assassin's Creed Brotherhood.

    -


    -

    To use a crack or a keygen, you need to find one online from a reliable and safe source. There are many websites that offer cracks and keygens for various games, but not all of them are trustworthy. Some of them may contain viruses, malware, spyware, adware, or other harmful software that can damage your computer or steal your personal information. Therefore, you need to be careful when downloading anything from these websites.

    -

    One of the websites that we recommend for finding cracks and keygens is The Pirate Bay (https://thepiratebay.org/). The Pirate Bay is one of the most popular torrent websites that hosts millions of files uploaded by users around the world. You can use The Pirate Bay to search for cracks and keygens for Assassin's Creed Brotherhood by typing "assassin creed brotherhood crack" or "assassin creed brotherhood keygen" in the search box.

    -

    To download files from The Pirate Bay, you need to use a torrent client such as BitTorrent (https://www.bittorrent.com/) or uTorrent (https://www.utorrent.com/). A torrent client is a program that allows you to download files from other users who have them on their computers. To download a file from The Pirate Bay using a torrent client, follow these steps:

    -
      -
1. Go to https://thepiratebay.org/ and search for "assassin creed brotherhood crack" or "assassin creed brotherhood keygen".
2. Sort the results by seeders (SE) in descending order. Seeders are users who have downloaded the file and are sharing it with others. The more seeders a file has, the faster it will download.
3. Pick a file that has many seeders and good comments from other users who have downloaded it.
4. Click on Get This Torrent (the magnet icon) next to the file name.
5. Your torrent client should open automatically and start downloading the file.
6. Wait until the download is complete.
7. Open the downloaded file using WinRAR (https://www.win-rar.com/) or another program that can extract compressed files.
8. Follow the instructions in the readme.txt file or on The Pirate Bay page on how to use the crack or keygen.
    -

    To use a crack, you usually need to copy and paste it into your game folder, replacing the original game executable file. To use a keygen, you usually need to run it and copy one of its generated codes into your activation wizard.

    -

    Using a crack or a keygen may allow you to play Assassin's Creed Brotherhood without any issues with the activation wizard. However, there are some risks and drawbacks of doing so:

    -
      -
• You may violate Ubisoft's terms of service and end user license agreement by using unauthorized software that modifies their game.
• You may lose access to some features of Assassin's Creed Brotherhood such as multiplayer mode, achievements, rewards, updates, patches, etc.
• You may expose your computer to viruses, malware, spyware, adware, or other harmful software that may come with the crack or keygen.
• You may face legal consequences if Ubisoft or other authorities find out that you are using a crack or keygen.
      -

      Therefore, we do not recommend using a crack or keygen unless you are absolutely desperate and have no other choice. We also advise you to scan your computer with a reliable antivirus program after using a crack or keygen to make sure that your computer is safe and clean.

      -

      Conclusion

      -

      In this article, we have shown you three possible solutions that may help you solve the problem with the activation wizard of Assassin's Creed Brotherhood. We hope that one of these solutions works for you and that you can enjoy playing this amazing game. Assassin's Creed Brotherhood is a masterpiece of video game design that offers a thrilling and immersive experience of being an assassin in Renaissance Italy. You can explore a vast and beautiful open world, engage in epic battles and stealth missions, recruit and train other assassins, and uncover the secrets of the ancient order of the Assassins.

      -

      If you have any feedback or questions about this article or Assassin's Creed Brotherhood, please feel free to share them with us in the comments section below. We would love to hear from you and help you with any issues that you may have. Also, if you liked this article and found it helpful, please share it with your friends and fellow gamers who may also need some help with the activation wizard of Assassin's Creed Brotherhood.

      -

      Thank you for reading this article and we hope you have a great time playing Assassin's Creed Brotherhood. Remember, nothing is true, everything is permitted.

      -

      FAQs

      -

      Here are some frequently asked questions about Assassin's Creed Brotherhood and its activation wizard:

      -
        -
1. What is the difference between Assassin's Creed Brotherhood and Assassin's Creed II?

        Assassin's Creed Brotherhood is a direct sequel to Assassin's Creed II that continues the story of Ezio Auditore da Firenze. However, it also introduces some new features and improvements such as:

        -
          -
• A multiplayer mode that allows you to play as different characters from the Assassin's Creed universe and compete against other players in various modes and maps.
• A brotherhood system that allows you to recruit and train other assassins who can help you in your missions or fight alongside you.
• A Borgia tower system that allows you to liberate different districts of Rome from the control of the Templars by destroying their towers and killing their captains.
• A shop quest system that allows you to upgrade your equipment and unlock new items by completing various tasks for different shopkeepers.
• A war machine system that allows you to use Leonardo da Vinci's inventions such as a tank, a bomber, a cannon, and a parachute.
        -
2. How can I play Assassin's Creed Brotherhood multiplayer mode?

        To play Assassin's Creed Brotherhood multiplayer mode, you need to have a Ubisoft account and an internet connection. You can access the multiplayer mode from the main menu of the game or from Uplay. You can choose from different modes such as:

        -
          -
• Wanted: A free-for-all mode where you have to find and kill your assigned target while avoiding being killed by other players.
• Alliance: A team-based mode where you have to work with your partner to find and kill your assigned targets while avoiding being killed by another team.
• Manhunt: A team-based mode where one team has to hide and survive while another team has to find and kill them.
• Chest Capture: A team-based mode where one team has to defend chests while another team has to capture them.
• Escort: A team-based mode where one team has to escort a VIP while another team has to assassinate him.
        -
3. How can I get more activation codes for Assassin's Creed Brotherhood?

        If you need more activation codes for Assassin's Creed Brotherhood, you can buy them from Steam or other online platforms that sell games. You can also try contacting Ubisoft support and asking them for more codes if you have a valid reason. However, we do not recommend using cracks or keygens as they may be illegal or harmful for your computer. -

4. How can I get rid of the activation wizard of Assassin's Creed Brotherhood?

        If you want to get rid of the activation wizard of Assassin's Creed Brotherhood, you can try one of the solutions that we have mentioned in this article. However, we do not guarantee that they will work for everyone or that they will not cause any problems for your game or computer. Use them at your own risk and discretion. -

5. How can I play Assassin's Creed Brotherhood without Steam?

        If you want to play Assassin's Creed Brotherhood without Steam, you can buy a physical copy of the game from a store or online. However, you will still need Uplay to activate and run the game. You can also try using a crack or a keygen that may allow you to play without Steam or Uplay, but we do not recommend doing so as they may be illegal or harmful for your computer. -

      -

      -
      -
      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Dekart Private Disk 2.10 Keygen 11l.md b/spaces/raedeXanto/academic-chatgpt-beta/Dekart Private Disk 2.10 Keygen 11l.md deleted file mode 100644 index 6063b449d270b6da301291dbf359218a792bc0ba..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Dekart Private Disk 2.10 Keygen 11l.md +++ /dev/null @@ -1,173 +0,0 @@ - -

      Dekart Private Disk 2.10 Keygen 11l: A Powerful and Secure Disk Encryption Software

      -

      Do you want to protect your sensitive data from unauthorized access, theft, or loss? Do you want to create a virtual encrypted disk that acts like a real drive on your computer? Do you want to use a reliable and easy-to-use disk encryption software that supports various media and encryption algorithms? If you answered yes to any of these questions, then you might be interested in Dekart Private Disk 2.10, a powerful and secure disk encryption software that lets you create encrypted disk partitions (drive letters) to protect your confidential information.

      -

      Dekart Private Disk 2.10 Keygen 11l


      Download File ->->->-> https://tinourl.com/2uL5eX



      -

      In this article, we will explain what Dekart Private Disk 2.10 is, what are its features and benefits, how to download and install it, how to use a keygen to activate it, how to use it to protect your data, and some tips and tricks for using it effectively. We will also answer some frequently asked questions about Dekart Private Disk 2.10 at the end of the article.

      -

      What is Dekart Private Disk 2.10?

      -

      Dekart Private Disk 2.10 is an easy-to-use, secure, and reliable disk encryption software that lets you transparently encrypt proprietary data. It creates one or more virtual disks on your hard drive and/or other external storage devices. Once you create a virtual encrypted disk, that disk is assigned a new drive letter and appears to you like any other system drive (like the A: or C: drive). When the disk is mounted, the data is automatically encrypted and decrypted when you write or read this data respectively. When the disk is unmounted, either manually or by logging out of Windows, the secure data it contains becomes completely unreadable and undetectable by the operating system.

      -

      Features and benefits of Dekart Private Disk 2.10

      -

      Some of the features and benefits of Dekart Private Disk 2.10 are:

      -
        -
• It supports various media such as HDD, FDD, CD, CD/R, CD/RW, MO, MD, ZIP-disks, flash drives, all types of flash memory cards, PDAs, and even digital cameras.
• It supports various encryption algorithms such as AES (Rijndael), Twofish, Blowfish, CAST-256, and Serpent, certified by the National Institute of Standards and Technology as corresponding to the Advanced Encryption Standard (AES) and Secure Hash Standard (SHA-1).
• It allows you to store your information on removable media such as CD/DVD disks or USB flash drives without installing any software on the host computer.
• It allows you to share a single PC with multiple users by creating different virtual encrypted disks for each user.
• It allows you to hide your virtual encrypted disks from prying eyes by using hotkeys or passwords.
• It allows you to backup and restore your virtual encrypted disks in case of emergency or data loss.
• It allows you to customize the settings and preferences of the software according to your needs.
How to download and install Dekart Private Disk 2.10

      If you want to download and install Dekart Private Disk 2.10, you can follow these simple steps:

      -

      -
        -
1. Go to the official website of Dekart Private Disk 2.10 at https://www.dekart.com/products/encryption/private_disk/ and click on the "Download" button.
2. Save the setup file (pdisk.exe) to your computer and run it.
3. Follow the instructions on the screen to complete the installation process.
4. After the installation is finished, you will see a shortcut icon of Dekart Private Disk 2.10 on your desktop.
5. Double-click on the icon to launch the software and enter your name and email address to register it.
      -

      Congratulations, you have successfully downloaded and installed Dekart Private Disk 2.10 on your computer!

      -

      What is a keygen and why do you need it?

      -

      A keygen is a software tool that generates a unique serial number or activation code for a specific software program. A keygen is usually used to bypass the software licensing or registration process and unlock the full features and functions of the software without paying for it.

      -

      You might need a keygen if you want to use Dekart Private Disk 2.10 without any limitations or restrictions. However, you should be aware that using a keygen is illegal and unethical, as it violates the intellectual property rights of the software developer. Moreover, using a keygen can expose your computer to malware, viruses, or spyware that can harm your system or steal your personal information.

      -

      The advantages and disadvantages of using a keygen

      -

      Some of the advantages and disadvantages of using a keygen are:

      - - - - - - - - - - - - - - - - - - - - - - - - - -
| Advantages | Disadvantages |
| --- | --- |
| You can use the software for free without paying for it. | You are breaking the law and risking legal consequences. |
| You can access all the features and functions of the software without any limitations or restrictions. | You are violating the terms and conditions of the software license agreement. |
| You can save money and time by not having to purchase or register the software. | You are depriving the software developer of their rightful income and incentive to improve the software. |
| You can easily find and download a keygen from various websites or torrent sites. | You are exposing your computer to malware, viruses, or spyware that can damage your system or steal your data. |
| You can use the software on multiple computers or devices without any restrictions. | You are not eligible for any technical support, updates, or upgrades from the software developer. |
      -

      How to use a keygen to activate Dekart Private Disk 2.10

      -

      If you still want to use a keygen to activate Dekart Private Disk 2.10, you can follow these steps at your own risk:

      -
        -
1. Go to a website or torrent site that offers a keygen for Dekart Private Disk 2.10, such as https://cracksgurus.com/crack/Dekart-Private-Disk-v210-keygen-by-EMBRACE-bee0c4f9f7.html or https://www.crackznet.com/x/m/t/dekart+private+disk+2.10/.
2. Download the keygen file (usually a zip or rar file) and extract it to your computer.
3. Run the keygen file (usually an exe file) and click on the "Generate" button to generate a serial number or activation code for Dekart Private Disk 2.10.
4. Copy the serial number or activation code and paste it into the registration window of Dekart Private Disk 2.10 when prompted.
5. Click on the "OK" button to complete the activation process.
      -

      Congratulations, you have successfully activated Dekart Private Disk 2.10 with a keygen!

      -

How to use Dekart Private Disk 2.10 to protect your data

Now that you have downloaded, installed, and activated Dekart Private Disk 2.10, you can start using it to protect your data with encryption. Here are some of the basic steps you need to follow:

How to create and mount a virtual encrypted disk

To create and mount a virtual encrypted disk with Dekart Private Disk 2.10, you need to do the following:

1. Launch Dekart Private Disk 2.10 and click on the "Create Disk" button.
2. Select the location and name of the file that will store your virtual encrypted disk. You can also choose the size, encryption algorithm, and password for your disk (the sketch after these steps shows how a password is typically turned into an encryption key).
3. Click on the "Create" button to create your virtual encrypted disk.
4. After the creation is completed, you will see your virtual encrypted disk in the main window of Dekart Private Disk 2.10. It will have a drive letter assigned to it, such as D: or E:.
5. Double-click on your virtual encrypted disk to mount it. You will be asked to enter your password to access it.
6. After you enter your password, you will see your virtual encrypted disk in Windows Explorer as a normal drive. You can use it to store your files and folders as usual.

Congratulations, you have successfully created and mounted a virtual encrypted disk with Dekart Private Disk 2.10!
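As a side note on what "choosing a password" means cryptographically: any password-based disk encryption scheme has to stretch the password into a fixed-length key. The Python sketch below shows the standard PBKDF2 approach; it is a generic illustration under assumed parameters, not Dekart's actual key-derivation routine, and it relies on the third-party `cryptography` package.

```python
# Generic sketch: deriving an encryption key from a password (PBKDF2).
# Illustrative parameters only -- NOT Dekart's internal scheme.
import base64
import os

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC


def key_from_password(password: str, salt: bytes) -> bytes:
    """Stretch a user password into a 32-byte key, then base64-encode it."""
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt,
        iterations=480_000,  # a high iteration count slows down brute forcing
    )
    return base64.urlsafe_b64encode(kdf.derive(password.encode()))


salt = os.urandom(16)  # the salt is stored with the container; it is not secret
key = key_from_password("my disk password", salt)
```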

How to encrypt and decrypt your files and folders

To encrypt and decrypt your files and folders with Dekart Private Disk 2.10, you need to do the following:

1. Make sure your virtual encrypted disk is mounted and accessible.
2. Drag and drop your files and folders from your regular drive (such as C:) to your virtual encrypted disk (such as D:). This will automatically encrypt them with the encryption algorithm and password you chose for your disk.
3. To decrypt your files and folders, simply drag and drop them from your virtual encrypted disk to your regular drive. This will automatically decrypt them with the same encryption algorithm and password.

Note that you can also use the right-click menu to encrypt or decrypt your files and folders. Just right-click on the file or folder you want to encrypt or decrypt, select "Dekart Private Disk", and choose "Encrypt" or "Decrypt". The sketch below illustrates the encrypt-on-write/decrypt-on-read idea behind this behaviour.

Congratulations, you have successfully encrypted and decrypted your files and folders with Dekart Private Disk 2.10!
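To make the "automatic" part concrete, here is a minimal Python sketch of transparent file encryption: data is encrypted the moment it is written and decrypted the moment it is read, so the file on disk never holds plaintext. This is a conceptual illustration using the `cryptography` package, not Dekart's implementation.

```python
# Minimal sketch of encrypt-on-write / decrypt-on-read.
# Conceptual only -- NOT Dekart's implementation.
from cryptography.fernet import Fernet

key = Fernet.generate_key()  # a real tool would derive this from the password
cipher = Fernet(key)


def write_encrypted(path: str, data: bytes) -> None:
    """Encrypt on write: the file on disk only ever contains ciphertext."""
    with open(path, "wb") as f:
        f.write(cipher.encrypt(data))


def read_decrypted(path: str) -> bytes:
    """Decrypt on read: the caller only ever sees plaintext."""
    with open(path, "rb") as f:
        return cipher.decrypt(f.read())


write_encrypted("secret.bin", b"confidential data")
assert read_decrypted("secret.bin") == b"confidential data"
```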

How to unmount and hide your virtual encrypted disk

To unmount and hide your virtual encrypted disk with Dekart Private Disk 2.10, you need to do the following:

1. Make sure you have saved and closed all the files and folders on your virtual encrypted disk.
2. Go to the main window of Dekart Private Disk 2.10 and select your virtual encrypted disk from the list.
3. Click on the "Unmount" button to unmount your virtual encrypted disk. You will see a message confirming that your disk has been unmounted successfully.
4. If you want to hide your virtual encrypted disk from prying eyes, you can also click on the "Hide" button. This removes the file that stores your virtual encrypted disk from its visible location. You can also use a hotkey (Ctrl+Alt+H by default) to hide or unhide your disk.

Congratulations, you have successfully unmounted and hidden your virtual encrypted disk with Dekart Private Disk 2.10!

Tips and tricks for using Dekart Private Disk 2.10 effectively

Here are some tips and tricks for using Dekart Private Disk 2.10 effectively:

How to use keyboard shortcuts to manage your virtual encrypted disk

You can use keyboard shortcuts to perform various actions on your virtual encrypted disk, such as mounting, unmounting, hiding, unhiding, encrypting, or decrypting. Here are some of the keyboard shortcuts you can use:

- Ctrl+Alt+M: Mount a virtual encrypted disk.
- Ctrl+Alt+U: Unmount a virtual encrypted disk.
- Ctrl+Alt+H: Hide or unhide a virtual encrypted disk.
- Ctrl+Alt+E: Encrypt a file or folder.
- Ctrl+Alt+D: Decrypt a file or folder.

You can also customize the keyboard shortcuts according to your preferences by going to the "Options" menu of Dekart Private Disk 2.10 and selecting "Hotkeys".

How to backup and restore your virtual encrypted disk

You can backup and restore your virtual encrypted disk in case of emergency or data loss with Dekart Private Disk 2.10. Here are the steps you need to follow:

1. To backup your virtual encrypted disk, go to the "File" menu of Dekart Private Disk 2.10 and select "Backup Disk". Choose the location and name of the backup file and click on the "Save" button.
2. To restore your virtual encrypted disk, go to the "File" menu of Dekart Private Disk 2.10 and select "Restore Disk". Choose the backup file you want to restore and click on the "Open" button.

Note that you can also use the right-click menu to backup or restore your virtual encrypted disk. Just right-click on the disk you want to backup or restore, select "Dekart Private Disk", and choose "Backup Disk" or "Restore Disk". A short sketch of why backing up an encrypted container is safe follows below.
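Because the container file only ever holds ciphertext, it can be backed up with an ordinary file copy without weakening security: the copy is exactly as unreadable as the original without the password. The Python sketch below illustrates this, with a checksum so the backup can be verified later; the function name is hypothetical.

```python
# Sketch: backing up an encrypted container file with a plain copy.
# The container holds only ciphertext, so the backup needs no extra protection.
import hashlib
import shutil


def backup_container(src: str, dst: str) -> str:
    """Copy the container file and return a SHA-256 digest for later verification."""
    shutil.copy2(src, dst)  # copy2 preserves timestamps and metadata
    with open(dst, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()
```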

How to customize the settings and preferences of Dekart Private Disk 2.10

You can customize the settings and preferences of Dekart Private Disk 2.10 according to your needs by going to the "Options" menu of Dekart Private Disk 2.10 and selecting one of the following options:

- "General": change general settings such as the language, the startup mode, the tray icon, the password cache, etc.
- "Hotkeys": change the keyboard shortcuts for various actions such as mounting, unmounting, hiding, unhiding, encrypting, or decrypting.
- "Security": change security settings such as the encryption algorithm, the password strength, the password hint, etc.
- "Advanced": change advanced settings such as the disk size, the disk format, the disk label, etc.

After you make any changes to the settings and preferences, click on the "OK" button to save them.

Conclusion

Dekart Private Disk 2.10 is a powerful and secure disk encryption software that lets you create virtual encrypted disks to protect your sensitive data. It has many features and benefits that make it easy to use, reliable, and versatile. However, it also has some drawbacks that you should be aware of, such as the legal risk of activating it with a keygen or exposing your computer to malware or viruses. Therefore, you should use Dekart Private Disk 2.10 with caution and responsibility.

We hope this article has helped you understand what Dekart Private Disk 2.10 is, how to download and install it, how to use a keygen to activate it, how to use it to protect your data, and some tips and tricks for using it effectively. If you have any questions or comments about Dekart Private Disk 2.10, feel free to leave them below.

FAQs

Here are some frequently asked questions about Dekart Private Disk 2.10:

Q: Is Dekart Private Disk 2.10 compatible with Windows 10?

A: Yes, Dekart Private Disk 2.10 is compatible with Windows 10 as well as Windows XP/Vista/7/8/8.1.

Q: How many virtual encrypted disks can I create with Dekart Private Disk 2.10?

A: You can create as many virtual encrypted disks as you want with Dekart Private Disk 2.10, as long as you have enough disk space and memory on your computer.

Q: How can I recover my password if I forget it?

A: If you forget the password for your virtual encrypted disk, you can use the password hint feature of Dekart Private Disk 2.10 to jog your memory. However, if you don't have a password hint, or if it doesn't help you remember your password, there is no way to recover your password or access your data. Therefore, you should always remember your password or write it down somewhere safe.

Q: Can I use Dekart Private Disk 2.10 on a Mac or Linux computer?

A: No, Dekart Private Disk 2.10 is only designed for Windows computers. However, you can use other disk encryption software that is compatible with Mac or Linux, such as VeraCrypt or Cryptomator.

Q: Can I use Dekart Private Disk 2.10 on a network drive or a cloud storage service?

A: Yes, you can use Dekart Private Disk 2.10 on a network drive or a cloud storage service such as Dropbox or Google Drive. However, you should be careful about who has access to your network drive or cloud storage service and how secure they are. You should also make sure that you unmount and hide your virtual encrypted disk before logging out of your network drive or cloud storage service.

Dekart Private Disk 2.10 Keygen 11l: A Powerful and Secure Disk Encryption Software

Do you want to protect your sensitive data from unauthorized access, theft, or loss? Do you want to create a virtual encrypted disk that acts like a real drive on your computer? Do you want to use a reliable and easy-to-use disk encryption software that supports various media and encryption algorithms? If you answered yes to any of these questions, then you might be interested in Dekart Private Disk 2.10, a powerful and secure disk encryption software that lets you create encrypted disk partitions (drive letters) to protect your confidential information.

In this article, we will explain what Dekart Private Disk 2.10 is, what its features and benefits are, how to download and install it, how to use a keygen to activate it, how to use it to protect your data, and some tips and tricks for using it effectively. We will also answer some frequently asked questions about Dekart Private Disk 2.10 at the end of the article.

What is Dekart Private Disk 2.10?

Dekart Private Disk 2.10 is an easy-to-use, secure, and reliable disk encryption software that lets you transparently encrypt proprietary data. It creates one or more virtual disks on your hard drive and/or other external storage devices. Once you create a virtual encrypted disk, that disk is assigned a new drive letter and appears to you like any other system drive (like the A: or C: drive). While the disk is mounted, data is automatically encrypted and decrypted as you write and read it. When the disk is unmounted, either manually or by logging out of Windows, the secure data it contains becomes completely unreadable and undetectable by the operating system.

Features and benefits of Dekart Private Disk 2.10

Some of the features and benefits of Dekart Private Disk 2.10 are:

- It supports various media such as HDD, FDD, CD, CD/R, CD/RW, MO, MD, ZIP disks, flash drives, all types of flash memory cards, PDAs, and even digital cameras.
- It supports various encryption algorithms such as AES (Rijndael), Twofish, Blowfish, CAST-256, and Serpent, certified by the National Institute of Standards and Technology as corresponding to the Advanced Encryption Standard (AES) and Secure Hash Standard (SHA-1).
- It allows you to store your information on removable media such as CD/DVD disks or USB flash drives without installing any software on the host computer.
- It allows you to share a single PC among multiple users by creating a different virtual encrypted disk for each user.
- It allows you to hide your virtual encrypted disks from prying eyes by using hotkeys or passwords.
- It allows you to backup and restore your virtual encrypted disks in case of emergency or data loss.
- It allows you to customize the settings and preferences of the software according to your needs.

      \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Complemento Matematico 3 Cuaderno De Trabajo Secundaria Pdf Free.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Complemento Matematico 3 Cuaderno De Trabajo Secundaria Pdf Free.md deleted file mode 100644 index 21815e45163bbc726a65286fe5d2fbda198251f9..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Complemento Matematico 3 Cuaderno De Trabajo Secundaria Pdf Free.md +++ /dev/null @@ -1,91 +0,0 @@ - -

Complemento matematico 3 cuaderno de trabajo secundaria pdf free: a tool for learning and practicing mathematics

Do you like mathematics? Do you want to improve your skills and knowledge in this subject? Are you looking for a teaching resource to help you prepare for your exams? If your answer is yes, then you will be interested in downloading the free PDF of Complemento matematico 3, the workbook for the third year of secondary school.
The Complemento matematico 3 workbook is an excellent educational resource that offers a variety of mathematics exercises and problems for the third year of secondary school. The workbook was designed by Professor Armando Casarrubias García, who has extensive experience teaching this subject.

The workbook is organized into five thematic blocks covering the contents of the official mathematics syllabus for the third year of secondary school. Each block is divided into lessons that include explanations, examples, activities, and solutions. The workbook also contains self-assessments and final evaluations so you can measure your progress and reinforce your learning.

Why download the free PDF of Complemento matematico 3?

Downloading the free PDF of Complemento matematico 3 has many advantages, for both students and teachers. Here are some of them:

- It is a free and accessible resource. You can download it from any device with an internet connection, without paying anything. You can also print it or read it in digital format, whichever you prefer.
- It is a complete and up-to-date resource. It covers all the topics and objectives of the official mathematics syllabus for the third year of secondary school, and it is in line with current trends and advances in mathematics education worldwide.
- It is a didactic and dynamic resource. It offers an active, participatory methodology that invites you to explore, discover, reason, and solve mathematics problems. The workbook also proposes challenges and games that motivate and entertain you while you learn.
- It is a flexible and adaptable resource. It lets you work at your own pace and according to your needs and interests. You can choose the topics and activities you like most or find most difficult, and you can use the workbook either as a complement to or a substitute for your textbook.

How to download the free PDF of Complemento matematico 3

If you want to download the free PDF of Complemento matematico 3, you have several options. You can look for it on various websites that offer free downloads of books and educational materials. However, you should be careful about the quality and safety of these sites, as they may contain viruses, malware, or inappropriate content.

The best option is to download it from a legal and secure platform that guarantees a high-quality, risk-free download. One platform we recommend is Scribd, a website that gives you access to millions of books, documents, audiobooks, and podcasts for a monthly or annual subscription.

To download the workbook from Scribd, just follow these steps:

1. Go to Scribd.com in your browser or download the mobile app from your app store.
2. Create a free account or sign in with your email, Facebook, or Google.
3. Search for "complemento matematico 3 cuaderno de trabajo secundaria pdf free" in the search bar or in the education category.
4. Click on the download or read now button.
5. Enjoy the workbook on your device or print it if you wish.

Conclusion

Complemento matematico 3 is an excellent educational resource that will help you learn and practice mathematics for the third year of secondary school. As described above, it was designed by Professor Armando Casarrubias García, is organized into five thematic blocks with lessons, explanations, examples, activities, and solutions, and includes self-assessments and final evaluations.

Downloading the free PDF has many advantages for both students and teachers: it is free and accessible, complete and up to date, didactic and dynamic, flexible and adaptable. You can download it from a legal and secure platform such as Scribd, which guarantees a high-quality, risk-free download.

Don't wait any longer: download the free PDF of Complemento matematico 3 today and enjoy an educational resource that will give you a unique and unforgettable experience with mathematics.
      -
What topics does Complemento matematico 3 cover?

The workbook covers the following mathematics topics for the third year of secondary school:

- Block 1: Numbers and numeral systems. This block studies the properties of and operations with natural, integer, rational, and irrational numbers. It also analyzes the decimal, binary, and hexadecimal numeral systems and solves problems involving them.
- Block 2: Algebra. This block reviews concepts of and operations with algebraic expressions, first- and second-degree equations and inequalities, systems of linear and quadratic equations, and linear and quadratic functions. These concepts are also applied to solving real-life problems.
- Block 3: Geometry and trigonometry. This block explores the properties of and relationships between plane and solid geometric figures, such as angles, triangles, quadrilaterals, regular polygons, the circumference, the circle, prisms, pyramids, cylinders, cones, and spheres. It also introduces the concepts and applications of trigonometry in right triangles and in general triangles.
- Block 4: Measurement. This block works with the concepts and units of measurement for length, area, volume, capacity, mass, time, and temperature. It also performs conversions between different measurement systems and solves problems involving these magnitudes.
- Block 5: Probability and statistics. This block covers the concepts and methods of probability and statistics for analyzing and interpreting numerical and graphical data. It studies sample spaces, events, classical and frequentist probability, and random and dependent experiments. It also reviews population, sample, statistical variables, absolute and relative frequency, arithmetic mean, median, mode, range, standard deviation, and coefficient of variation (see the formulas sketched after this list).
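For reference, the descriptive statistics named in Block 5 correspond to these standard formulas (a generic recap, not taken from the workbook itself):

```latex
% Arithmetic mean, standard deviation, and coefficient of variation
% for a data set x_1, x_2, \dots, x_n.
\bar{x} = \frac{1}{n} \sum_{i=1}^{n} x_i
\qquad
s = \sqrt{\frac{1}{n} \sum_{i=1}^{n} (x_i - \bar{x})^2}
\qquad
CV = \frac{s}{\bar{x}}
```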
How to use Complemento matematico 3

The workbook is an educational material you can use in different ways depending on your goals and preferences. Here are some suggestions:

- Use it as a support for your textbook or your mathematics class. Consult the explanations and examples when you have doubts or want to review a topic, and work through the activities and problems to practice what you have learned or to prepare for your exams.
- Use it as a resource for learning on your own or broadening your knowledge. Choose the topics that interest you most, or that you find hardest, and study them at your own pace; you can also look for more information or exercises on those topics online or in other books.
- Use it as material to have fun and challenge yourself with mathematics. Solve the challenges and games it proposes and see whether you can find more than one solution, or a different solution from the one given. You can also invent your own mathematical challenges or games and share them with friends or family.
      \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack LINK Microsoft Office 2010 Ez-activator.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack LINK Microsoft Office 2010 Ez-activator.md deleted file mode 100644 index f92a7d62d7accd2aac4b275feaebc569b0be651b..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack LINK Microsoft Office 2010 Ez-activator.md +++ /dev/null @@ -1,6 +0,0 @@ -

crack microsoft office 2010 ez-activator
      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Zindagi Na Milegi Dobara Full BEST Movie ).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Zindagi Na Milegi Dobara Full BEST Movie ).md deleted file mode 100644 index 3bcb34af0d0f4b1330fc6564ee3e459112bd2ced..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Zindagi Na Milegi Dobara Full BEST Movie ).md +++ /dev/null @@ -1,10 +0,0 @@ -
HD Online Player (Zindagi Na Milegi Dobara full movie)

Zindagi Na Milegi Dobara (2011) is an Indian Hindi-language buddy road comedy drama film directed by Zoya Akhtar and produced by Farhan Akhtar and Ritesh Sidhwani. The promo of the song "Aashiqui" from the film is available online, as is the full video of the song "Dil Dhadakne Do", starring Hrithik Roshan, Katrina Kaif, and Farhan Akhtar.

You can watch the Zindagi Na Milegi Dobara full movie online only on Eros Now: the much awaited, heavily star-studded movie of the year.
      \ No newline at end of file diff --git a/spaces/robin0307/MMOCR/configs/textdet/maskrcnn/README.md b/spaces/robin0307/MMOCR/configs/textdet/maskrcnn/README.md deleted file mode 100644 index c6ef17e7659558a4f41834f4614d58caddcbe208..0000000000000000000000000000000000000000 --- a/spaces/robin0307/MMOCR/configs/textdet/maskrcnn/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Mask R-CNN - -> [Mask R-CNN](https://arxiv.org/abs/1703.06870) - - - -## Abstract - -We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. - -
      - -
      - -## Results and models - -### CTW1500 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :----------------------------------------------------------: | :--------------: | :-----------: | :----------: | :-----: | :-------: | :----: | :-------: | :---: | :-------------------------------------------------------------: | -| [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py) | ImageNet | CTW1500 Train | CTW1500 Test | 160 | 1600 | 0.753 | 0.712 | 0.732 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500_20210219-96497a76.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500_20210219-96497a76.log.json) | - -### ICDAR2015 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :--------------------------------------------------------: | :--------------: | :-------------: | :------------: | :-----: | :-------: | :----: | :-------: | :---: | :-----------------------------------------------------------: | -| [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py) | ImageNet | ICDAR2015 Train | ICDAR2015 Test | 160 | 1920 | 0.783 | 0.872 | 0.825 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015_20210219-8eb340a3.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015_20210219-8eb340a3.log.json) | - -### ICDAR2017 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :---------------------------------------------------------: | :--------------: | :-------------: | :-----------: | :-----: | :-------: | :----: | :-------: | :---: | :-----------------------------------------------------------: | -| [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017.py) | ImageNet | ICDAR2017 Train | ICDAR2017 Val | 160 | 1600 | 0.754 | 0.827 | 0.789 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017_20210218-c6ec3ebb.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017_20210218-c6ec3ebb.log.json) | - -```{note} -We tuned parameters with the techniques in [Pyramid Mask Text Detector](https://arxiv.org/abs/1903.11800) -``` - -## Citation - -```bibtex -@INPROCEEDINGS{8237584, - author={K. {He} and G. {Gkioxari} and P. {Dollár} and R. 
{Girshick}}, - booktitle={2017 IEEE International Conference on Computer Vision (ICCV)}, - title={Mask R-CNN}, - year={2017}, - pages={2980-2988}, - doi={10.1109/ICCV.2017.322}} -``` diff --git a/spaces/robinhad/ukrainian-tts/tests/test_formatter.py b/spaces/robinhad/ukrainian-tts/tests/test_formatter.py deleted file mode 100644 index 403abc61da8d0d861dcd8026a92ad89cb3473089..0000000000000000000000000000000000000000 --- a/spaces/robinhad/ukrainian-tts/tests/test_formatter.py +++ /dev/null @@ -1,64 +0,0 @@ -from ukrainian_tts.formatter import preprocess_text -import pytest - - -@pytest.mark.parametrize( - "text,expected", - [ - ("Quality of life update", "кваліті оф ліфе юпдате"), - ("Він украв 20000000 $", "він украв двадцять мільйонів доларів"), - ("Він украв 20000000", "він украв двадцять мільйонів"), - ("Він украв 1 $", "він украв один долар"), - ("Він украв 2 $", "він украв два долари"), - ("Він украв 2 ₴", "він украв дві гривні"), - ( - "111 000 000 000 доларів державного боргу.", - "сто одинадцять мільярдів доларів державного боргу.", - ), - ( - "11100000001 доларів державного боргу.", - "одинадцять мільярдів сто мільйонів один доларів державного боргу.", - ), - ( - "10000$, 15000 корупціонерів", - "десять тисяч доларів, п'ятнадцять тисяч корупціонерів", - ), - ( - "10000 $, 15000 корупціонерів", - "десять тисяч доларів, п'ятнадцять тисяч корупціонерів", - ), - ( - "$10000, 15000 корупціонерів", - "десять тисяч доларів, п'ятнадцять тисяч корупціонерів", - ), - ( - "10000$ у еквіваленті борщових заправок", - "десять тисяч доларів у еквіваленті борщових заправок", - ), - ("10-30-40-50-5-9-5", "десять-тридцять-сорок-п'ятдесят-п'ять-дев'ять-п'ять"), - ], -) -def test_formatter(text, expected): - assert preprocess_text(text) == expected - - -# Purspose of these tests, to have clearly separate list of issues -# in the conversion. Once fixed, these cases should move to test_formatter -# We still want make sure that no changes happens there, as any regressions -# is bad, or interesting. -@pytest.mark.parametrize( - "text,expected", - [ - # Should be два долара - ( - "2 $, 15000 корупціонерів", - "два доларів, п'ятнадцять тисяч корупціонерів", - ), - # this is wrong case, should be "це дев'ятнадцятирічне вино." - # Implementing this, require to have proper parsing of words into the token stream - # which reqiure reworking of current approach. 
- ("це 19-річне вино.", "це дев'ятнадцять-річне вино."), - ], -) -def test_planned_formatter_issues(text, expected): - assert preprocess_text(text) == expected diff --git a/spaces/runa91/bite_gradio/src/configs/dataset_path_configs.py b/spaces/runa91/bite_gradio/src/configs/dataset_path_configs.py deleted file mode 100644 index d8d5d63ee0ecd6064157f37983820c0db73d2fe7..0000000000000000000000000000000000000000 --- a/spaces/runa91/bite_gradio/src/configs/dataset_path_configs.py +++ /dev/null @@ -1,21 +0,0 @@ - - -import numpy as np -import os -import sys - -abs_barc_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',)) - -# stanext dataset -# (1) path to stanext dataset -STAN_V12_ROOT_DIR = '/ps/scratch/nrueegg/new_projects/Animals/data/dog_datasets/Stanford_Dogs_Dataset' + '/StanfordExtra_V12/' -IMG_V12_DIR = os.path.join(STAN_V12_ROOT_DIR, 'StanExtV12_Images') -JSON_V12_DIR = os.path.join(STAN_V12_ROOT_DIR, 'labels', "StanfordExtra_v12.json") -STAN_V12_TRAIN_LIST_DIR = os.path.join(STAN_V12_ROOT_DIR, 'labels', 'train_stanford_StanfordExtra_v12.npy') -STAN_V12_VAL_LIST_DIR = os.path.join(STAN_V12_ROOT_DIR, 'labels', 'val_stanford_StanfordExtra_v12.npy') -STAN_V12_TEST_LIST_DIR = os.path.join(STAN_V12_ROOT_DIR, 'labels', 'test_stanford_StanfordExtra_v12.npy') -# (2) path to related data such as breed indices and prepared predictions for withers, throat and eye keypoints -STANEXT_RELATED_DATA_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'stanext_related_data') - -# test image crop dataset -TEST_IMAGE_CROP_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'datasets', 'test_image_crops') diff --git a/spaces/rzimmerdev/lenet_mnist/src/predict.py b/spaces/rzimmerdev/lenet_mnist/src/predict.py deleted file mode 100644 index 7f1c5e3a97376dd384145545c4e8909efda59c96..0000000000000000000000000000000000000000 --- a/spaces/rzimmerdev/lenet_mnist/src/predict.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 -import torch -from torch import nn -import numpy as np - -import plotly.express as px -from plotly.subplots import make_subplots - -from src.trainer import LitTrainer -from src.models import CNN -from src.dataset import DatasetMNIST, download_mnist - - -def load_pl_net(path="checkpoints/lightning_logs/version_26/checkpoints/epoch=9-step=1000.ckpt"): - pl_net = LitTrainer.load_from_checkpoint(path, model=CNN(1, 10)) - return pl_net - - -def load_torch_net(path="checkpoints/pytorch/version_0.pt"): - state_dict = torch.load(path) - net = CNN(1, 10) - net.load_state_dict(state_dict) - return net - - -def get_sequence(model): - fig = make_subplots(rows=2, cols=5) - - i, j = 0, np.random.randint(0, 30000) - - while i < 10: - x, y = dataset[j] - - predicted, p = predict(x, model) - - if predicted == i and p > 0.95: - img = np.flip(np.array(x.reshape(28, 28)), 0) - fig.add_trace(px.imshow(img).data[0], row=int(i/5)+1, col=i % 5+1) - i += 1 - j += 1 - return fig - - -def predict(x, model, device="cuda"): - y_pred = model(x.to(device)).detach().cpu() - predicted = int(np.argmax(y_pred)) - p = torch.max(nn.functional.softmax(y_pred, dim=0)) - - return predicted, p - - -def predict_interval(x, model, device="cuda"): - y_pred = model(x.to(device)) - - print(y_pred) - - predicted = np.argsort(y_pred.cpu().detach().numpy()) - p = nn.functional.softmax(y_pred, dim=0) - - return {int(i): float(p[i]) for i in predicted} - - -if __name__ == "__main__": - mnist = download_mnist("downloads/mnist/") - dataset, test_data = 
DatasetMNIST(*mnist["train"]), DatasetMNIST(*mnist["test"]) - - print("PyTorch Lightning Network") - get_sequence(load_pl_net().to("cuda")).show() - print("Manual Network") - get_sequence(load_torch_net().to("cuda")).show() diff --git a/spaces/saefro991/aet_demo/lightning_module.py b/spaces/saefro991/aet_demo/lightning_module.py deleted file mode 100644 index 70d8d931ec375974e9d39368a3a094626e67d464..0000000000000000000000000000000000000000 --- a/spaces/saefro991/aet_demo/lightning_module.py +++ /dev/null @@ -1,792 +0,0 @@ -import torch -import pytorch_lightning as pl -import torchaudio -import os -import pathlib -import tqdm -from model import ( - EncoderModule, - ChannelFeatureModule, - ChannelModule, - MultiScaleSpectralLoss, - GSTModule, -) - -class PretrainLightningModule(pl.LightningModule): - def __init__(self, config): - super().__init__() - self.save_hyperparameters() - self.config = config - if config["general"]["use_gst"]: - self.encoder = EncoderModule(config) - self.gst = GSTModule(config) - else: - self.encoder = EncoderModule(config, use_channel=True) - self.channelfeats = ChannelFeatureModule(config) - - self.channel = ChannelModule(config) - self.vocoder = None - - self.criteria_a = MultiScaleSpectralLoss(config) - if "feature_loss" in config["train"]: - if config["train"]["feature_loss"]["type"] == "mae": - self.criteria_b = torch.nn.L1Loss() - else: - self.criteria_b = torch.nn.MSELoss() - else: - self.criteria = torch.nn.L1Loss() - self.alpha = config["train"]["alpha"] - - def forward(self, melspecs, wavsaux): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(melspecs.unsqueeze(1).transpose(2, 3)) - chfeats = self.gst(melspecs.transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder(melspecs.unsqueeze(1).transpose(2, 3)) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - wavsdeg = self.channel(wavsaux, chfeats) - return enc_out, wavsdeg - - def training_step(self, batch, batch_idx): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(batch["melspecs"].unsqueeze(1).transpose(2, 3)) - chfeats = self.gst(batch["melspecs"].transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder( - batch["melspecs"].unsqueeze(1).transpose(2, 3) - ) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - wavsdeg = self.channel(batch["wavsaux"], chfeats) - loss_recons = self.criteria_a(wavsdeg, batch["wavs"]) - if self.config["general"]["feature_type"] == "melspec": - loss_encoder = self.criteria_b(enc_out, batch["melspecsaux"]) - elif self.config["general"]["feature_type"] == "vocfeats": - loss_encoder = self.criteria_b(enc_out, batch["melceps"]) - loss = self.alpha * loss_recons + (1.0 - self.alpha) * loss_encoder - self.log( - "train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True - ) - self.log( - "train_loss_recons", - loss_recons, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - self.log( - "train_loss_encoder", - loss_encoder, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss - - def validation_step(self, batch, batch_idx): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(batch["melspecs"].unsqueeze(1).transpose(2, 3)) - chfeats = self.gst(batch["melspecs"].transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder( - batch["melspecs"].unsqueeze(1).transpose(2, 3) - ) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - wavsdeg = 
self.channel(batch["wavsaux"], chfeats) - loss_recons = self.criteria_a(wavsdeg, batch["wavs"]) - if self.config["general"]["feature_type"] == "melspec": - val_aux_feats = batch["melspecsaux"] - feats_name = "melspec" - loss_encoder = self.criteria_b(enc_out, val_aux_feats) - elif self.config["general"]["feature_type"] == "vocfeats": - val_aux_feats = batch["melceps"] - feats_name = "melcep" - loss_encoder = self.criteria_b(enc_out, val_aux_feats) - loss = self.alpha * loss_recons + (1.0 - self.alpha) * loss_encoder - logger_img_dict = { - "val_src_melspec": batch["melspecs"], - "val_pred_{}".format(feats_name): enc_out, - "val_aux_{}".format(feats_name): val_aux_feats, - } - logger_wav_dict = { - "val_src_wav": batch["wavs"], - "val_pred_wav": wavsdeg, - "val_aux_wav": batch["wavsaux"], - } - return { - "val_loss": loss, - "val_loss_recons": loss_recons, - "val_loss_encoder": loss_encoder, - "logger_dict": [logger_img_dict, logger_wav_dict], - } - - def validation_epoch_end(self, outputs): - val_loss = torch.stack([out["val_loss"] for out in outputs]).mean().item() - val_loss_recons = ( - torch.stack([out["val_loss_recons"] for out in outputs]).mean().item() - ) - val_loss_encoder = ( - torch.stack([out["val_loss_encoder"] for out in outputs]).mean().item() - ) - self.log("val_loss", val_loss, on_epoch=True, prog_bar=True, logger=True) - self.log( - "val_loss_recons", - val_loss_recons, - on_epoch=True, - prog_bar=True, - logger=True, - ) - self.log( - "val_loss_encoder", - val_loss_encoder, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - def test_step(self, batch, batch_idx): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(batch["melspecs"].unsqueeze(1).transpose(2, 3)) - chfeats = self.gst(batch["melspecs"].transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder( - batch["melspecs"].unsqueeze(1).transpose(2, 3) - ) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - wavsdeg = self.channel(batch["wavsaux"], chfeats) - if self.config["general"]["feature_type"] == "melspec": - enc_feats = enc_out - enc_feats_aux = batch["melspecsaux"] - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats = torch.cat((batch["f0s"].unsqueeze(1), enc_out), dim=1) - enc_feats_aux = torch.cat( - (batch["f0s"].unsqueeze(1), batch["melceps"]), dim=1 - ) - recons_wav = self.vocoder(enc_feats_aux).squeeze(1) - remas = self.vocoder(enc_feats).squeeze(1) - if self.config["general"]["feature_type"] == "melspec": - enc_feats_input = batch["melspecs"] - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats_input = torch.cat( - (batch["f0s"].unsqueeze(1), batch["melcepssrc"]), dim=1 - ) - input_recons = self.vocoder(enc_feats_input).squeeze(1) - if "wavsaux" in batch: - gt_wav = batch["wavsaux"] - else: - gt_wav = None - return { - "reconstructed": recons_wav, - "remastered": remas, - "channeled": wavsdeg, - "groundtruth": gt_wav, - "input": batch["wavs"], - "input_recons": input_recons, - } - - def test_epoch_end(self, outputs): - wav_dir = ( - pathlib.Path(self.logger.experiment[0].log_dir).parent.parent / "test_wavs" - ) - os.makedirs(wav_dir, exist_ok=True) - mel_dir = ( - pathlib.Path(self.logger.experiment[0].log_dir).parent.parent / "test_mels" - ) - os.makedirs(mel_dir, exist_ok=True) - print("Saving mel spectrogram plots ...") - for idx, out in enumerate(tqdm.tqdm(outputs)): - for key in [ - "reconstructed", - "remastered", - "channeled", - "input", - "input_recons", - "groundtruth", - ]: - if out[key] != 
None: - torchaudio.save( - wav_dir / "{}-{}.wav".format(idx, key), - out[key][0, ...].unsqueeze(0).cpu(), - sample_rate=self.config["preprocess"]["sampling_rate"], - channels_first=True, - ) - - def configure_optimizers(self): - optimizer = torch.optim.Adam( - self.parameters(), lr=self.config["train"]["learning_rate"] - ) - lr_scheduler_config = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, mode="min", factor=0.5, min_lr=1e-5, verbose=True - ), - "interval": "epoch", - "frequency": 3, - "monitor": "val_loss", - } - return {"optimizer": optimizer, "lr_scheduler": lr_scheduler_config} - - -class SSLBaseModule(pl.LightningModule): - def __init__(self, config): - super().__init__() - self.save_hyperparameters() - self.config = config - if config["general"]["use_gst"]: - self.encoder = EncoderModule(config) - self.gst = GSTModule(config) - else: - self.encoder = EncoderModule(config, use_channel=True) - self.channelfeats = ChannelFeatureModule(config) - self.channel = ChannelModule(config) - - if config["train"]["load_pretrained"]: - pre_model = PretrainLightningModule.load_from_checkpoint( - checkpoint_path=config["train"]["pretrained_path"] - ) - self.encoder.load_state_dict(pre_model.encoder.state_dict(), strict=False) - self.channel.load_state_dict(pre_model.channel.state_dict(), strict=False) - if config["general"]["use_gst"]: - self.gst.load_state_dict(pre_model.gst.state_dict(), strict=False) - else: - self.channelfeats.load_state_dict( - pre_model.channelfeats.state_dict(), strict=False - ) - - self.vocoder = None - self.criteria = self.get_loss_function(config) - - def training_step(self, batch, batch_idx): - raise NotImplementedError() - - def validation_step(self, batch, batch_idx): - raise NotImplementedError() - - def validation_epoch_end(self, outputs): - raise NotImplementedError() - - def configure_optimizers(self): - raise NotImplementedError() - - def get_loss_function(self, config): - raise NotImplementedError() - - def forward(self, melspecs, f0s=None): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(melspecs.unsqueeze(1).transpose(2, 3)) - chfeats = self.gst(melspecs.transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder(melspecs.unsqueeze(1).transpose(2, 3)) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - if self.config["general"]["feature_type"] == "melspec": - enc_feats = enc_out - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats = torch.cat((f0s.unsqueeze(1), enc_out), dim=1) - remas = self.vocoder(enc_feats).squeeze(1) - wavsdeg = self.channel(remas, chfeats) - return remas, wavsdeg - - def test_step(self, batch, batch_idx): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(batch["melspecs"].unsqueeze(1).transpose(2, 3)) - chfeats = self.gst(batch["melspecs"].transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder( - batch["melspecs"].unsqueeze(1).transpose(2, 3) - ) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - if self.config["general"]["feature_type"] == "melspec": - enc_feats = enc_out - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats = torch.cat((batch["f0s"].unsqueeze(1), enc_out), dim=1) - remas = self.vocoder(enc_feats).squeeze(1) - wavsdeg = self.channel(remas, chfeats) - if self.config["general"]["feature_type"] == "melspec": - enc_feats_input = batch["melspecs"] - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats_input = torch.cat( - 
(batch["f0s"].unsqueeze(1), batch["melcepssrc"]), dim=1 - ) - input_recons = self.vocoder(enc_feats_input).squeeze(1) - if "wavsaux" in batch: - gt_wav = batch["wavsaux"] - if self.config["general"]["feature_type"] == "melspec": - enc_feats_aux = batch["melspecsaux"] - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats_aux = torch.cat( - (batch["f0s"].unsqueeze(1), batch["melceps"]), dim=1 - ) - recons_wav = self.vocoder(enc_feats_aux).squeeze(1) - else: - gt_wav = None - recons_wav = None - return { - "reconstructed": recons_wav, - "remastered": remas, - "channeled": wavsdeg, - "input": batch["wavs"], - "input_recons": input_recons, - "groundtruth": gt_wav, - } - - def test_epoch_end(self, outputs): - wav_dir = ( - pathlib.Path(self.logger.experiment[0].log_dir).parent.parent / "test_wavs" - ) - os.makedirs(wav_dir, exist_ok=True) - mel_dir = ( - pathlib.Path(self.logger.experiment[0].log_dir).parent.parent / "test_mels" - ) - os.makedirs(mel_dir, exist_ok=True) - print("Saving mel spectrogram plots ...") - for idx, out in enumerate(tqdm.tqdm(outputs)): - plot_keys = [] - for key in [ - "reconstructed", - "remastered", - "channeled", - "input", - "input_recons", - "groundtruth", - ]: - if out[key] != None: - plot_keys.append(key) - torchaudio.save( - wav_dir / "{}-{}.wav".format(idx, key), - out[key][0, ...].unsqueeze(0).cpu(), - sample_rate=self.config["preprocess"]["sampling_rate"], - channels_first=True, - ) - - -class SSLStepLightningModule(SSLBaseModule): - def __init__(self, config): - super().__init__(config) - if config["train"]["fix_channel"]: - for param in self.channel.parameters(): - param.requires_grad = False - - def training_step(self, batch, batch_idx, optimizer_idx): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(batch["melspecs"].unsqueeze(1).transpose(2, 3)) - chfeats = self.gst(batch["melspecs"].transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder( - batch["melspecs"].unsqueeze(1).transpose(2, 3) - ) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - if self.config["general"]["feature_type"] == "melspec": - enc_feats = enc_out - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats = torch.cat((batch["f0s"].unsqueeze(1), enc_out), dim=1) - remas = self.vocoder(enc_feats).squeeze(1) - wavsdeg = self.channel(remas, chfeats) - loss = self.criteria(wavsdeg, batch["wavs"]) - self.log( - "train_loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss - - def validation_step(self, batch, batch_idx): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(batch["melspecs"].unsqueeze(1).transpose(2, 3)) - chfeats = self.gst(batch["melspecs"].transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder( - batch["melspecs"].unsqueeze(1).transpose(2, 3) - ) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - if self.config["general"]["feature_type"] == "melspec": - enc_feats = enc_out - feats_name = "melspec" - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats = torch.cat((batch["f0s"].unsqueeze(1), enc_out), dim=1) - feats_name = "melcep" - remas = self.vocoder(enc_feats).squeeze(1) - wavsdeg = self.channel(remas, chfeats) - loss = self.criteria(wavsdeg, batch["wavs"]) - logger_img_dict = { - "val_src_melspec": batch["melspecs"], - "val_pred_{}".format(feats_name): enc_out, - } - for auxfeats in ["melceps", "melspecsaux"]: - if auxfeats in batch: - 
logger_img_dict["val_aux_{}".format(auxfeats)] = batch[auxfeats] - logger_wav_dict = { - "val_src_wav": batch["wavs"], - "val_remastered_wav": remas, - "val_pred_wav": wavsdeg, - } - if "wavsaux" in batch: - logger_wav_dict["val_aux_wav"] = batch["wavsaux"] - d_out = {"val_loss": loss, "logger_dict": [logger_img_dict, logger_wav_dict]} - return d_out - - def validation_epoch_end(self, outputs): - self.log( - "val_loss", - torch.stack([out["val_loss"] for out in outputs]).mean().item(), - on_epoch=True, - prog_bar=True, - logger=True, - ) - - def optimizer_step( - self, - epoch, - batch_idx, - optimizer, - optimizer_idx, - optimizer_closure, - on_tpu=False, - using_native_amp=False, - using_lbfgs=False, - ): - if epoch < self.config["train"]["epoch_channel"]: - if optimizer_idx == 0: - optimizer.step(closure=optimizer_closure) - elif optimizer_idx == 1: - optimizer_closure() - else: - if optimizer_idx == 0: - optimizer_closure() - elif optimizer_idx == 1: - optimizer.step(closure=optimizer_closure) - - def configure_optimizers(self): - if self.config["train"]["fix_channel"]: - if self.config["general"]["use_gst"]: - optimizer_channel = torch.optim.Adam( - self.gst.parameters(), lr=self.config["train"]["learning_rate"] - ) - else: - optimizer_channel = torch.optim.Adam( - self.channelfeats.parameters(), - lr=self.config["train"]["learning_rate"], - ) - optimizer_encoder = torch.optim.Adam( - self.encoder.parameters(), lr=self.config["train"]["learning_rate"] - ) - else: - if self.config["general"]["use_gst"]: - optimizer_channel = torch.optim.Adam( - [ - {"params": self.channel.parameters()}, - {"params": self.gst.parameters()}, - ], - lr=self.config["train"]["learning_rate"], - ) - else: - optimizer_channel = torch.optim.Adam( - [ - {"params": self.channel.parameters()}, - {"params": self.channelfeats.parameters()}, - ], - lr=self.config["train"]["learning_rate"], - ) - optimizer_encoder = torch.optim.Adam( - self.encoder.parameters(), lr=self.config["train"]["learning_rate"] - ) - optimizers = [optimizer_channel, optimizer_encoder] - schedulers = [ - { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizers[0], mode="min", factor=0.5, min_lr=1e-5, verbose=True - ), - "interval": "epoch", - "frequency": 3, - "monitor": "val_loss", - }, - { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizers[1], mode="min", factor=0.5, min_lr=1e-5, verbose=True - ), - "interval": "epoch", - "frequency": 3, - "monitor": "val_loss", - }, - ] - return optimizers, schedulers - - def get_loss_function(self, config): - return MultiScaleSpectralLoss(config) - - -class SSLDualLightningModule(SSLBaseModule): - def __init__(self, config): - super().__init__(config) - if config["train"]["fix_channel"]: - for param in self.channel.parameters(): - param.requires_grad = False - self.spec_module = torchaudio.transforms.MelSpectrogram( - sample_rate=config["preprocess"]["sampling_rate"], - n_fft=config["preprocess"]["fft_length"], - win_length=config["preprocess"]["frame_length"], - hop_length=config["preprocess"]["frame_shift"], - f_min=config["preprocess"]["fmin"], - f_max=config["preprocess"]["fmax"], - n_mels=config["preprocess"]["n_mels"], - power=1, - center=True, - norm="slaney", - mel_scale="slaney", - ) - self.beta = config["train"]["beta"] - self.criteria_a, self.criteria_b = self.get_loss_function(config) - - def training_step(self, batch, batch_idx): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(batch["melspecs"].unsqueeze(1).transpose(2, 3)) - chfeats = 
self.gst(batch["melspecs"].transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder( - batch["melspecs"].unsqueeze(1).transpose(2, 3) - ) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - if self.config["general"]["feature_type"] == "melspec": - enc_feats = enc_out - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats = torch.cat((batch["f0s"].unsqueeze(1), enc_out), dim=1) - remas = self.vocoder(enc_feats).squeeze(1) - wavsdeg = self.channel(remas, chfeats) - loss_recons = self.criteria_a(wavsdeg, batch["wavs"]) - - with torch.no_grad(): - wavsdegtask = self.channel(batch["wavstask"], chfeats) - melspecstask = self.calc_spectrogram(wavsdegtask) - if self.config["general"]["use_gst"]: - enc_out_task = self.encoder(melspecstask.unsqueeze(1).transpose(2, 3)) - else: - enc_out_task, _ = self.encoder(melspecstask.unsqueeze(1).transpose(2, 3)) - enc_out_task = enc_out_task.squeeze(1).transpose(1, 2) - if self.config["general"]["feature_type"] == "melspec": - loss_task = self.criteria_b(enc_out_task, batch["melspecstask"]) - elif self.config["general"]["feature_type"] == "vocfeats": - loss_task = self.criteria_b(enc_out_task, batch["melcepstask"]) - loss = self.beta * loss_recons + (1 - self.beta) * loss_task - - self.log( - "train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True - ) - self.log( - "train_loss_recons", - loss_recons, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - self.log( - "train_loss_task", - loss_task, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss - - def validation_step(self, batch, batch_idx): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(batch["melspecs"].unsqueeze(1).transpose(2, 3)) - chfeats = self.gst(batch["melspecs"].transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder( - batch["melspecs"].unsqueeze(1).transpose(2, 3) - ) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - if self.config["general"]["feature_type"] == "melspec": - enc_feats = enc_out - feats_name = "melspec" - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats = torch.cat((batch["f0s"].unsqueeze(1), enc_out), dim=1) - feats_name = "melcep" - remas = self.vocoder(enc_feats).squeeze(1) - wavsdeg = self.channel(remas, chfeats) - loss_recons = self.criteria_a(wavsdeg, batch["wavs"]) - - wavsdegtask = self.channel(batch["wavstask"], chfeats) - melspecstask = self.calc_spectrogram(wavsdegtask) - if self.config["general"]["use_gst"]: - enc_out_task = self.encoder(melspecstask.unsqueeze(1).transpose(2, 3)) - else: - enc_out_task, _ = self.encoder(melspecstask.unsqueeze(1).transpose(2, 3)) - enc_out_task = enc_out_task.squeeze(1).transpose(1, 2) - if self.config["general"]["feature_type"] == "melspec": - enc_out_task_truth = batch["melspecstask"] - loss_task = self.criteria_b(enc_out_task, enc_out_task_truth) - elif self.config["general"]["feature_type"] == "vocfeats": - enc_out_task_truth = batch["melcepstask"] - loss_task = self.criteria_b(enc_out_task, enc_out_task_truth) - loss = self.beta * loss_recons + (1 - self.beta) * loss_task - - logger_img_dict = { - "val_src_melspec": batch["melspecs"], - "val_pred_{}".format(feats_name): enc_out, - "val_truth_{}_task".format(feats_name): enc_out_task_truth, - "val_pred_{}_task".format(feats_name): enc_out_task, - } - for auxfeats in ["melceps", "melspecsaux"]: - if auxfeats in batch: - logger_img_dict["val_aux_{}".format(auxfeats)] = 
batch[auxfeats] - logger_wav_dict = { - "val_src_wav": batch["wavs"], - "val_remastered_wav": remas, - "val_pred_wav": wavsdeg, - "val_truth_wavtask": batch["wavstask"], - "val_deg_wavtask": wavsdegtask, - } - if "wavsaux" in batch: - logger_wav_dict["val_aux_wav"] = batch["wavsaux"] - - d_out = { - "val_loss": loss, - "val_loss_recons": loss_recons, - "val_loss_task": loss_task, - "logger_dict": [logger_img_dict, logger_wav_dict], - } - return d_out - - def validation_epoch_end(self, outputs): - self.log( - "val_loss", - torch.stack([out["val_loss"] for out in outputs]).mean().item(), - on_epoch=True, - prog_bar=True, - logger=True, - ) - self.log( - "val_loss_recons", - torch.stack([out["val_loss_recons"] for out in outputs]).mean().item(), - on_epoch=True, - prog_bar=True, - logger=True, - ) - self.log( - "val_loss_task", - torch.stack([out["val_loss_task"] for out in outputs]).mean().item(), - on_epoch=True, - prog_bar=True, - logger=True, - ) - - def test_step(self, batch, batch_idx): - if self.config["general"]["use_gst"]: - enc_out = self.encoder(batch["melspecs"].unsqueeze(1).transpose(2, 3)) - chfeats = self.gst(batch["melspecs"].transpose(1, 2)) - else: - enc_out, enc_hidden = self.encoder( - batch["melspecs"].unsqueeze(1).transpose(2, 3) - ) - chfeats = self.channelfeats(enc_hidden) - enc_out = enc_out.squeeze(1).transpose(1, 2) - if self.config["general"]["feature_type"] == "melspec": - enc_feats = enc_out - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats = torch.cat((batch["f0s"].unsqueeze(1), enc_out), dim=1) - remas = self.vocoder(enc_feats).squeeze(1) - wavsdeg = self.channel(remas, chfeats) - if self.config["general"]["feature_type"] == "melspec": - enc_feats_input = batch["melspecs"] - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats_input = torch.cat( - (batch["f0s"].unsqueeze(1), batch["melcepssrc"]), dim=1 - ) - input_recons = self.vocoder(enc_feats_input).squeeze(1) - - wavsdegtask = self.channel(batch["wavstask"], chfeats) - if "wavsaux" in batch: - gt_wav = batch["wavsaux"] - if self.config["general"]["feature_type"] == "melspec": - enc_feats_aux = batch["melspecsaux"] - elif self.config["general"]["feature_type"] == "vocfeats": - enc_feats_aux = torch.cat( - (batch["f0s"].unsqueeze(1), batch["melceps"]), dim=1 - ) - recons_wav = self.vocoder(enc_feats_aux).squeeze(1) - else: - gt_wav = None - recons_wav = None - return { - "reconstructed": recons_wav, - "remastered": remas, - "channeled": wavsdeg, - "channeled_task": wavsdegtask, - "input": batch["wavs"], - "input_recons": input_recons, - "groundtruth": gt_wav, - } - - def test_epoch_end(self, outputs): - wav_dir = ( - pathlib.Path(self.logger.experiment[0].log_dir).parent.parent / "test_wavs" - ) - os.makedirs(wav_dir, exist_ok=True) - mel_dir = ( - pathlib.Path(self.logger.experiment[0].log_dir).parent.parent / "test_mels" - ) - os.makedirs(mel_dir, exist_ok=True) - print("Saving mel spectrogram plots ...") - for idx, out in enumerate(tqdm.tqdm(outputs)): - plot_keys = [] - for key in [ - "reconstructed", - "remastered", - "channeled", - "channeled_task", - "input", - "input_recons", - "groundtruth", - ]: - if out[key] != None: - plot_keys.append(key) - torchaudio.save( - wav_dir / "{}-{}.wav".format(idx, key), - out[key][0, ...].unsqueeze(0).cpu(), - sample_rate=self.config["preprocess"]["sampling_rate"], - channels_first=True, - ) - - def configure_optimizers(self): - optimizer = torch.optim.Adam( - self.parameters(), lr=self.config["train"]["learning_rate"] - ) - 
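        # The dict below follows PyTorch Lightning's lr-scheduler convention:
        # "interval"/"frequency" step the scheduler once every 3 epochs, and
        # "monitor" points ReduceLROnPlateau at the logged "val_loss" metric,
        # halving the learning rate (factor=0.5) down to a floor of 1e-5.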
lr_scheduler_config = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, mode="min", factor=0.5, min_lr=1e-5, verbose=True - ), - "interval": "epoch", - "frequency": 3, - "monitor": "val_loss", - } - return {"optimizer": optimizer, "lr_scheduler": lr_scheduler_config} - - def calc_spectrogram(self, wav): - specs = self.spec_module(wav) - log_spec = torch.log( - torch.clamp_min(specs, self.config["preprocess"]["min_magnitude"]) - * self.config["preprocess"]["comp_factor"] - ).to(torch.float32) - return log_spec - - def get_loss_function(self, config): - if config["train"]["feature_loss"]["type"] == "mae": - feature_loss = torch.nn.L1Loss() - else: - feature_loss = torch.nn.MSELoss() - return MultiScaleSpectralLoss(config), feature_loss diff --git a/spaces/samakarov/Lama-Cleaner/README.md b/spaces/samakarov/Lama-Cleaner/README.md deleted file mode 100644 index 96a3861db3ad3388ca0c838d3f3258553ce5b95e..0000000000000000000000000000000000000000 --- a/spaces/samakarov/Lama-Cleaner/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Lama Cleaner Lama -emoji: ⚡ -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: cvegvg/Lama-Cleaner-clean ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/samuelinferences/TabPFN/TabPFN/initializers.py b/spaces/samuelinferences/TabPFN/TabPFN/initializers.py deleted file mode 100644 index 4a2de2711a62676223950c35e5ce88cabcb086a0..0000000000000000000000000000000000000000 --- a/spaces/samuelinferences/TabPFN/TabPFN/initializers.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from torch import nn - -def get_NormalInitializer(std): - def initializer(m): - if isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, std) - nn.init.normal_(m.bias, 0, std) - return initializer \ No newline at end of file diff --git a/spaces/sdhsdhk/bingosjj/src/lib/hooks/use-copy-to-clipboard.tsx b/spaces/sdhsdhk/bingosjj/src/lib/hooks/use-copy-to-clipboard.tsx deleted file mode 100644 index 62f7156dca246c46b213151af003a3a177977ccf..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingosjj/src/lib/hooks/use-copy-to-clipboard.tsx +++ /dev/null @@ -1,33 +0,0 @@ -'use client' - -import * as React from 'react' - -export interface useCopyToClipboardProps { - timeout?: number -} - -export function useCopyToClipboard({ - timeout = 2000 -}: useCopyToClipboardProps) { - const [isCopied, setIsCopied] = React.useState(false) - - const copyToClipboard = (value: string) => { - if (typeof window === 'undefined' || !navigator.clipboard?.writeText) { - return - } - - if (!value) { - return - } - - navigator.clipboard.writeText(value).then(() => { - setIsCopied(true) - - setTimeout(() => { - setIsCopied(false) - }, timeout) - }) - } - - return { isCopied, copyToClipboard } -} diff --git a/spaces/segments-tobias/conex/espnet/mt/pytorch_backend/mt.py b/spaces/segments-tobias/conex/espnet/mt/pytorch_backend/mt.py deleted file mode 100644 index 88474c944ed507c65733d498a50bcb0d46d1be8d..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/mt/pytorch_backend/mt.py +++ /dev/null @@ -1,600 +0,0 @@ -#!/usr/bin/env python3 -# encoding: utf-8 - -# Copyright 2019 Kyoto University (Hirofumi Inaguma) -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""Training/decoding definition for the text translation task.""" - -import json -import logging -import os -import sys - -from chainer 
import training - from chainer.training import extensions - import numpy as np - from tensorboardX import SummaryWriter - import torch - - from espnet.asr.asr_utils import adadelta_eps_decay - from espnet.asr.asr_utils import adam_lr_decay - from espnet.asr.asr_utils import add_results_to_json - from espnet.asr.asr_utils import CompareValueTrigger - from espnet.asr.asr_utils import restore_snapshot - from espnet.asr.asr_utils import snapshot_object - from espnet.asr.asr_utils import torch_load - from espnet.asr.asr_utils import torch_resume - from espnet.asr.asr_utils import torch_snapshot - from espnet.nets.mt_interface import MTInterface - from espnet.nets.pytorch_backend.e2e_asr import pad_list - from espnet.utils.dataset import ChainerDataLoader - from espnet.utils.dataset import TransformDataset - from espnet.utils.deterministic_utils import set_deterministic_pytorch - from espnet.utils.dynamic_import import dynamic_import - from espnet.utils.io_utils import LoadInputsAndTargets - from espnet.utils.training.batchfy import make_batchset - from espnet.utils.training.iterators import ShufflingEnabler - from espnet.utils.training.tensorboard_logger import TensorboardLogger - from espnet.utils.training.train_utils import check_early_stop - from espnet.utils.training.train_utils import set_early_stop - - from espnet.asr.pytorch_backend.asr import CustomEvaluator - from espnet.asr.pytorch_backend.asr import CustomUpdater - from espnet.asr.pytorch_backend.asr import load_trained_model - - import matplotlib - - matplotlib.use("Agg") - - if sys.version_info[0] == 2: - from itertools import izip_longest as zip_longest - else: - from itertools import zip_longest as zip_longest - - - class CustomConverter(object): - """Custom batch converter for PyTorch.""" - - def __init__(self): - """Construct a CustomConverter object.""" - self.ignore_id = -1 - self.pad = 0 - # NOTE: we reserve index:0 for <blank> although this is reserved for a blank class - # in ASR. However, blank labels are not used in NMT. To keep the vocabulary size, - # we use index:0 for padding instead of adding one more class. - - def __call__(self, batch, device=torch.device("cpu")): - """Transform a batch and send it to a device. - - Args: - batch (list): The batch to transform. - device (torch.device): The device to send to. - - Returns: - tuple(torch.Tensor, torch.Tensor, torch.Tensor) - - """ - # batch should be located in list - assert len(batch) == 1 - xs, ys = batch[0] - - # get batch of lengths of input sequences - ilens = np.array([x.shape[0] for x in xs]) - - # perform padding and convert to tensor - xs_pad = pad_list([torch.from_numpy(x).long() for x in xs], self.pad).to(device) - ilens = torch.from_numpy(ilens).to(device) - ys_pad = pad_list([torch.from_numpy(y).long() for y in ys], self.ignore_id).to( - device - ) - - return xs_pad, ilens, ys_pad - - -def train(args): - """Train with the given args. - - Args: - args (namespace): The program arguments.
- - """ - set_deterministic_pytorch(args) - - # check cuda availability - if not torch.cuda.is_available(): - logging.warning("cuda is not available") - - # get input and output dimension info - with open(args.valid_json, "rb") as f: - valid_json = json.load(f)["utts"] - utts = list(valid_json.keys()) - idim = int(valid_json[utts[0]]["output"][1]["shape"][1]) - odim = int(valid_json[utts[0]]["output"][0]["shape"][1]) - logging.info("#input dims : " + str(idim)) - logging.info("#output dims: " + str(odim)) - - # specify model architecture - model_class = dynamic_import(args.model_module) - model = model_class(idim, odim, args) - assert isinstance(model, MTInterface) - - # write model config - if not os.path.exists(args.outdir): - os.makedirs(args.outdir) - model_conf = args.outdir + "/model.json" - with open(model_conf, "wb") as f: - logging.info("writing a model config file to " + model_conf) - f.write( - json.dumps( - (idim, odim, vars(args)), indent=4, ensure_ascii=False, sort_keys=True - ).encode("utf_8") - ) - for key in sorted(vars(args).keys()): - logging.info("ARGS: " + key + ": " + str(vars(args)[key])) - - reporter = model.reporter - - # check the use of multi-gpu - if args.ngpu > 1: - if args.batch_size != 0: - logging.warning( - "batch size is automatically increased (%d -> %d)" - % (args.batch_size, args.batch_size * args.ngpu) - ) - args.batch_size *= args.ngpu - - # set torch device - device = torch.device("cuda" if args.ngpu > 0 else "cpu") - if args.train_dtype in ("float16", "float32", "float64"): - dtype = getattr(torch, args.train_dtype) - else: - dtype = torch.float32 - model = model.to(device=device, dtype=dtype) - - logging.warning( - "num. model params: {:,} (num. trained: {:,} ({:.1f}%))".format( - sum(p.numel() for p in model.parameters()), - sum(p.numel() for p in model.parameters() if p.requires_grad), - sum(p.numel() for p in model.parameters() if p.requires_grad) - * 100.0 - / sum(p.numel() for p in model.parameters()), - ) - ) - - # Setup an optimizer - if args.opt == "adadelta": - optimizer = torch.optim.Adadelta( - model.parameters(), rho=0.95, eps=args.eps, weight_decay=args.weight_decay - ) - elif args.opt == "adam": - optimizer = torch.optim.Adam( - model.parameters(), lr=args.lr, weight_decay=args.weight_decay - ) - elif args.opt == "noam": - from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt - - optimizer = get_std_opt( - model.parameters(), - args.adim, - args.transformer_warmup_steps, - args.transformer_lr, - ) - else: - raise NotImplementedError("unknown optimizer: " + args.opt) - - # setup apex.amp - if args.train_dtype in ("O0", "O1", "O2", "O3"): - try: - from apex import amp - except ImportError as e: - logging.error( - f"You need to install apex for --train-dtype {args.train_dtype}. 
" - "See https://github.com/NVIDIA/apex#linux" - ) - raise e - if args.opt == "noam": - model, optimizer.optimizer = amp.initialize( - model, optimizer.optimizer, opt_level=args.train_dtype - ) - else: - model, optimizer = amp.initialize( - model, optimizer, opt_level=args.train_dtype - ) - use_apex = True - else: - use_apex = False - - # FIXME: TOO DIRTY HACK - setattr(optimizer, "target", reporter) - setattr(optimizer, "serialize", lambda s: reporter.serialize(s)) - - # Setup a converter - converter = CustomConverter() - - # read json data - with open(args.train_json, "rb") as f: - train_json = json.load(f)["utts"] - with open(args.valid_json, "rb") as f: - valid_json = json.load(f)["utts"] - - use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0 - # make minibatch list (variable length) - train = make_batchset( - train_json, - args.batch_size, - args.maxlen_in, - args.maxlen_out, - args.minibatches, - min_batch_size=args.ngpu if args.ngpu > 1 else 1, - shortest_first=use_sortagrad, - count=args.batch_count, - batch_bins=args.batch_bins, - batch_frames_in=args.batch_frames_in, - batch_frames_out=args.batch_frames_out, - batch_frames_inout=args.batch_frames_inout, - mt=True, - iaxis=1, - oaxis=0, - ) - valid = make_batchset( - valid_json, - args.batch_size, - args.maxlen_in, - args.maxlen_out, - args.minibatches, - min_batch_size=args.ngpu if args.ngpu > 1 else 1, - count=args.batch_count, - batch_bins=args.batch_bins, - batch_frames_in=args.batch_frames_in, - batch_frames_out=args.batch_frames_out, - batch_frames_inout=args.batch_frames_inout, - mt=True, - iaxis=1, - oaxis=0, - ) - - load_tr = LoadInputsAndTargets(mode="mt", load_output=True) - load_cv = LoadInputsAndTargets(mode="mt", load_output=True) - # hack to make batchsize argument as 1 - # actual bathsize is included in a list - # default collate function converts numpy array to pytorch tensor - # we used an empty collate function instead which returns list - train_iter = ChainerDataLoader( - dataset=TransformDataset(train, lambda data: converter([load_tr(data)])), - batch_size=1, - num_workers=args.n_iter_processes, - shuffle=not use_sortagrad, - collate_fn=lambda x: x[0], - ) - valid_iter = ChainerDataLoader( - dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])), - batch_size=1, - shuffle=False, - collate_fn=lambda x: x[0], - num_workers=args.n_iter_processes, - ) - - # Set up a trainer - updater = CustomUpdater( - model, - args.grad_clip, - {"main": train_iter}, - optimizer, - device, - args.ngpu, - False, - args.accum_grad, - use_apex=use_apex, - ) - trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir) - - if use_sortagrad: - trainer.extend( - ShufflingEnabler([train_iter]), - trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"), - ) - - # Resume from a snapshot - if args.resume: - logging.info("resumed from %s" % args.resume) - torch_resume(args.resume, trainer) - - # Evaluate the model with the test dataset for each epoch - if args.save_interval_iters > 0: - trainer.extend( - CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu), - trigger=(args.save_interval_iters, "iteration"), - ) - else: - trainer.extend( - CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu) - ) - - # Save attention weight each epoch - if args.num_save_attention > 0: - # NOTE: sort it by output lengths - data = sorted( - list(valid_json.items())[: args.num_save_attention], - key=lambda x: int(x[1]["output"][0]["shape"][0]), - reverse=True, 
- ) - if hasattr(model, "module"): - att_vis_fn = model.module.calculate_all_attentions - plot_class = model.module.attention_plot_class - else: - att_vis_fn = model.calculate_all_attentions - plot_class = model.attention_plot_class - att_reporter = plot_class( - att_vis_fn, - data, - args.outdir + "/att_ws", - converter=converter, - transform=load_cv, - device=device, - ikey="output", - iaxis=1, - ) - trainer.extend(att_reporter, trigger=(1, "epoch")) - else: - att_reporter = None - - # Make a plot for training and validation values - trainer.extend( - extensions.PlotReport( - ["main/loss", "validation/main/loss"], "epoch", file_name="loss.png" - ) - ) - trainer.extend( - extensions.PlotReport( - ["main/acc", "validation/main/acc"], "epoch", file_name="acc.png" - ) - ) - trainer.extend( - extensions.PlotReport( - ["main/ppl", "validation/main/ppl"], "epoch", file_name="ppl.png" - ) - ) - trainer.extend( - extensions.PlotReport( - ["main/bleu", "validation/main/bleu"], "epoch", file_name="bleu.png" - ) - ) - - # Save best models - trainer.extend( - snapshot_object(model, "model.loss.best"), - trigger=training.triggers.MinValueTrigger("validation/main/loss"), - ) - trainer.extend( - snapshot_object(model, "model.acc.best"), - trigger=training.triggers.MaxValueTrigger("validation/main/acc"), - ) - - # save snapshot which contains model and optimizer states - if args.save_interval_iters > 0: - trainer.extend( - torch_snapshot(filename="snapshot.iter.{.updater.iteration}"), - trigger=(args.save_interval_iters, "iteration"), - ) - else: - trainer.extend(torch_snapshot(), trigger=(1, "epoch")) - - # epsilon decay in the optimizer - if args.opt == "adadelta": - if args.criterion == "acc": - trainer.extend( - restore_snapshot( - model, args.outdir + "/model.acc.best", load_fn=torch_load - ), - trigger=CompareValueTrigger( - "validation/main/acc", - lambda best_value, current_value: best_value > current_value, - ), - ) - trainer.extend( - adadelta_eps_decay(args.eps_decay), - trigger=CompareValueTrigger( - "validation/main/acc", - lambda best_value, current_value: best_value > current_value, - ), - ) - elif args.criterion == "loss": - trainer.extend( - restore_snapshot( - model, args.outdir + "/model.loss.best", load_fn=torch_load - ), - trigger=CompareValueTrigger( - "validation/main/loss", - lambda best_value, current_value: best_value < current_value, - ), - ) - trainer.extend( - adadelta_eps_decay(args.eps_decay), - trigger=CompareValueTrigger( - "validation/main/loss", - lambda best_value, current_value: best_value < current_value, - ), - ) - elif args.opt == "adam": - if args.criterion == "acc": - trainer.extend( - restore_snapshot( - model, args.outdir + "/model.acc.best", load_fn=torch_load - ), - trigger=CompareValueTrigger( - "validation/main/acc", - lambda best_value, current_value: best_value > current_value, - ), - ) - trainer.extend( - adam_lr_decay(args.lr_decay), - trigger=CompareValueTrigger( - "validation/main/acc", - lambda best_value, current_value: best_value > current_value, - ), - ) - elif args.criterion == "loss": - trainer.extend( - restore_snapshot( - model, args.outdir + "/model.loss.best", load_fn=torch_load - ), - trigger=CompareValueTrigger( - "validation/main/loss", - lambda best_value, current_value: best_value < current_value, - ), - ) - trainer.extend( - adam_lr_decay(args.lr_decay), - trigger=CompareValueTrigger( - "validation/main/loss", - lambda best_value, current_value: best_value < current_value, - ), - ) - - # Write a log of evaluation statistics for each 
epoch - trainer.extend( - extensions.LogReport(trigger=(args.report_interval_iters, "iteration")) - ) - report_keys = [ - "epoch", - "iteration", - "main/loss", - "validation/main/loss", - "main/acc", - "validation/main/acc", - "main/ppl", - "validation/main/ppl", - "elapsed_time", - ] - if args.opt == "adadelta": - trainer.extend( - extensions.observe_value( - "eps", - lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][ - "eps" - ], - ), - trigger=(args.report_interval_iters, "iteration"), - ) - report_keys.append("eps") - elif args.opt in ["adam", "noam"]: - trainer.extend( - extensions.observe_value( - "lr", - lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][ - "lr" - ], - ), - trigger=(args.report_interval_iters, "iteration"), - ) - report_keys.append("lr") - if args.report_bleu: - report_keys.append("main/bleu") - report_keys.append("validation/main/bleu") - trainer.extend( - extensions.PrintReport(report_keys), - trigger=(args.report_interval_iters, "iteration"), - ) - - trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters)) - set_early_stop(trainer, args) - - if args.tensorboard_dir is not None and args.tensorboard_dir != "": - trainer.extend( - TensorboardLogger(SummaryWriter(args.tensorboard_dir), att_reporter), - trigger=(args.report_interval_iters, "iteration"), - ) - # Run the training - trainer.run() - check_early_stop(trainer, args.epochs) - - -def trans(args): - """Decode with the given args. - - Args: - args (namespace): The program arguments. - - """ - set_deterministic_pytorch(args) - model, train_args = load_trained_model(args.model) - assert isinstance(model, MTInterface) - model.trans_args = args - - # gpu - if args.ngpu == 1: - gpu_id = list(range(args.ngpu)) - logging.info("gpu id: " + str(gpu_id)) - model.cuda() - - # read json data - with open(args.trans_json, "rb") as f: - js = json.load(f)["utts"] - new_js = {} - - # remove empty utterances - if train_args.multilingual: - js = { - k: v - for k, v in js.items() - if v["output"][0]["shape"][0] > 1 and v["output"][1]["shape"][0] > 1 - } - else: - js = { - k: v - for k, v in js.items() - if v["output"][0]["shape"][0] > 0 and v["output"][1]["shape"][0] > 0 - } - - if args.batchsize == 0: - with torch.no_grad(): - for idx, name in enumerate(js.keys(), 1): - logging.info("(%d/%d) decoding " + name, idx, len(js.keys())) - feat = [js[name]["output"][1]["tokenid"].split()] - nbest_hyps = model.translate(feat, args, train_args.char_list) - new_js[name] = add_results_to_json( - js[name], nbest_hyps, train_args.char_list - ) - - else: - - def grouper(n, iterable, fillvalue=None): - kargs = [iter(iterable)] * n - return zip_longest(*kargs, fillvalue=fillvalue) - - # sort data - keys = list(js.keys()) - feat_lens = [js[key]["output"][1]["shape"][0] for key in keys] - sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i]) - keys = [keys[i] for i in sorted_index] - - with torch.no_grad(): - for names in grouper(args.batchsize, keys, None): - names = [name for name in names if name] - feats = [ - np.fromiter( - map(int, js[name]["output"][1]["tokenid"].split()), - dtype=np.int64, - ) - for name in names - ] - nbest_hyps = model.translate_batch( - feats, - args, - train_args.char_list, - ) - - for i, nbest_hyp in enumerate(nbest_hyps): - name = names[i] - new_js[name] = add_results_to_json( - js[name], nbest_hyp, train_args.char_list - ) - - with open(args.result_label, "wb") as f: - f.write( - json.dumps( - {"utts": new_js}, indent=4,
ensure_ascii=False, sort_keys=True - ).encode("utf_8") - ) diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/streaming/segment.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/streaming/segment.py deleted file mode 100644 index 45a2758c9d3c86d17cdd55884952e88d0b21240e..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/streaming/segment.py +++ /dev/null @@ -1,129 +0,0 @@ -import numpy as np -import torch - - -class SegmentStreamingE2E(object): - """SegmentStreamingE2E constructor. - - :param E2E e2e: E2E ASR object - :param recog_args: arguments for "recognize" method of E2E - """ - - def __init__(self, e2e, recog_args, rnnlm=None): - self._e2e = e2e - self._recog_args = recog_args - self._char_list = e2e.char_list - self._rnnlm = rnnlm - - self._e2e.eval() - - self._blank_idx_in_char_list = -1 - for idx in range(len(self._char_list)): - if self._char_list[idx] == self._e2e.blank: - self._blank_idx_in_char_list = idx - break - - self._subsampling_factor = np.prod(e2e.subsample) - self._activates = 0 - self._blank_dur = 0 - - self._previous_input = [] - self._previous_encoder_recurrent_state = None - self._encoder_states = [] - self._ctc_posteriors = [] - - assert ( - self._recog_args.batchsize <= 1 - ), "SegmentStreamingE2E works only with batch size <= 1" - assert ( - "b" not in self._e2e.etype - ), "SegmentStreamingE2E works only with uni-directional encoders" - - def accept_input(self, x): - """Call this method each time a new batch of input is available.""" - - self._previous_input.extend(x) - h, ilen = self._e2e.subsample_frames(x) - - # Run encoder and apply greedy search on CTC softmax output - h, _, self._previous_encoder_recurrent_state = self._e2e.enc( - h.unsqueeze(0), ilen, self._previous_encoder_recurrent_state - ) - z = self._e2e.ctc.argmax(h).squeeze(0) - - if self._activates == 0 and z[0] != self._blank_idx_in_char_list: - self._activates = 1 - - # Rerun encoder with zero state at onset of detection - tail_len = self._subsampling_factor * ( - self._recog_args.streaming_onset_margin + 1 - ) - h, ilen = self._e2e.subsample_frames( - np.reshape( - self._previous_input[-tail_len:], [-1, len(self._previous_input[0])] - ) - ) - h, _, self._previous_encoder_recurrent_state = self._e2e.enc( - h.unsqueeze(0), ilen, None - ) - - hyp = None - if self._activates == 1: - self._encoder_states.extend(h.squeeze(0)) - self._ctc_posteriors.extend(self._e2e.ctc.log_softmax(h).squeeze(0)) - - if z[0] == self._blank_idx_in_char_list: - self._blank_dur += 1 - else: - self._blank_dur = 0 - - if self._blank_dur >= self._recog_args.streaming_min_blank_dur: - seg_len = ( - len(self._encoder_states) - - self._blank_dur - + self._recog_args.streaming_offset_margin - ) - if seg_len > 0: - # Run decoder with a detected segment - h = torch.cat(self._encoder_states[:seg_len], dim=0).view( - -1, self._encoder_states[0].size(0) - ) - if self._recog_args.ctc_weight > 0.0: - lpz = torch.cat(self._ctc_posteriors[:seg_len], dim=0).view( - -1, self._ctc_posteriors[0].size(0) - ) - if self._recog_args.batchsize > 0: - lpz = lpz.unsqueeze(0) - normalize_score = False - else: - lpz = None - normalize_score = True - - if self._recog_args.batchsize == 0: - hyp = self._e2e.dec.recognize_beam( - h, lpz, self._recog_args, self._char_list, self._rnnlm - ) - else: - hlens = torch.tensor([h.shape[0]]) - hyp = self._e2e.dec.recognize_beam_batch( - h.unsqueeze(0), - hlens, - lpz, - self._recog_args, - self._char_list, - self._rnnlm, - 
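                        # Batched joint CTC/attention beam search over the detected
                        # segment; lpz carries the CTC log-posteriors computed above
                        # (it is None when ctc_weight == 0, in which case scores are
                        # length-normalized via normalize_score=True).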
normalize_score=normalize_score, - )[0] - - self._activates = 0 - self._blank_dur = 0 - - tail_len = ( - self._subsampling_factor - * self._recog_args.streaming_onset_margin - ) - self._previous_input = self._previous_input[-tail_len:] - self._encoder_states = [] - self._ctc_posteriors = [] - - return hyp diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transformer/__init__.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transformer/__init__.py deleted file mode 100644 index b7f177368e62a5578b8706300e101f831a3972ac..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transformer/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Initialize sub package.""" diff --git a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/datasets/__init__.py b/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/datasets/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/shaneweisz/AutoCounterspeech/response_generation/response_generator.py b/spaces/shaneweisz/AutoCounterspeech/response_generation/response_generator.py deleted file mode 100644 index 3c85e77a1f9b156ad084a5024bcfbbfec2a4b390..0000000000000000000000000000000000000000 --- a/spaces/shaneweisz/AutoCounterspeech/response_generation/response_generator.py +++ /dev/null @@ -1,73 +0,0 @@ -from typing import Any, Dict, List -from tqdm import tqdm -from colorama import Fore, Style -from transformers import AutoModelForCausalLM, AutoTokenizer, LogitsProcessorList -import torch -from .min_new_tokens import MinNewTokensLogitsProcessor - - -class ResponseGenerator: - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - def __init__(self, pretrained_model_name_or_path: str, decoding_config: Dict[str, Any], seed=42, verbose=True): - self.model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path).to(self.device) - self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path) - if "pad_token" not in self.tokenizer.special_tokens_map: - self.tokenizer.pad_token = self.tokenizer.eos_token # A pad token needs to be set for batch decoding - self.decoding_config = decoding_config - self.verbose = verbose - torch.manual_seed(seed) - - def generate_responses(self, inputs: List[str], batch_size=1) -> List[str]: - responses = [] - for i in tqdm(range(0, len(inputs), batch_size), disable=not self.verbose): - batch_inputs = inputs[i : i + batch_size] - batch_responses = self.generate_responses_for_batch(batch_inputs) - responses.extend(batch_responses) - return responses - - def generate_responses_for_batch(self, inputs: List[str]) -> str: - inputs = [input_text + self.tokenizer.eos_token for input_text in inputs] - - self.tokenizer.padding_side = "left" - tokenized_inputs = self.tokenizer(inputs, return_tensors="pt", padding=True).to(self.device) - input_len = tokenized_inputs["input_ids"].shape[-1] - - params_for_generate = self._params_for_generate(input_len) - output_ids = self.model.generate( - **tokenized_inputs, **params_for_generate, pad_token_id=self.tokenizer.pad_token_id - ) - - response_ids = output_ids[:, input_len:] - responses = self.tokenizer.batch_decode(response_ids, skip_special_tokens=True) - - return responses - - def _params_for_generate(self, input_length: int) -> Dict[str, Any]: - params_for_generate = self.decoding_config.copy() - - if "min_new_tokens" in params_for_generate and 
params_for_generate["min_new_tokens"] is not None: - # the HuggingFace `generate` function accepts a `logits_processor` argument, not a `min_new_tokens`, - # so we replace `min_new_tokens` from the `decoding_config` with our custom logits processor - # that enforces a minimum response length - min_new_tokens = params_for_generate["min_new_tokens"] - min_new_tokens_logits_processor = MinNewTokensLogitsProcessor( - min_new_tokens, self.tokenizer.eos_token_id, input_length - ) - params_for_generate["logits_processor"] = LogitsProcessorList([min_new_tokens_logits_processor]) - params_for_generate.pop("min_new_tokens") - - return params_for_generate - - def respond(self, input_text: str) -> str: - """Respond to a single hate speech input.""" - return self.generate_responses([input_text])[0] - - def interact(self): - prompt = Fore.RED + "Hate speech: " + Style.RESET_ALL - input_text = input(prompt) - while input_text != "": - print(Fore.GREEN + "Response: " + Style.RESET_ALL, end="") - response = self.respond(input_text) - print(response) - input_text = input(prompt) diff --git a/spaces/sharmaditya/chatapp/README.md b/spaces/sharmaditya/chatapp/README.md deleted file mode 100644 index b3c4504ac790ba305717de7768f2e7d0c062152b..0000000000000000000000000000000000000000 --- a/spaces/sharmaditya/chatapp/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Chatapp -emoji: ⚡ -colorFrom: blue -colorTo: yellow -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/shi-labs/Prompt-Free-Diffusion/app.py b/spaces/shi-labs/Prompt-Free-Diffusion/app.py deleted file mode 100644 index 63b23d6c2b356f5f8d5e66b6bc1e689687dd6895..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Prompt-Free-Diffusion/app.py +++ /dev/null @@ -1,539 +0,0 @@ -################################################################################ -# Copyright (C) 2023 Xingqian Xu - All Rights Reserved # -# # -# Please visit Prompt-Free-Diffusion's arXiv paper for more details, link at # -# arxiv.org/abs/2305.16223 # -# # -################################################################################ - -import gradio as gr -import os.path as osp -from PIL import Image -import numpy as np -import time - -import torch -import torchvision.transforms as tvtrans -from lib.cfg_helper import model_cfg_bank -from lib.model_zoo import get_model - -from collections import OrderedDict -from lib.model_zoo.ddim import DDIMSampler - -from huggingface_hub import hf_hub_download - -n_sample_image = 1 - -# controlnet_path = OrderedDict([ -# ['canny' , ('canny' , 'pretrained/controlnet/control_sd15_canny_slimmed.safetensors')], -# ['canny_v11p' , ('canny' , 'pretrained/controlnet/control_v11p_sd15_canny_slimmed.safetensors')], -# ['depth' , ('depth' , 'pretrained/controlnet/control_sd15_depth_slimmed.safetensors')], -# ['hed' , ('hed' , 'pretrained/controlnet/control_sd15_hed_slimmed.safetensors')], -# ['softedge_v11p' , ('hed' , 'pretrained/controlnet/control_v11p_sd15_softedge_slimmed.safetensors')], -# ['mlsd' , ('mlsd' , 'pretrained/controlnet/control_sd15_mlsd_slimmed.safetensors')], -# # ['mlsd_v11p' , ('mlsd' , 'pretrained/controlnet/control_v11p_sd15_mlsd_slimmed.safetensors')], -# # ['normal' , ('normal' , 'pretrained/controlnet/control_sd15_normal_slimmed.safetensors')], -# ['openpose' , ('openpose', 'pretrained/controlnet/control_sd15_openpose_slimmed.safetensors')], -# ['openpose_v11p' , 
('openpose', 'pretrained/controlnet/control_v11p_sd15_openpose_slimmed.safetensors')], -# ['scribble' , ('scribble', 'pretrained/controlnet/control_sd15_scribble_slimmed.safetensors')], -# ['seg' , ('none' , 'pretrained/controlnet/control_sd15_seg_slimmed.safetensors')], -# ['lineart_v11p' , ('none' , 'pretrained/controlnet/control_v11p_sd15_lineart_slimmed.safetensors')], -# ['lineart_anime_v11p', ('none' , 'pretrained/controlnet/control_v11p_sd15s2_lineart_anime_slimmed.safetensors')], -# ]) - -controlnet_path = OrderedDict([ - ['canny' , ('canny' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_canny_slimmed.safetensors'))], - # ['canny_v11p' , ('canny' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_canny_slimmed.safetensors'))], - ['depth' , ('depth' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_depth_slimmed.safetensors'))], - ['hed' , ('hed' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_hed_slimmed.safetensors'))], - ['mlsd' , ('mlsd' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_mlsd_slimmed.safetensors'))], - # ['mlsd_v11p' , ('mlsd' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_mlsd_slimmed.safetensors'))], - # ['normal' , ('normal' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_normal_slimmed.safetensors'))], - # ['openpose' , ('openpose', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_openpose_slimmed.safetensors'))], - ['openpose_v11p' , ('openpose', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_openpose_slimmed.safetensors'))], - ['scribble' , ('scribble', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_scribble_slimmed.safetensors'))], - ['softedge_v11p' , ('scribble', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_softedge_slimmed.safetensors'))], - ['seg' , ('none' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_seg_slimmed.safetensors'))], - ['lineart_v11p' , ('none' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_lineart_slimmed.safetensors'))], - ['lineart_anime_v11p', ('none' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15s2_lineart_anime_slimmed.safetensors'))], -]) - -preprocess_method = [ - 'canny' , - 'depth' , - 'hed' , - 'mlsd' , - # 'normal' , - 'openpose' , - 'openpose_withface' , - 'openpose_withfacehand', - 'scribble' , - 'none' , -] - -# diffuser_path = OrderedDict([ -# ['SD-v1.5' , 'pretrained/pfd/diffuser/SD-v1-5.safetensors'], -# ['OpenJouney-v4' , 'pretrained/pfd/diffuser/OpenJouney-v4.safetensors'], -# ['Deliberate-v2.0' , 'pretrained/pfd/diffuser/Deliberate-v2-0.safetensors'], -# ['RealisticVision-v2.0', 'pretrained/pfd/diffuser/RealisticVision-v2-0.safetensors'], -# ['Anything-v4' , 'pretrained/pfd/diffuser/Anything-v4.safetensors'], -# ['Oam-v3' , 'pretrained/pfd/diffuser/AbyssOrangeMix-v3.safetensors'], -# ['Oam-v2' , 'pretrained/pfd/diffuser/AbyssOrangeMix-v2.safetensors'], -# ]) - -# ctxencoder_path = OrderedDict([ -# ['SeeCoder' , 'pretrained/pfd/seecoder/seecoder-v1-0.safetensors'], -# ['SeeCoder-PA' , 'pretrained/pfd/seecoder/seecoder-pa-v1-0.safetensors'], -# 
['SeeCoder-Anime', 'pretrained/pfd/seecoder/seecoder-anime-v1-0.safetensors'], -# ]) - -diffuser_path = OrderedDict([ - ['SD-v1.5' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/diffuser/SD-v1-5.safetensors')], - ['OpenJouney-v4' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/diffuser/OpenJouney-v4.safetensors')], - ['Deliberate-v2.0' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/diffuser/Deliberate-v2-0.safetensors')], - ['RealisticVision-v2.0', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/diffuser/RealisticVision-v2-0.safetensors')], - ['Anything-v4' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/diffuser/Anything-v4.safetensors')], - ['Oam-v3' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/diffuser/AbyssOrangeMix-v3.safetensors')], - ['Oam-v2' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/diffuser/AbyssOrangeMix-v2.safetensors')], -]) - -ctxencoder_path = OrderedDict([ - ['SeeCoder' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/seecoder/seecoder-v1-0.safetensors')], - ['SeeCoder-PA' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/seecoder/seecoder-pa-v1-0.safetensors')], - ['SeeCoder-Anime', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/seecoder/seecoder-anime-v1-0.safetensors')], -]) - -########## -# helper # -########## - -def highlight_print(info): - print('') - print(''.join(['#']*(len(info)+4))) - print('# '+info+' #') - print(''.join(['#']*(len(info)+4))) - print('') - -def load_sd_from_file(target): - if osp.splitext(target)[-1] == '.ckpt': - sd = torch.load(target, map_location='cpu')['state_dict'] - elif osp.splitext(target)[-1] == '.pth': - sd = torch.load(target, map_location='cpu') - elif osp.splitext(target)[-1] == '.safetensors': - from safetensors.torch import load_file as stload - sd = OrderedDict(stload(target, device='cpu')) - else: - assert False, "File type must be .ckpt or .pth or .safetensors" - return sd - -######## -# main # -######## - -class prompt_free_diffusion(object): - def __init__(self, - fp16=False, - tag_ctx=None, - tag_diffuser=None, - tag_ctl=None,): - - self.tag_ctx = tag_ctx - self.tag_diffuser = tag_diffuser - self.tag_ctl = tag_ctl - self.strict_sd = True - - cfgm = model_cfg_bank()('pfd_seecoder_with_controlnet') - self.net = get_model()(cfgm) - sdvae = hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/vae/sd-v2-0-base-autokl.pth') - sdvae = torch.load(sdvae) - self.net.vae['image'].load_state_dict(sdvae) - - self.action_load_ctx(tag_ctx) - self.action_load_diffuser(tag_diffuser) - self.action_load_ctl(tag_ctl) - - if fp16: - highlight_print('Running in FP16') - self.net.ctx['image'].fp16 = True - self.net = self.net.half() - self.dtype = torch.float16 - else: - self.dtype = torch.float32 - - self.use_cuda = torch.cuda.is_available() - if self.use_cuda: - self.net.to('cuda') - - self.net.eval() - self.sampler = DDIMSampler(self.net) - - self.n_sample_image = n_sample_image - self.ddim_steps = 50 - self.ddim_eta = 0.0 - self.image_latent_dim = 4 - - def load_ctx(self, pretrained): - sd = load_sd_from_file(pretrained) - sd_extra = [(ki, vi) for ki, vi in self.net.state_dict().items() \ - if ki.find('ctx.')!=0] - sd.update(OrderedDict(sd_extra)) - - self.net.load_state_dict(sd, strict=True) - print('Load context encoder from [{}] strict [{}].'.format(pretrained, True)) - - def load_diffuser(self, pretrained): - sd = 
load_sd_from_file(pretrained) - if len([ki for ki in sd.keys() if ki.find('diffuser.image.context_blocks.')==0]) == 0: - sd = [( - ki.replace('diffuser.text.context_blocks.', 'diffuser.image.context_blocks.'), vi) - for ki, vi in sd.items()] - sd = OrderedDict(sd) - sd_extra = [(ki, vi) for ki, vi in self.net.state_dict().items() \ - if ki.find('diffuser.')!=0] - sd.update(OrderedDict(sd_extra)) - self.net.load_state_dict(sd, strict=True) - print('Load diffuser from [{}] strict [{}].'.format(pretrained, True)) - - def load_ctl(self, pretrained): - sd = load_sd_from_file(pretrained) - self.net.ctl.load_state_dict(sd, strict=True) - print('Load controlnet from [{}] strict [{}].'.format(pretrained, True)) - - def action_load_ctx(self, tag): - pretrained = ctxencoder_path[tag] - if tag == 'SeeCoder-PA': - from lib.model_zoo.seecoder import PPE_MLP - pe_layer = \ - PPE_MLP(freq_num=20, freq_max=None, out_channel=768, mlp_layer=3) - if self.dtype == torch.float16: - pe_layer = pe_layer.half() - if self.use_cuda: - pe_layer.to('cuda') - pe_layer.eval() - self.net.ctx['image'].qtransformer.pe_layer = pe_layer - else: - self.net.ctx['image'].qtransformer.pe_layer = None - if pretrained is not None: - self.load_ctx(pretrained) - self.tag_ctx = tag - return tag - - def action_load_diffuser(self, tag): - pretrained = diffuser_path[tag] - if pretrained is not None: - self.load_diffuser(pretrained) - self.tag_diffuser = tag - return tag - - def action_load_ctl(self, tag): - pretrained = controlnet_path[tag][1] - if pretrained is not None: - self.load_ctl(pretrained) - self.tag_ctl = tag - return tag - - def action_autoset_hw(self, imctl): - if imctl is None: - return 512, 512 - w, h = imctl.size - w = w//64 * 64 - h = h//64 * 64 - w = w if w >=512 else 512 - w = w if w <=1024 else 1024 - h = h if h >=512 else 512 - h = h if h <=1024 else 1024 - return h, w - - def action_autoset_method(self, tag): - return controlnet_path[tag][0] - - def action_inference( - self, im, imctl, ctl_method, do_preprocess, - h, w, ugscale, seed, - tag_ctx, tag_diffuser, tag_ctl,): - - if tag_ctx != self.tag_ctx: - self.action_load_ctx(tag_ctx) - if tag_diffuser != self.tag_diffuser: - self.action_load_diffuser(tag_diffuser) - if tag_ctl != self.tag_ctl: - self.action_load_ctl(tag_ctl) - - n_samples = self.n_sample_image - - sampler = self.sampler - device = self.net.device - - w = w//64 * 64 - h = h//64 * 64 - if imctl is not None: - imctl = imctl.resize([w, h], Image.Resampling.BICUBIC) - - craw = tvtrans.ToTensor()(im)[None].to(device).to(self.dtype) - c = self.net.ctx_encode(craw, which='image').repeat(n_samples, 1, 1) - u = torch.zeros_like(c) - - if tag_ctx in ["SeeCoder-Anime"]: - u = torch.load('assets/anime_ug.pth')[None].to(device).to(self.dtype) - pad = c.size(1) - u.size(1) - u = torch.cat([u, torch.zeros_like(u[:, 0:1].repeat(1, pad, 1))], axis=1) - - if tag_ctl != 'none': - ccraw = tvtrans.ToTensor()(imctl)[None].to(device).to(self.dtype) - if do_preprocess: - cc = self.net.ctl.preprocess(ccraw, type=ctl_method, size=[h, w]) - cc = cc.to(self.dtype) - else: - cc = ccraw - else: - cc = None - - shape = [n_samples, self.image_latent_dim, h//8, w//8] - - if seed < 0: - np.random.seed(int(time.time())) - torch.manual_seed(-seed + 100) - else: - np.random.seed(seed + 100) - torch.manual_seed(seed) - - x, _ = sampler.sample( - steps=self.ddim_steps, - x_info={'type':'image',}, - c_info={'type':'image', 'conditioning':c, 'unconditional_conditioning':u, - 'unconditional_guidance_scale':ugscale, - 'control':cc,}, - 
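            # DDIM sampling with classifier-free guidance: c is the SeeCoder
            # embedding of the reference image, u its unconditional counterpart
            # (zeros, or a learned anime embedding), mixed with scale ugscale;
            # cc is the optional ControlNet hint, and shape below is the
            # (n_samples, 4, h//8, w//8) latent grid.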
shape=shape, - verbose=False, - eta=self.ddim_eta) - - ccout = [tvtrans.ToPILImage()(i) for i in cc] if cc is not None else [] - imout = self.net.vae_decode(x, which='image') - imout = [tvtrans.ToPILImage()(i) for i in imout] - return imout + ccout - -pfd_inference = prompt_free_diffusion( - fp16=True, tag_ctx = 'SeeCoder', tag_diffuser = 'Deliberate-v2.0', tag_ctl = 'canny',) - -################# -# sub interface # -################# - -cache_examples = True - -def get_example(): - case = [ - [ - 'assets/examples/ghibli-input.jpg', - 'assets/examples/ghibli-canny.png', - 'canny', False, - 768, 1024, 1.8, 23, - 'SeeCoder', 'Deliberate-v2.0', 'canny', ], - [ - 'assets/examples/astronautridinghouse-input.jpg', - 'assets/examples/astronautridinghouse-canny.png', - 'canny', False, - 512, 768, 2.0, 21, - 'SeeCoder', 'Deliberate-v2.0', 'canny', ], - [ - 'assets/examples/grassland-input.jpg', - 'assets/examples/grassland-scribble.png', - 'scribble', False, - 768, 512, 2.0, 41, - 'SeeCoder', 'Deliberate-v2.0', 'scribble', ], - [ - 'assets/examples/jeep-input.jpg', - 'assets/examples/jeep-depth.png', - 'depth', False, - 512, 768, 2.0, 30, - 'SeeCoder', 'Deliberate-v2.0', 'depth', ], - [ - 'assets/examples/bedroom-input.jpg', - 'assets/examples/bedroom-mlsd.png', - 'mlsd', False, - 512, 512, 2.0, 31, - 'SeeCoder', 'Deliberate-v2.0', 'mlsd', ], - [ - 'assets/examples/nightstreet-input.jpg', - 'assets/examples/nightstreet-canny.png', - 'canny', False, - 768, 512, 2.3, 20, - 'SeeCoder', 'Deliberate-v2.0', 'canny', ], - [ - 'assets/examples/woodcar-input.jpg', - 'assets/examples/woodcar-depth.png', - 'depth', False, - 768, 512, 2.0, 20, - 'SeeCoder', 'Deliberate-v2.0', 'depth', ], - [ - 'assets/examples-anime/miku.jpg', - 'assets/examples-anime/miku-canny.png', - 'canny', False, - 768, 576, 1.5, 22, - 'SeeCoder-Anime', 'Anything-v4', 'canny', ], - [ - 'assets/examples-anime/random1.jpg', - 'assets/examples-anime/pose_small.png', - 'openpose', False, - 768, 1024, 2.5, 29, - 'SeeCoder-Anime', 'Oam-v2', 'openpose_v11p', ], - [ - 'assets/examples-anime/camping.jpg', - 'assets/examples-anime/pose_small.png', - 'openpose', False, - 768, 1024, 2.0, 38, - 'SeeCoder-Anime', 'Anything-v4', 'openpose_v11p', ], - [ - 'assets/examples-anime/hanfu_girl.jpg', - 'assets/examples-anime/pose_small.png', - 'openpose', False, - 768, 1024, 2.0, 20, - 'SeeCoder-Anime', 'Anything-v4', 'openpose_v11p', ], - ] - return case - -def interface(): - with gr.Row(): - with gr.Column(): - img_input = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox') - with gr.Row(): - out_width = gr.Slider(label="Width" , minimum=512, maximum=1024, value=512, step=64, visible=True) - out_height = gr.Slider(label="Height", minimum=512, maximum=1024, value=512, step=64, visible=True) - with gr.Row(): - scl_lvl = gr.Slider(label="CFGScale", minimum=0, maximum=4, value=2, step=0.01, visible=True) - seed = gr.Number(20, label="Seed", precision=0) - with gr.Row(): - tag_ctx = gr.Dropdown(label='Context Encoder', choices=[pi for pi in ctxencoder_path.keys()], value='SeeCoder') - tag_diffuser = gr.Dropdown(label='Diffuser', choices=[pi for pi in diffuser_path.keys()], value='Deliberate-v2.0') - button = gr.Button("Run") - with gr.Column(): - ctl_input = gr.Image(label='Control Input', type='pil', elem_id='customized_imbox') - do_preprocess = gr.Checkbox(label='Preprocess', value=False) - with gr.Row(): - ctl_method = gr.Dropdown(label='Preprocess Type', choices=preprocess_method, value='canny') - tag_ctl = gr.Dropdown(label='ControlNet', 
choices=[pi for pi in controlnet_path.keys()], value='canny') - with gr.Column(): - img_output = gr.Gallery(label="Image Result", elem_id='customized_imbox').style(grid=n_sample_image+1) - - tag_ctl.change( - pfd_inference.action_autoset_method, - inputs = [tag_ctl], - outputs = [ctl_method],) - - ctl_input.change( - pfd_inference.action_autoset_hw, - inputs = [ctl_input], - outputs = [out_height, out_width],) - - # tag_ctx.change( - # pfd_inference.action_load_ctx, - # inputs = [tag_ctx], - # outputs = [tag_ctx],) - - # tag_diffuser.change( - # pfd_inference.action_load_diffuser, - # inputs = [tag_diffuser], - # outputs = [tag_diffuser],) - - # tag_ctl.change( - # pfd_inference.action_load_ctl, - # inputs = [tag_ctl], - # outputs = [tag_ctl],) - - button.click( - pfd_inference.action_inference, - inputs=[img_input, ctl_input, ctl_method, do_preprocess, - out_height, out_width, scl_lvl, seed, - tag_ctx, tag_diffuser, tag_ctl, ], - outputs=[img_output]) - - gr.Examples( - label='Examples', - examples=get_example(), - fn=pfd_inference.action_inference, - inputs=[img_input, ctl_input, ctl_method, do_preprocess, - out_height, out_width, scl_lvl, seed, - tag_ctx, tag_diffuser, tag_ctl, ], - outputs=[img_output], - cache_examples=cache_examples,) - -############# -# Interface # -############# - -css = """ - #customized_imbox { - min-height: 450px; - } - #customized_imbox>div[data-testid="image"] { - min-height: 450px; - } - #customized_imbox>div[data-testid="image"]>div { - min-height: 450px; - } - #customized_imbox>div[data-testid="image"]>iframe { - min-height: 450px; - } - #customized_imbox>div.unpadded_box { - min-height: 450px; - } - #myinst { - font-size: 0.8rem; - margin: 0rem; - color: #6B7280; - } - #maskinst { - text-align: justify; - min-width: 1200px; - } - #maskinst>img { - min-width:399px; - max-width:450px; - vertical-align: top; - display: inline-block; - } - #maskinst:after { - content: ""; - width: 100%; - display: inline-block; - } -""" - -if True: - with gr.Blocks(css=css) as demo: - gr.HTML( - """ -
-            <div>
-            <h1>Prompt-Free Diffusion</h1>
-            <h3>Xingqian Xu<sup>1,5</sup>, Jiayi Guo<sup>1,2</sup>, Zhangyang Wang<sup>3,5</sup>, Gao Huang<sup>2</sup>, Irfan Essa<sup>4</sup>, and Humphrey Shi<sup>1,5</sup></h3>
-            <p><sup>1</sup>SHI Labs @ UIUC & Oregon, <sup>2</sup>Tsinghua University, <sup>3</sup>UT Austin, <sup>4</sup>Georgia Tech, <sup>5</sup>Picsart AI Research (PAIR)</p>
-            <p>
-            The performance of Text2Image is largely dependent on text prompts.
-            In Prompt-Free Diffusion, no prompt is needed, just a reference image!
-            At the core of Prompt-Free Diffusion is an image-only semantic context encoder (SeeCoder).
-            SeeCoder is reusable to most CLIP-based T2I models: just drop in and replace CLIP, then you will create your own prompt-free diffusion.
-            [Github] [arXiv]
-            </p>
-            </div>
-            """)
-
-        interface()
-
-        # gr.HTML(
-        #     """
-        #     <div>
-        #     Version: {}
-        #     </div>
      - # """.format(' '+str(pfd_inference.pretrained))) - - # demo.launch(server_name="0.0.0.0", server_port=7992) - # demo.launch() - demo.launch(debug=True) diff --git a/spaces/shivammehta25/Diff-TTSG/diff_ttsg/data/__init__.py b/spaces/shivammehta25/Diff-TTSG/diff_ttsg/data/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/japanese.py b/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/japanese.py deleted file mode 100644 index 375e4d50872d5c68ee57ca17470a2ca425425eba..0000000000000000000000000000000000000000 --- a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/japanese.py +++ /dev/null @@ -1,153 +0,0 @@ -import re -from unidecode import unidecode -import pyopenjtalk - - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - -# List of (romaji, ipa) pairs for marks: -_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ts', 'ʦ'), - ('u', 'ɯ'), - ('j', 'ʥ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (romaji, ipa2) pairs for marks: -_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('u', 'ɯ'), - ('ʧ', 'tʃ'), - ('j', 'dʑ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text != '': - text += ' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil', 'pau']: - text += phoneme.replace('ch', 'ʧ').replace('sh', - 'ʃ').replace('cl', 'Q') - else: - continue - # n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: - a2_next = -1 - 
else: - a2_next = int( - re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i < len(marks): - text += unidecode(marks[i]).replace(' ', '') - return text - - -def get_real_sokuon(text): - for regex, replacement in _real_sokuon: - text = re.sub(regex, replacement, text) - return text - - -def get_real_hatsuon(text): - for regex, replacement in _real_hatsuon: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = re.sub( - r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa2(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa3(text): - text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( - 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') - text = re.sub( - r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) - return text diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/9Apps Yukle A Safe and Secure App Store for Android Devices.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/9Apps Yukle A Safe and Secure App Store for Android Devices.md deleted file mode 100644 index fb81870e72cb5b368dfd5be40182d989b8680679..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/9Apps Yukle A Safe and Secure App Store for Android Devices.md +++ /dev/null @@ -1,215 +0,0 @@ -
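If you are salvaging this deleted japanese.py, a minimal usage sketch follows. It assumes the file above is saved locally as japanese.py and that its two third-party dependencies, pyopenjtalk and unidecode, are installed; the sample sentence is an arbitrary example.

```python
# Minimal usage sketch for the deleted module above, saved locally as japanese.py.
# Requires: pip install pyopenjtalk unidecode
from japanese import japanese_to_romaji_with_accent, japanese_to_ipa2

sample = 'こんにちは、世界。'  # arbitrary example sentence

# Romaji with pitch-accent marks: '↑' rising, '↓' falling, and a space at
# each accent-phrase boundary (the a1/a2/a3 logic in the function above).
print(japanese_to_romaji_with_accent(sample))

# The same text as IPA, after the sokuon/hatsuon assimilation rules and the
# _romaji_to_ipa2 substitutions.
print(japanese_to_ipa2(sample))
```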
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/9Apps Yukle A Safe and Secure App Store for Android Devices.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/9Apps Yukle A Safe and Secure App Store for Android Devices.md
deleted file mode 100644
index fb81870e72cb5b368dfd5be40182d989b8680679..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/9Apps Yukle A Safe and Secure App Store for Android Devices.md
+++ /dev/null
@@ -1,215 +0,0 @@
-

      9apps yukle: How to Download and Use the Best Android App Store

      -

      If you are looking for a way to discover and download the best apps and games for your Android device, you might want to try out 9apps yukle. This is a powerful and versatile app store that offers you a wide range of useful applications, from entertainment to productivity, from social media to education, and more. In this article, we will show you what 9apps yukle is, why you need it, how to download and use it on your Android device, and how to run it on your PC or Mac with BlueStacks. We will also share with you some of the best apps and games you can find on 9apps yukle, so you can enjoy them on your device or computer.

      -

      What is 9apps yukle and why you need it

      -

      9apps yukle is a tools app developed by 9Apps that serves as an alternative app store for Android users. Unlike the default Google Play Store, 9apps yukle offers you more choices, more features, and more benefits when it comes to finding and downloading apps and games. Here are some of the reasons why you need 9apps yukle:

      -

      9apps yukle


Download Zip: https://ssurll.com/2uNXBY



      -
        -
      • It has a huge collection of apps and games in various categories and genres, so you can always find something that suits your needs and preferences.
      • -
      • It has a smart recommendation system that suggests you the best apps and games based on your interests, habits, and ratings.
      • -
      • It has a fast and easy download process that saves you time and data. You can also pause and resume your downloads at any time.
      • -
      • It has a user-friendly interface that makes it easy to navigate, search, and manage your apps and games.
      • -
      • It has regular updates that keep your apps and games up-to-date with the latest features and bug fixes.
      • -
      • It has a safe and secure platform that protects your device from malware, viruses, and other threats.
      • -
      -

      How to download and install 9apps yukle on your Android device

      -

Downloading and installing 9apps yukle on your Android device is very simple. Just follow these steps (a command-line alternative is sketched after the list):

      -
        -
      1. Go to the official website of 9apps yukle (https://www.9appsyukle.com/) or scan the QR code below with your device's camera.
2. [Image: QR code for the 9apps yukle website] -
      3. Tap on the "Download" button to start downloading the APK file of 9apps yukle.
      4. -
      5. Once the download is complete, open the APK file from your device's file manager or notification bar.
      6. -
7. If prompted, enable the "Unknown sources" option in your device's settings to allow the installation of apps from sources other than Google Play Store. Then follow the on-screen instructions to complete the installation of 9apps yukle.
      8. -
      9. Launch 9apps yukle from your device's app drawer or home screen.
      10. -
      -
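If you prefer to sideload from a computer instead, here is a hypothetical adb-based alternative. It assumes the Android platform-tools are installed (so adb is on your PATH), USB debugging is enabled on the phone, and the APK file name is a placeholder for whatever the site served you.

```python
# Hypothetical command-line alternative to the steps above: sideload the
# downloaded APK from a computer with adb. Assumes platform-tools installed
# and USB debugging enabled on the connected device.
import subprocess

apk = "9apps-yukle.apk"  # placeholder name for the APK downloaded in step 3

# 'adb install' pushes the package to the connected device and installs it
subprocess.run(["adb", "install", apk], check=True)
```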

      Congratulations! You have successfully installed 9apps yukle on your Android device. Now you can start exploring and downloading the best apps and games for your device.

      -

      How to use 9apps yukle to find and download the best apps and games

      -

      Using 9apps yukle to find and download the best apps and games is very easy and fun. Here are some tips on how to use 9apps yukle:

      -


      -
        -
      • On the home screen of 9apps yukle, you can see the featured apps and games, the latest updates, the top charts, and the categories. You can swipe left or right to browse through them.
      • -
      • You can also use the search bar at the top to type in the name or keyword of the app or game you are looking for.
      • -
      • When you find an app or game that interests you, you can tap on it to see more details, such as the description, screenshots, ratings, reviews, and related apps and games.
      • -
      • If you want to download an app or game, you can tap on the "Download" button at the bottom. You can also tap on the "Share" button to share the app or game with your friends via social media, email, or other apps.
      • -
      • You can check the progress of your downloads by tapping on the "Downloads" icon at the bottom right corner of the screen. You can also pause, resume, or cancel your downloads from there.
      • -
      • You can manage your downloaded apps and games by tapping on the "Apps" icon at the bottom left corner of the screen. You can also uninstall, update, or move your apps and games from there.
      • -
      -

      That's it! You have learned how to use 9apps yukle to find and download the best apps and games for your Android device. Enjoy!

      -

      The advantages of using 9apps yukle on PC and Mac

      -

      If you want to enjoy the best apps and games on a bigger screen, with better graphics, sound, and performance, you might want to try using 9apps yukle on your PC or Mac. This is possible with the help of an Android emulator called BlueStacks. BlueStacks is a software that allows you to run Android apps and games on your PC or Mac as if they were native applications. Here are some of the advantages of using 9apps yukle on PC and Mac with BlueStacks:

      -
        -
      • You can access a larger collection of apps and games that are not available or compatible with your Android device.
      • -
      • You can play Android games with better graphics, sound, and performance, without lagging or crashing.
      • -
      • You can use your keyboard, mouse, or gamepad to control your Android games, which can give you an edge over other players.
      • -
      • You can multitask and run multiple apps and games at the same time on different windows or tabs.
      • -
      • You can backup and sync your data and settings across your Android device and PC or Mac with Google Play Services.
      • -

      How to run 9apps yukle on your PC or Mac with BlueStacks

      -

      Running 9apps yukle on your PC or Mac with BlueStacks is very easy and convenient. Just follow these steps:

      -
        -
1. Download and install BlueStacks on your PC or Mac from the official website (https://www.bluestacks.com/) or from the links below.
      2. -
      3. Launch BlueStacks and sign in with your Google account or create a new one.
      4. -
      5. Go to the "My games" tab and click on the "Install apk" button at the bottom right corner of the screen.
      6. -
      7. Select the APK file of 9apps yukle that you have downloaded earlier and wait for it to install.
      8. -
      9. Once the installation is complete, you can see 9apps yukle on your BlueStacks home screen. Click on it to launch it.
      10. -
      -

      Congratulations! You have successfully run 9apps yukle on your PC or Mac with BlueStacks. Now you can enjoy the best apps and games on your computer.

      -

      The benefits of using BlueStacks to play Android games on your PC or Mac

      -

      Using BlueStacks to play Android games on your PC or Mac has many benefits that can enhance your gaming experience. Here are some of them:

      -
        -
      • You can use the BlueStacks Game Controls feature to customize your keyboard, mouse, or gamepad settings for each game. You can also use the pre-made game controls for popular games or create your own.
      • -
      • You can use the BlueStacks Eco Mode feature to optimize your CPU and RAM usage and reduce power consumption while playing multiple games at the same time.
      • -
      • You can use the BlueStacks Multi-Instance feature to create and run multiple instances of BlueStacks with different accounts, settings, and apps. You can also sync your actions across all instances with the Multi-Instance Sync feature.
      • -
      • You can use the BlueStacks Macros feature to record and replay your actions in any game with a single keystroke. You can also edit, share, and import macros from other users.
      • -
      • You can use the BlueStacks Screen Recorder feature to capture and save your gameplay videos in high quality. You can also stream your gameplay live to Twitch, YouTube, Facebook, or other platforms with the BlueStacks Streaming Mode feature.
      • -
      -

      How to customize your BlueStacks settings for optimal performance and experience

      -

      To customize your BlueStacks settings for optimal performance and experience, you can follow these tips:

      -
        -
      • Go to the "Settings" menu by clicking on the gear icon on the side toolbar. You can adjust various settings such as display, sound, engine, preferences, game controls, and more.
      • -
      • For display settings, you can choose the resolution, orientation, and DPI of your BlueStacks window. You can also enable or disable fullscreen mode, high frame rates, and notifications.
      • -
      • For sound settings, you can adjust the volume of your speakers and microphone. You can also enable or disable sound effects and voice chat.
      • -
      • For engine settings, you can choose the performance mode, graphics mode, graphics engine, and memory allocation of your BlueStacks. You can also enable or disable virtualization technology, ASTC texture, and ABI setting.
      • -
      • For preferences settings, you can choose the language, location, keyboard layout, and time zone of your BlueStacks. You can also enable or disable auto-updates, app notifications, app center recommendations, data backup, and disk cleanup.
      • -
      • For game controls settings, you can customize your keyboard, mouse, or gamepad settings for each game. You can also enable or disable game guidance, smart controls, MOBA mode, shooting mode, and aim pan mode.
      • -

      The best apps and games you can find on 9apps yukle

      -

      One of the best things about 9apps yukle is that it has a huge collection of apps and games in various categories and genres. You can find apps and games for entertainment, productivity, social media, education, health, lifestyle, and more. You can also find apps and games for different age groups, interests, and skill levels. Here are some of the best apps and games you can find on 9apps yukle:

      -

      The top categories and genres of apps and games on 9apps yukle

      -

      According to the statistics of 9apps yukle, the top categories and genres of apps and games on 9apps yukle are as follows:

| Category | Genre | Example |
| --- | --- | --- |
| Entertainment | Video Players & Editors | VidMate, MX Player, KineMaster |
| Productivity | Tools | Shareit, Xender, CamScanner |
| Social Media | Communication | WhatsApp, Facebook, Instagram |
| Education | Education | Duolingo, Khan Academy, Udemy |
| Health | Fitness | Noom, Fitbit, Calm |
| Lifestyle | Shopping | Amazon, Flipkart, AliExpress |
| Games | Action | PUBG Mobile, Free Fire, Call of Duty Mobile |
| Games | Casual | Candy Crush Saga, Subway Surfers, Temple Run 2 |
| Games | Puzzle | Toon Blast, Brain Out, Cut the Rope 2 |
      -

      These are some of the most popular and widely used categories and genres of apps and games on 9apps yukle. You can explore more categories and genres by tapping on the "More" button on the home screen of 9apps yukle.

      -

      The most popular and trending apps and games on 9apps yukle

      -

      Another way to find the best apps and games on 9apps yukle is to check out the most popular and trending apps and games on 9apps yukle. These are the apps and games that have the most downloads, ratings, reviews, and recommendations from other users. You can see the most popular and trending apps and games on 9apps yukle by tapping on the "Top" button on the home screen of 9apps yukle. Here are some of the most popular and trending apps and games on 9apps yukle:

      -
        -
      • VidMate: A powerful video downloader that allows you to download videos from YouTube, Facebook, Instagram, and other platforms in various formats and resolutions.
      • -
      • PUBG Mobile: A thrilling battle royale game that pits you against 99 other players in a fight for survival. You can play solo, duo, or squad mode, and customize your weapons, vehicles, outfits, and more.
      • -
      • WhatsApp: A free messaging and calling app that lets you communicate with your friends and family across the world. You can also send photos, videos, documents, stickers, voice messages, and more.
      • -
      • Candy Crush Saga: A sweet and addictive puzzle game that challenges you to match three or more candies of the same color and clear the board. You can also play with your friends and compete for the highest score.
      • -
      • Duolingo: A fun and effective language learning app that teaches you a new language through bite-sized lessons, games, quizzes, and stories. You can choose from over 30 languages and track your progress.
      • -
      -

      The hidden gems and underrated apps and games on 9apps yukle

      -

      Besides the most popular and trending apps and games on 9apps yukle, there are also some hidden gems and underrated apps and games on 9apps yukle that deserve your attention. These are the apps and games that have great quality, features, and potential, but are not as well-known or appreciated as they should be. You can discover these hidden gems and underrated apps and games on 9apps yukle by tapping on the "Discover" button on the home screen of 9apps yukle. Here are some of the hidden gems and underrated apps and games on 9apps yukle:

      -
        -
      • CamScanner: A handy scanner app that turns your device's camera into a scanner. You can scan documents, receipts, notes, photos, and more, and save them as PDF or JPG files. You can also edit, share, print, or sync your scans.
      • -
      • Free Fire: A fast-paced battle royale game that offers you a 10-minute survival experience. You can choose your landing spot, loot weapons and items, shoot enemies, and be the last one standing.
      • -
      • Instagram: A popular social media app that lets you share your photos and videos with your followers. You can also apply filters, stickers, effects, and more to your posts. You can also follow your favorite celebrities, brands, influencers, and more.
      • -
      • Toon Blast: A fun and colorful puzzle game that takes you to a cartoon world full of adventures. You can match cubes of the same color to blast them away, use boosters to overcome obstacles, and complete levels with your friends.
      • -
      • Udemy: A learning platform that offers you thousands of courses on various topics, such as business, design, photography, programming, personal development, and more. You can learn from expert instructors at your own pace.
      • -
      -

      Conclusion

      -

      In conclusion, 9apps yukle is a great app store for Android users who want to discover and download the best apps and games for their device. It has a huge collection of apps and games in various categories and genres, a smart recommendation system, a fast and easy download process, a user-friendly interface, regular updates, and a safe and secure platform. It also allows you to run Android apps and games on your PC or Mac with BlueStacks, which offers you many benefits such as better graphics, sound, and performance, keyboard, mouse, and gamepad controls, multitasking, backup and sync, and more. You can also find the best apps and games on 9apps yukle by checking out the top categories and genres, the most popular and trending apps and games, and the hidden gems and underrated apps and games. We hope you enjoyed this article and learned something new about 9apps yukle. If you are interested in trying out 9apps yukle and BlueStacks, you can download them from the links below. Happy downloading!

      -

      FAQs

      -

      Here are some of the frequently asked questions about 9apps yukle and BlueStacks:

      -
        -
      1. Is 9apps yukle free to use?
      2. -

        Yes, 9apps yukle is free to use. You can download and install it on your Android device without any cost. You can also download and use any app or game on 9apps yukle for free.

        -
      3. Is 9apps yukle safe to use?
      4. -

        Yes, 9apps yukle is safe to use. It has a strict security system that scans and verifies every app and game before uploading it to the platform. It also protects your device from malware, viruses, and other threats.

        -
      5. Is BlueStacks free to use?
      6. -

        Yes, BlueStacks is free to use. You can download and install it on your PC or Mac without any cost. You can also run any Android app or game on BlueStacks for free.

        -
      7. Is BlueStacks safe to use?
      8. -

        Yes, BlueStacks is safe to use. It has a reliable security system that ensures your privacy and data protection. It also complies with the Google Play policies and terms of service.

        -
      9. How can I contact the support team of 9apps yukle or BlueStacks?
      10. -

        If you have any questions, issues, or feedback about 9apps yukle or BlueStacks, you can contact their support team by visiting their official websites (https://www.9appsyukle.com/ or https://www.bluestacks.com/) and clicking on the "Contact us" or "Support" button. You can also email them at support@9appsyukle.com or support@bluestacks.com.

        -

      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/CarX Drift Racing 2 The Ultimate Drifting Game for PC and Mac.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/CarX Drift Racing 2 The Ultimate Drifting Game for PC and Mac.md deleted file mode 100644 index 21b7658097b5723e393fece068fbe0a8c734ad4c..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/CarX Drift Racing 2 The Ultimate Drifting Game for PC and Mac.md +++ /dev/null @@ -1,163 +0,0 @@ - -

      CarX Drift Racing 2 PC Download: How to Play the Best Mobile Drift Game on Your Computer

      -

      If you are a fan of drifting and racing games, you might have heard of CarX Drift Racing 2, one of the most popular and realistic mobile drift games available. But did you know that you can also play this game on your PC and enjoy its stunning graphics, physics, and features on a bigger screen? In this article, we will show you how to download and play CarX Drift Racing 2 on your computer, as well as some tips and tricks to help you master the game.

      -

      carx drift racing 2 pc download


DOWNLOAD: https://ssurll.com/2uNUwi



      -

      What is CarX Drift Racing 2?

      -

      CarX Drift Racing 2 is a racing game developed by CarX Technologies, LLC, a Russian company that specializes in creating realistic car physics engines. The game was released in 2018 as a sequel to the original CarX Drift Racing, which has over 100 million downloads worldwide. CarX Drift Racing 2 offers an unprecedented and realistic experience of driving real sports cars on one of many race tracks available throughout the game. Here are some of its key features:

      -

      A sequel of the most desired drift game

      -

      CarX Drift Racing 2 builds on the success of its predecessor by adding more content, modes, and features to the game. The game has over 80 cars to choose from, each with its own characteristics, sound, and behavior. You can customize your car with various parts, body kits, vinyls, and colors to create your own unique style. You can also tune your car's performance by adjusting the suspension, engine, gearbox, brakes, differential, and more. You can show off your skills and creativity in different modes, such as story mode, multiplayer mode, ghost mode, XDS mode, top-32 mode, and more.

      -

      A realistic and immersive experience of driving real sports cars

      -

      CarX Drift Racing 2 features a physics engine that accurately simulates the behavior of cars on the track, making the drifting experience more realistic and challenging. You can see how tire pressure, weight distribution, speed, angle, and other factors affect your car's handling and stability. You can also feel the difference between driving on different surfaces, such as asphalt, sand, grass, snow, and more. The game also has stunning graphics and sound design that enhance the overall gaming experience. The graphics are detailed and realistic, with high-quality textures, lighting, and visual effects that create a sense of immersion. The sound is also authentic and immersive, with realistic engine noises, tire screeches, exhaust sounds, and more.

      -

      A variety of gameplay options and features

      -

      CarX Drift Racing 2 offers a variety of gameplay options and features that make it one of the most diverse and enjoyable mobile racing games available. You can choose from different modes to suit your preferences and goals. For example:

      -
        -
      • Story mode: You can progress through a series of challenges and events to unlock new cars and customization options.
      • -
• Multiplayer mode: You can compete against real players from around the world in online championships and tournaments. You can also join or create online rooms to drift with your friends or other players.

      • -
      • Ghost mode: You can race against your own best results or the results of other players to improve your skills and rankings.
      • -
      • XDS mode: You can practice tandem drifting with a virtual partner that mimics your actions on the track. You can also switch roles and try to follow your partner's moves.
      • -
      • Top-32 mode: You can participate in a knockout tournament where you have to beat 32 opponents in a row to win the grand prize.
      • -
      -

      The game also has other features that make it more fun and engaging, such as:

      -


      -
        -
      • CarX Drift Racing Online: You can connect your game account to the CarX Drift Racing Online PC game and sync your progress, cars, and customization options.
      • -
      • CarX Highway Racing: You can access a special mode where you can race on highways with traffic and police chases.
      • -
      • CarX Rally: You can access a special mode where you can race on dirt roads with rally cars and challenges.
      • -
      • CarX Network: You can join the CarX community and interact with other players, share your videos and screenshots, get news and updates, and more.
      • -
      -

      Why play CarX Drift Racing 2 on PC?

      -

      While CarX Drift Racing 2 is a great game to play on mobile devices, it can be even better to play it on PC. Here are some of the benefits of playing CarX Drift Racing 2 on PC:

      -

      Better graphics and performance

      -

      Playing CarX Drift Racing 2 on PC allows you to enjoy the game's graphics and performance at their best. You can adjust the graphics settings to suit your preferences and hardware capabilities, and enjoy smooth and stable gameplay without lag or crashes. You can also play the game in full-screen mode and appreciate the game's details and effects on a larger screen.

      -

      Customizable controls and settings

      -

      Playing CarX Drift Racing 2 on PC gives you more options to customize your controls and settings. You can use your keyboard and mouse, or connect a gamepad or a steering wheel to enhance your gaming experience. You can also change the key mapping, sensitivity, camera angle, sound volume, language, and more according to your liking.

      -

      Multitasking and macro features

      -

      Playing CarX Drift Racing 2 on PC enables you to multitask and use macro features that are not available on mobile devices. You can run multiple instances of the game at the same time and switch between them easily. You can also record and replay your actions using macros, which can help you automate tasks, save time, and improve your performance.

      -

      How to download and play CarX Drift Racing 2 on PC?

      -

      There are two main ways to download and play CarX Drift Racing 2 on PC: using an emulator or using Steam. Here is how to do it:

      -

      Using an emulator

      -

      An emulator is a software that allows you to run mobile apps on your PC. There are many emulators available for free online, but some of the most popular ones for playing CarX Drift Racing 2 are BlueStacks, LDPlayer, and MuMu Player. Here is how to use them:

      -

      BlueStacks

      -
        -
      1. Download BlueStacks from its official website here.
      2. -
      3. Install BlueStacks on your PC by following the instructions.
      4. -
      5. Launch BlueStacks and sign in with your Google account.
      6. -
      7. Go to the Google Play Store app on BlueStacks and search for CarX Drift Racing 2.
      8. -
      9. Click on Install and wait for the game to download.
      10. -
      11. Click on Open and enjoy playing CarX Drift Racing 2 on PC.
      12. -
      -

      LDPlayer

      -
        -
      1. Download LDPlayer from its official website here.
      2. -
      3. Install LDPlayer on your PC by following the instructions.
      4. -
      5. Launch LDPlayer and sign in with your Google account.
      6. -
      7. Go to the LD Store app on LDPlayer and search for CarX Drift Racing 2.
      8. -
      9. Click on Install and wait for the game to download.
      10. -
      11. Click on Open and enjoy playing CarX Drift Racing 2 on PC.
      12. -
      -

      MuMu Player

      -
        -
      1. Download MuMu Player from its official website here.
      2. -
      3. Install MuMu Player on your PC by following the instructions.
      4. -
      5. Launch MuMu Player and sign in with your Google account.
      6. -
      7. Go to the Google Play Store app on MuMu Player and search for CarX Drift Racing 2.
      8. -
      9. Click on Install and wait for the game to download.
      10. -
      11. Click on Open and enjoy playing CarX Drift Racing 2 on PC.
      12. -
      -

      Using Steam

      -

      Steam is a digital distribution platform that allows you to buy and play PC games online. CarX Drift Racing 2 is also available on Steam, but it is not the same as the mobile version. It is a separate game called CarX Drift Racing Online, which is compatible with the mobile version through the CarX Network feature. Here is how to use Steam:

      -
        -
      1. Download Steam from its official website here.
      2. -
      3. Install Steam on your PC by following the instructions.
      4. -
      5. Launch Steam and sign in with your Steam account.
      6. -
      7. Go to the Store tab and search for CarX Drift Racing Online.
      8. -
      9. Click on Add to Cart and proceed to checkout.
      10. -
      11. After purchasing the game, go to the Library tab and click on CarX Drift Racing Online.
      12. -
      13. Click on Play and enjoy playing CarX Drift Racing 2 on PC.
      14. -
      -

      Tips and tricks to drift like a pro in CarX Drift Racing 2

      -

      Now that you know how to download and play CarX Drift Racing 2 on PC, you might want to learn some tips and tricks to improve your skills and performance in the game. Here are some of them:

      -

      Upgrade your car

      -

      One of the most important things to do in CarX Drift Racing 2 is to upgrade your car. Upgrading your car can improve its speed, acceleration, handling, braking, and stability, which can make a big difference in your drifting performance. You can upgrade your car by using coins or gold, which you can earn by completing events, challenges, or achievements, or by purchasing them with real money. You can also unlock new cars by progressing through the story mode or by buying them with gold. You should try to upgrade your car as much as possible and experiment with different cars to find the one that suits your style and preference.

      -

      Master drifting

      -

      The core gameplay of CarX Drift Racing 2 is drifting, which is a technique of driving where you intentionally oversteer your car and make it slide sideways. Drifting can be fun and rewarding, but it can also be challenging and tricky. You need to master drifting in order to score high points, win races, and progress in the game. You can master drifting by practicing a lot, learning the basics, and applying some tips, such as:

      -
        -
      • Use the handbrake: The handbrake is a useful tool for initiating and maintaining drifts. You can use it to quickly change direction, adjust your angle, or extend your drift. You can also use it to perform advanced maneuvers, such as feints, flicks, or reverse entries.
      • -
      • Control your throttle: The throttle is another essential tool for drifting. You can use it to control your speed, balance, and momentum. You should try to maintain a steady throttle while drifting, and avoid over-revving or under-revving your engine. You should also use it to accelerate out of corners and gain more points.
      • -
      • Adjust your steering: The steering is the final tool for drifting. You can use it to steer your car into and out of drifts, as well as to correct your mistakes. You should try to steer smoothly and precisely, and avoid oversteering or understeering your car. You should also use it to follow the racing line and hit the clipping points.
      • -
      -

      Use XDS mode to practice tandem drifting

      -

      XDS mode is a unique feature of CarX Drift Racing 2 that allows you to practice tandem drifting with a virtual partner. Tandem drifting is a form of drifting where two or more cars drift together in close proximity and synchronization. Tandem drifting can be very exciting and challenging, but it can also be very difficult and risky. You need to have good skills, coordination, and communication with your partner in order to perform well. You can use XDS mode to practice tandem drifting by choosing one of the two roles: leader or follower. As a leader, you have to set the pace and direction of the drift, while as a follower, you have to match the leader's moves as closely as possible. You can switch roles at any time and try to improve your skills and teamwork. You can also use XDS mode to compete against other players online and see who can drift better in tandem.

      -

      Compete in online championships and tournaments

      -

      One of the most exciting and rewarding aspects of CarX Drift Racing 2 is competing in online championships and tournaments. Online championships and tournaments are events where you can race against other players from around the world and earn prizes, rankings, and fame. You can choose from different types of events, such as solo, team, or tandem, and different levels of difficulty, such as amateur, pro, or legend. You can also join or create your own club and team up with other players to participate in club battles and wars. You can compete in online championships and tournaments by following these steps:

      -
        -
      1. Go to the Online tab and select Championships or Tournaments.
      2. -
      3. Choose an event that suits your preference and skill level.
      4. -
      5. Enter the event and wait for the countdown to start.
      6. -
      7. Race against other players on the selected track and try to score as many points as possible.
      8. -
      9. Check your results and rankings at the end of the race.
      10. -
      11. Claim your rewards and enjoy your achievements.
      12. -
      -

      Conclusion

      -

      CarX Drift Racing 2 is a fantastic game for drifting and racing enthusiasts. It offers a realistic and immersive experience of driving real sports cars on various tracks and modes. It also has a lot of content, features, and options to customize your car and gameplay. You can play this game on your mobile device, but you can also play it on your PC and enjoy its benefits. You can download and play CarX Drift Racing 2 on PC by using an emulator or Steam. You can also improve your skills and performance by following some tips and tricks, such as upgrading your car, mastering drifting, using XDS mode, and competing in online championships and tournaments. If you are looking for a fun and challenging drift game to play on your PC, you should definitely try CarX Drift Racing 2.

      -

      FAQs

      -

      Here are some frequently asked questions about CarX Drift Racing 2:

      -
        -
      • Q: How do I get more coins and gold in CarX Drift Racing 2?
      • -
      • A: You can get more coins and gold by completing events, challenges, achievements, or daily tasks. You can also watch ads or buy them with real money.
      • -
      • Q: How do I unlock new tracks in CarX Drift Racing 2?
      • -
      • A: You can unlock new tracks by progressing through the story mode or by buying them with gold.
      • -
      • Q: How do I sync my progress between CarX Drift Racing 2 mobile version and CarX Drift Racing Online PC version?
      • -
      • A: You can sync your progress by connecting your game account to the CarX Network feature. You can do this by going to the Settings tab and selecting CarX Network.
      • -
      • Q: How do I change my car's color in CarX Drift Racing 2?
      • -
      • A: You can change your car's color by going to the Garage tab and selecting Paint Shop. You can choose from different colors, shades, glosses, or vinyls.
      • -
      • Q: How do I drift better in CarX Drift Racing 2?
      • -
      • A: You can drift better by practicing a lot, learning the basics, and applying some tips, such as using the handbrake, controlling your throttle, adjusting your steering, following the racing line, hitting the clipping points, upgrading your car, tuning your car's performance, choosing the right car for each track, using XDS mode, watching replays or tutorials, or asking for advice from other players.
      • -

      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/GTA 5 Mod Menu Xbox One USB Download 2022 Top 5 Choices for Offline and Online.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/GTA 5 Mod Menu Xbox One USB Download 2022 Top 5 Choices for Offline and Online.md deleted file mode 100644 index 8eb2b4bbfefd562a9affc8dd79358caedf8ca18d..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/GTA 5 Mod Menu Xbox One USB Download 2022 Top 5 Choices for Offline and Online.md +++ /dev/null @@ -1,189 +0,0 @@ -
      -

      GTA 5 Mod Menu Xbox One USB Download 2022: How to Install and Use

      -

      If you are looking for a way to spice up your GTA 5 gameplay on Xbox One, you might want to try using a mod menu. A mod menu is a tool that allows you to access and customize various features and options in the game, such as money, weapons, vehicles, missions, cheats, and more. In this article, we will show you how to download and install a GTA 5 mod menu for Xbox One using a USB drive, as well as how to use it safely and effectively.

      -

      gta 5 mod menu xbox one usb download 2022


      Download >>>>> https://ssurll.com/2uNUjT



      -

      What is a GTA 5 mod menu?

      -

A GTA 5 mod menu is a piece of software that modifies the game files and adds new functions and commands to the game. A mod menu can be accessed by pressing a certain button or combination of buttons on your controller, which will open up a menu with different categories and options. You can then select and activate the features that you want to use in the game, such as:

      -
        -
      • Adding unlimited money to your account
      • -
      • Spawning any vehicle or weapon that you want
      • -
      • Changing your character's appearance and skills
      • -
      • Teleporting to any location on the map
      • -
      • Enabling invincibility, super speed, super jump, and other cheats
      • -
      • Completing missions and unlocking achievements
      • -
      • And much more!
      • -
      -

      Benefits of using a GTA 5 mod menu

      -

      Using a GTA 5 mod menu can have many benefits, such as:

      -
        -
      • Enhancing your gaming experience and having more fun
      • -
      • Exploring new possibilities and scenarios in the game
      • -
      • Customizing your gameplay according to your preferences
      • -
      • Saving time and effort by skipping tedious tasks
      • -
      • Impressing your friends and other players online
      • -
      -

      Risks of using a GTA 5 mod menu

      -

      However, using a GTA 5 mod menu also comes with some risks, such as:

      -
        -
      • Breaching the terms of service and end-user license agreement of Rockstar Games
      • -
      • Getting banned from playing online or accessing certain features
      • -
      • Corrupting your game files or console system
      • -
      • Exposing your device to viruses or malware
      • -
      • Ruining the balance and challenge of the game
      • -
      -

      How to download a GTA 5 mod menu for Xbox One

      -

      If you want to download and install a GTA 5 mod menu for Xbox One, you will need the following:

      -


      -

      Requirements for installing a GTA 5 mod menu

      -
        -
      • A computer with an internet connection
      • -
      • A USB drive with at least 8 GB of free space
      • -
      • A copy of GTA 5 on Xbox One (either disc or digital)
      • -
      • A compatible Xbox One controller (either wired or wireless)
      • -
      -

      Steps to download and install a GTA 5 mod menu

      -

      Step 1: Find a reliable mod menu website

      -

      The first step is to find a reliable and trustworthy website that offers GTA 5 mod menus for Xbox One. There are many websites that claim to provide mod menus, but some of them may be scams, outdated, or harmful to your device. Therefore, you should do some research and read reviews before downloading any mod menu from a website. Some of the factors that you should consider when choosing a mod menu website are:

      -
        -
      • The reputation and credibility of the website
      • -
      • The quality and variety of the mod menus available
      • -
      • The compatibility and security of the mod menus
      • -
      • The ease and speed of the download and installation process
      • -
      • The customer support and feedback system
      • -
      -

      One of the websites that we recommend for downloading GTA 5 mod menus for Xbox One is [Mod Menuz]. This website has a wide range of mod menus for different games and platforms, including GTA 5 on Xbox One. The mod menus are tested and verified by the website's team, and they are updated regularly to ensure that they work with the latest version of the game. The website also provides detailed instructions and videos on how to download and install the mod menus, as well as how to use them safely and effectively. The website also has a live chat and email support system, where you can ask questions and get help from the experts.

      -

      Step 2: Download the mod menu file to your computer

      -

      Once you have found a reliable mod menu website, you can proceed to download the mod menu file to your computer. The file will usually be in a compressed format, such as ZIP or RAR, which contains the mod menu files and folders. To download the file, you will need to follow these steps:

      -
        -
      1. Go to the mod menu website and browse through the available mod menus for GTA 5 on Xbox One.
      2. -
      3. Select the mod menu that you want to download and click on the download button.
      4. -
      5. You may need to complete a verification process, such as entering a captcha code or completing a survey, to prove that you are not a robot.
      6. -
      7. After completing the verification process, you will be redirected to a download page, where you can see the file name, size, and format.
      8. -
      9. Click on the download link or button and save the file to your computer.
      10. -
      -

      Step 3: Extract the mod menu file to a USB drive

      -

After downloading the mod menu file to your computer, you will need to extract it to a USB drive. The USB drive should have at least 8 GB of free space and should be formatted to FAT32 or exFAT. To extract the file, you will need to follow these steps (a scripted version of the same step is sketched after the list):

      -
        -
      1. Insert the USB drive into your computer's USB port.
      2. -
      3. Locate the downloaded mod menu file on your computer and right-click on it.
      4. -
      5. Select "Extract All" or "Extract Here" from the context menu.
      6. -
      7. A window will pop up asking you to choose a destination folder for the extracted files.
      8. -
      9. Select your USB drive as the destination folder and click on "Extract".
      10. -
      11. Wait for the extraction process to finish. You should see a folder with the same name as the mod menu file on your USB drive.
      12. -
      -
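For readers who would rather script this step, a minimal Python sketch follows. It covers the ZIP case only (a .rar archive needs a separate tool), and the download path and USB drive letter are hypothetical placeholders — substitute your own.

```python
# A scripted version of Step 3, ZIP archives only. Paths are placeholders.
import zipfile
from pathlib import Path

archive = Path.home() / "Downloads" / "modmenu.zip"  # the file saved in Step 2
usb_root = Path("E:/")                               # your USB drive letter

# Extract everything into a folder named after the archive, matching step 11
target = usb_root / archive.stem
with zipfile.ZipFile(archive) as zf:
    zf.extractall(target)
    print(f"Extracted {len(zf.namelist())} files to {target}")
```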

      Step 4: Plug the USB drive into your Xbox One console

      -

      After extracting the mod menu file to your USB drive, you will need to plug it into your Xbox One console. To do this, you will need to follow these steps:

      -
        -
      1. Turn on your Xbox One console and sign in with your account.
      2. -
      3. Insert the disc or launch the digital version of GTA 5 on your console.
      4. -
      5. Wait for the game to load and start playing.
      6. -
      7. Plug the USB drive into one of the USB ports on your console.
      8. -
      9. Your console should detect the USB drive automatically and prompt you to view its contents.
      10. -
      11. Select "Yes" or "OK" to view the contents of your USB drive.
      12. -
      -

      Step 5: Launch GTA 5 and activate the mod menu

      -

      The final step is to launch GTA 5 and activate the mod menu on your Xbox One console. To do this, you will need to follow these steps:

      -
        -
      1. Go back to GTA 5 and press the pause button on your controller.
      2. -
      3. Select "Online" from the pause menu and choose "Play GTA Online".
      4. -
      5. Select "Invite Only Session" or "Solo Session" from the online options. This will prevent other players from joining your session and reporting you for using mods.
      6. -
      7. Wait for the online session to load and spawn in the game world.
      8. -
      9. Press the button or combination of buttons that corresponds to the mod menu that you downloaded. For example, some mod menus may require you to press the RB + X buttons, while others may require you to press the D-pad + A buttons. You can check the instructions or video provided by the mod menu website to find out the correct button combination.
      10. -
      11. A mod menu screen should appear on your TV, showing different categories and options that you can choose from.
      12. -
      13. Use the D-pad or the left stick to navigate through the mod menu, and use the A button to select or activate an option. You can also use the B button to go back or exit the mod menu.
      14. -
      15. Enjoy using the mod menu features and options in GTA 5 online!
      16. -
      -

      How to use a GTA 5 mod menu on Xbox One

      -

      Now that you have installed and activated a GTA 5 mod menu on your Xbox One console, you may be wondering how to use it effectively and safely. Here are some tips and tricks that you should keep in mind when using a GTA 5 mod menu:

      -

      Features of a GTA 5 mod menu

      -

A GTA 5 mod menu can have many features and options, depending on the type and version of the mod menu that you downloaded. The most common ones mirror the list in the overview above: money drops, vehicle and weapon spawning, character customization, teleporting, cheat toggles such as invincibility, and tools for completing missions and unlocking achievements.

      Tips and tricks for using a GTA 5 mod menu

      -

      While using a GTA 5 mod menu can be fun and exciting, it can also be risky and dangerous. Therefore, you should follow these tips and tricks when using a GTA 5 mod menu:

      -
        -
      • Use a mod menu only in single-player mode or private online sessions. Do not use a mod menu in public online sessions or competitive modes, as this may get you banned or reported by other players.
      • -
      • Use a mod menu sparingly and moderately. Do not abuse or overuse the features and options of the mod menu, as this may ruin the balance and challenge of the game. Also, do not use too many features or options at once, as this may cause glitches or crashes in the game.
      • -
      • Use a mod menu responsibly and respectfully. Do not use a mod menu to harm or harass other players or NPCs in the game. Also, do not use a mod menu to cheat or gain an unfair advantage over other players or NPCs in the game.
      • -
      • Use a mod menu at your own risk and discretion. Be aware of the potential consequences and dangers of using a mod menu in GTA 5. Also, be prepared to face any issues or problems that may arise from using a mod menu in GTA 5.
      • -

        Conclusion

        -

        In conclusion, using a GTA 5 mod menu on Xbox One can be a great way to enhance your gaming experience and have more fun in GTA 5. However, you should also be careful and cautious when using a mod menu, as it can have some risks and drawbacks. You should always download and install a mod menu from a reliable and trustworthy website, and follow the instructions and guidelines on how to use it safely and effectively. You should also use a mod menu only in single-player mode or private online sessions, and avoid using it in public online sessions or competitive modes. You should also use a mod menu sparingly and moderately, and not abuse or overuse the features and options of the mod menu. You should also use a mod menu responsibly and respectfully, and not use it to harm or harass other players or NPCs in the game. And finally, you should use a mod menu at your own risk and discretion, and be aware of the potential consequences and dangers of using a mod menu in GTA 5. We hope that this article has helped you learn how to download and install a GTA 5 mod menu for Xbox One using a USB drive, as well as how to use it effectively and safely. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

        FAQs

        -

        Here are some of the frequently asked questions about GTA 5 mod menus on Xbox One:

        -
          -
        1. What is the best GTA 5 mod menu for Xbox One?
        2. -

          There is no definitive answer to this question, as different mod menus may have different features, options, quality, compatibility, security, and performance. However, some of the factors that you should consider when choosing a GTA 5 mod menu for Xbox One are the reputation and credibility of the website, the quality and variety of the mod menus available, the compatibility and security of the mod menus, the ease and speed of the download and installation process, the customer support and feedback system, and the features and options that suit your preferences and needs. -

        3. Can I use a GTA 5 mod menu on Xbox One without a USB drive?
        4. -

          No, you cannot use a GTA 5 mod menu on Xbox One without a USB drive. A USB drive is required to transfer the mod menu files from your computer to your console. You cannot download or install a GTA 5 mod menu directly from your console's internet browser or store. -

        5. Can I use a GTA 5 mod menu on Xbox One offline?
        6. -

          Yes, you can use a GTA 5 mod menu on Xbox One offline. However, you will need to download and install the mod menu online first, before you can use it offline. Also, some of the features and options of the mod menu may not work offline, such as spawning online vehicles or completing online missions. -

        7. Can I use a GTA 5 mod menu on Xbox One with friends?
        8. -

          Yes, you can use a GTA 5 mod menu on Xbox One with friends. However, you should only use it in private online sessions or invite-only sessions with your friends who are also using mod menus. Do not use it in public online sessions or competitive modes with other players who are not using mod menus, as this may get you banned or reported by them. -

        9. Can I get banned for using a GTA 5 mod menu on Xbox One?
        10. -

          Yes, you can get banned for using a GTA 5 mod menu on Xbox One. Using a mod menu is against the terms of service and end-user license agreement of Rockstar Games, which prohibit modifying or interfering with the game files or online services. If Rockstar detects that you are using a mod menu in GTA 5 online, they may ban your account temporarily or permanently, depending on the severity of your offense. They may also restrict your access to certain features or functions in the game. -

        -
        -
\ No newline at end of file
diff --git a/spaces/skf15963/summary/fengshen/workspace/readme.md b/spaces/skf15963/summary/fengshen/workspace/readme.md
deleted file mode 100644
index ddeb5b856454074f86c9c3d079377cc58859897f..0000000000000000000000000000000000000000
--- a/spaces/skf15963/summary/fengshen/workspace/readme.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Readme
-
-This directory mainly stores the log files and checkpoints produced during training, as well as the configuration files that some of the examples need at initialization.
diff --git a/spaces/sklearn-docs/Density-Estimation-for-a-Gaussian-mixture/README.md b/spaces/sklearn-docs/Density-Estimation-for-a-Gaussian-mixture/README.md
deleted file mode 100644
index b8e45a13c7584050cb31179b446c76a9278e3f9b..0000000000000000000000000000000000000000
--- a/spaces/sklearn-docs/Density-Estimation-for-a-Gaussian-mixture/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Density Estimation For A Gaussian Mixture
-emoji: 📈
-colorFrom: green
-colorTo: pink
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/songwy/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py b/spaces/songwy/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py
deleted file mode 100644
index 3d7009c40fea3a98168e3e3bc9ae061e91327422..0000000000000000000000000000000000000000
--- a/spaces/songwy/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import numpy as np
-import torch
-from .monotonic_align.core import maximum_path_c
-
-
-def maximum_path(neg_cent, mask):
-    """ Cython optimized version.
-    neg_cent: [b, t_t, t_s]
-    mask: [b, t_t, t_s]
-    """
-    device = neg_cent.device
-    dtype = neg_cent.dtype
-    neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
-    path = np.zeros(neg_cent.shape, dtype=np.int32)
-
-    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
-    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
-    maximum_path_c(path, neg_cent, t_t_max, t_s_max)
-    return torch.from_numpy(path).to(device=device, dtype=dtype)
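The maximum_path wrapper just above expects batched tensors shaped [b, t_t, t_s], per its docstring, and it needs the compiled Cython extension monotonic_align.core. A minimal call sketch with arbitrary example shapes:

```python
# Minimal call sketch for the maximum_path wrapper above. Assumes the
# monotonic_align package is importable and its Cython core is compiled.
import torch
from monotonic_align import maximum_path

b, t_t, t_s = 2, 7, 5                 # arbitrary example sizes
neg_cent = torch.randn(b, t_t, t_s)   # alignment scores, [b, t_t, t_s]
mask = torch.ones(b, t_t, t_s)        # all positions valid in this toy case

path = maximum_path(neg_cent, mask)   # hard monotonic path, same shape
print(path.shape)                     # torch.Size([2, 7, 5])
```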
model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = numpy.fromstring(fig.canvas.tostring_rgb(), dtype=numpy.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = numpy.fromstring(fig.canvas.tostring_rgb(), dtype=numpy.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not 
os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warning("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams: - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/tests/test_backtranslation_dataset.py b/spaces/sriramelango/Social_Classification_Public/fairseq/tests/test_backtranslation_dataset.py deleted file mode 100644 index dffc3b49387dfdc046ea23d7db179377040b7cbc..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/tests/test_backtranslation_dataset.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import unittest - -import tests.utils as test_utils -import torch -from fairseq.data import ( - BacktranslationDataset, - LanguagePairDataset, - TransformEosDataset, -) -from fairseq.sequence_generator import SequenceGenerator - - -class TestBacktranslationDataset(unittest.TestCase): - def setUp(self): - ( - self.tgt_dict, - self.w1, - self.w2, - self.src_tokens, - self.src_lengths, - self.model, - ) = test_utils.sequence_generator_setup() - - dummy_src_samples = self.src_tokens - - self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples) - self.cuda = torch.cuda.is_available() - - def _backtranslation_dataset_helper( - self, - remove_eos_from_input_src, - remove_eos_from_output_src, - ): - tgt_dataset = LanguagePairDataset( - src=self.tgt_dataset, - src_sizes=self.tgt_dataset.sizes, - src_dict=self.tgt_dict, - tgt=None, - tgt_sizes=None, - tgt_dict=None, - ) - - generator = SequenceGenerator( - [self.model], - tgt_dict=self.tgt_dict, - max_len_a=0, - max_len_b=200, - beam_size=2, - unk_penalty=0, - ) - - backtranslation_dataset = BacktranslationDataset( - tgt_dataset=TransformEosDataset( - dataset=tgt_dataset, - eos=self.tgt_dict.eos(), - # remove eos from the input src - remove_eos_from_src=remove_eos_from_input_src, - ), - src_dict=self.tgt_dict, - backtranslation_fn=( - lambda sample: generator.generate([self.model], sample) - ), - output_collater=TransformEosDataset( - dataset=tgt_dataset, - eos=self.tgt_dict.eos(), - # if we remove eos from the input src, then we need to add it - # back to the output tgt - append_eos_to_tgt=remove_eos_from_input_src, - remove_eos_from_src=remove_eos_from_output_src, - ).collater, - cuda=self.cuda, - ) - dataloader = torch.utils.data.DataLoader( - backtranslation_dataset, - batch_size=2, - collate_fn=backtranslation_dataset.collater, - ) - backtranslation_batch_result = next(iter(dataloader)) - - eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2 - - # Note that we sort by src_lengths and add left padding, so actually - # ids will look like: [1, 0] - expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]]) - if remove_eos_from_output_src: - expected_src = expected_src[:, :-1] - expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) - generated_src = backtranslation_batch_result["net_input"]["src_tokens"] - tgt_tokens = backtranslation_batch_result["target"] - - self.assertTensorEqual(expected_src, generated_src) - self.assertTensorEqual(expected_tgt, tgt_tokens) - - def test_backtranslation_dataset_no_eos_in_output_src(self): - self._backtranslation_dataset_helper( - remove_eos_from_input_src=False, - remove_eos_from_output_src=True, - ) - - def test_backtranslation_dataset_with_eos_in_output_src(self): - self._backtranslation_dataset_helper( - remove_eos_from_input_src=False, - remove_eos_from_output_src=False, - ) - - def test_backtranslation_dataset_no_eos_in_input_src(self): - self._backtranslation_dataset_helper( - remove_eos_from_input_src=True, - remove_eos_from_output_src=False, - ) - - def assertTensorEqual(self, t1, t2): - self.assertEqual(t1.size(), t2.size(), "size mismatch") - self.assertEqual(t1.ne(t2).long().sum(), 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download Man Phool Aur Aag.md b/spaces/stomexserde/gpt4-ui/Examples/Download Man Phool Aur Aag.md deleted file mode 100644 index c63d5de1ac8c7ecc6a43a098f83eb307eb843d7e..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Download 
Man Phool Aur Aag.md +++ /dev/null @@ -1,25 +0,0 @@ -
        -

        How to Download Phool Aur Aag, a Classic Hindi Action Movie

        -

        Phool Aur Aag is a 1999 Hindi action movie starring Mithun Chakraborty, Jackie Shroff and Archana. The movie revolves around Deva, a noble man who visits a neighboring town and saves a girl from a snake. However, his good deeds are misunderstood by the locals and his enemies, who try to frame him for various crimes. The movie is directed by T.L.V. Prasad and has a rating of 5.2 on IMDb.

        -

        If you are a fan of this movie and want to watch it online or download it to your device, here are some options you can try:

        -

        download man Phool Aur Aag


        Download ––– https://urlgoal.com/2uI6qS



        -
          -
        • JioCinema: This is an online streaming platform that offers a wide range of movies and shows in various languages. You can watch Phool Aur Aag in full HD quality on JioCinema if you have a Jio SIM card or a JioFiber connection. You can also download the movie to watch offline.
        • -
        • IMDb: This is a popular website that provides information and ratings for movies and shows. You can watch Phool Aur Aag on IMDb if you have an Amazon Prime Video subscription. You can also rent or buy the movie from Amazon.
        • -
        • JioSaavn: This is an online music streaming service that offers songs and albums in various languages. You can listen to the songs of Phool Aur Aag on JioSaavn if you have a JioSaavn Pro subscription. You can also download the songs to listen offline.
        • -
        -

        These are some of the ways you can enjoy Phool Aur Aag, a classic Hindi action movie from 1999. We hope you have a great time watching or listening to this movie.

        - -

        Phool Aur Aag is not only an action movie, but also a comedy and a drama. The movie has some memorable scenes and dialogues that will make you laugh and cry. Some of the highlights of the movie are:

        -
          -
        • The song "Main Gaaon Dil Gaaye", which features a cameo appearance by Harish and Ayesha Jhulka. The song is a romantic duet that shows the chemistry between Deva and Jayanti, the girl he saves from the snake.
        • -
        • The fight scene between Deva and Jaswant, the local leader who misunderstands Deva's intentions. The scene is a showcase of Mithun Chakraborty's and Jackie Shroff's martial arts skills and stunts.
        • -
        • The climax scene, where Deva confronts Rattan Choudhury, the main villain who plots against him. The scene is a thrilling showdown that reveals the truth behind Deva's past and his relationship with Choudhury.
        • -
        -

        Phool Aur Aag is a movie that has something for everyone. It is a blend of action, comedy and drama that will keep you entertained from start to finish. If you are looking for a classic Hindi movie to watch or download, Phool Aur Aag is a great choice.

        - -

        If you are wondering where to download Phool Aur Aag, you should be aware of the legal and ethical issues involved. Downloading movies from unauthorized sources can be illegal and can harm the movie industry. You should always respect the rights of the creators and distributors of the movies and pay for their work. You should also avoid downloading movies from websites that may contain viruses or malware that can damage your device or compromise your privacy.

        -

        Therefore, we recommend that you use only the official and legal platforms that we have mentioned above to watch or download Phool Aur Aag. These platforms are safe and secure and offer high-quality content. They also have a variety of other movies and shows that you can enjoy at your convenience.

        -

        We hope this article has helped you learn more about Phool Aur Aag, a classic Hindi action movie from 1999. We also hope that you have found a suitable way to watch or download this movie online or offline. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading.

        7b8c122e87
        -
        -
        \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Downloadspss20fullcrack64bit11 [TOP].md b/spaces/stomexserde/gpt4-ui/Examples/Downloadspss20fullcrack64bit11 [TOP].md deleted file mode 100644 index fb9d20604e88ef4016d0a4fee5c50d6a22ccdc45..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Downloadspss20fullcrack64bit11 [TOP].md +++ /dev/null @@ -1,79 +0,0 @@ -
        -

        How to Download and Install SPSS 20 Full Crack 64 Bit for Free

        -

If you are looking for software that can help you with data analysis and visualization, you might have heard of SPSS. SPSS stands for Statistical Package for the Social Sciences, and it is one of the most popular and widely used statistical software packages in this field. However, SPSS is not free software, and you need to pay a license fee to use it. Fortunately, there is a way to download and install SPSS 20 full crack 64 bit for free on your computer. In this article, we will show you how to do that step by step.

        -

        downloadspss20fullcrack64bit11


        Download ———>>> https://urlgoal.com/2uIa7N



        -

        What is SPSS and why do you need it?

        -

SPSS is a software package that allows you to perform various types of data analysis and visualization. It can handle both quantitative and qualitative data, and it can work with data in different formats such as Excel, SAS, and SQL. With SPSS, you can perform tasks such as:

        -
          -
        • Descriptive statistics: summarize and display the characteristics of your data using measures of central tendency, dispersion, frequency, etc.
        • -
        • Inferential statistics: test hypotheses and draw conclusions about your data using methods such as t-test, ANOVA, chi-square test, correlation, regression, etc.
        • -
        • Regression: model the relationship between one or more independent variables and a dependent variable using linear or nonlinear regression techniques.
        • -
• ANOVA: compare the means of two or more groups using one-way or two-way analysis of variance (a minimal scripted example is sketched after this list). By using SPSS 20 full crack 64 bit, you are not getting any updates, patches, fixes, or improvements from SPSS and its developers. You are also not getting any technical support or customer service from them. You may encounter problems or issues that are not resolved or addressed by SPSS 20 full crack 64 bit.
        • -
        -
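To make the statistical tasks above concrete, here is a minimal sketch of a one-way ANOVA in Python with scipy (Python is the language used elsewhere in this collection); the three groups of values are invented purely for illustration, and SPSS runs the same test through its menus:

```python
# Minimal one-way ANOVA sketch; the sample values below are invented.
from scipy import stats

group_a = [23.1, 25.4, 22.8, 24.9, 23.7]
group_b = [27.2, 26.8, 28.1, 27.5, 26.9]
group_c = [22.0, 21.5, 23.3, 22.8, 21.9]

# f_oneway tests whether the means of the groups differ significantly.
f_stat, p_value = stats.f_oneway(group_a, group_b, group_c)
print(f"F = {f_stat:.2f}, p = {p_value:.4f}")  # a small p suggests the means differ
```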

        These are some of the drawbacks and risks that you may face from using SPSS 20 full crack 64 bit. Therefore, we strongly advise you to use the official and legal version of SPSS instead. You can purchase a license or subscription from SPSS or its authorized distributors, or you can use the free trial version of SPSS for a limited time. This way, you can enjoy the full features and functions of SPSS without any hassle or worry.

        -

        How to download SPSS 20 full crack 64 bit for free?

        -

        If you still want to download SPSS 20 full crack 64 bit for free, you need to follow these steps:

        -
          -
        1. Search for SPSS 20 full crack 64 bit on the internet. You can use any search engine such as Google, Bing, Yahoo, etc. You will find many websites and links that claim to offer SPSS 20 full crack 64 bit for free download.
        2. -
        3. Select a website or link that looks trustworthy and reliable. You can check the reviews, ratings, comments, feedback, etc. of other users who have downloaded SPSS 20 full crack 64 bit from that website or link. You can also check the domain name, URL, and SSL certificate of the website or link to see if it is secure and legitimate.
        4. -
5. Download the file of SPSS 20 full crack 64 bit from the website or link. You need to check the file size, password, and virus scan before downloading. The file size should be around 900 MB, the password should be provided by the website or link, and the virus scan should be clean and clear. If the file size is too small or too large, the password is missing or incorrect, or the virus scan is positive or suspicious, do not download the file. A short script for recording a file's size and checksum is sketched after these steps.
        6. -
        7. Extract the file of SPSS 20 full crack 64 bit with Winrar or other software. You need to enter the password that you got from the website or link to extract the file. You will get a folder that contains the setup file and the crack file of SPSS 20 full crack 64 bit.
        8. -
        -

        These are the steps that you need to follow to download SPSS 20 full crack 64 bit for free. However, we remind you again that this is an illegal and unethical way of using SPSS, and we do not recommend or endorse it. We suggest you use the official and legal version of SPSS instead.
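As a practical aside to step 5 above: instead of eyeballing the file size, you can record the exact size and a SHA-256 checksum of any downloaded archive with a short script. This is a generic sketch (the file name is a hypothetical placeholder), and it is not a substitute for an actual antivirus scan:

```python
# Record the size and SHA-256 checksum of a downloaded archive.
# "downloaded_archive.zip" is a hypothetical placeholder name.
import hashlib
from pathlib import Path

archive = Path("downloaded_archive.zip")
size_mb = archive.stat().st_size / (1024 * 1024)

sha256 = hashlib.sha256()
with archive.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

print(f"size: {size_mb:.1f} MB")
print(f"sha256: {sha256.hexdigest()}")
```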

        -

        -

        How to install SPSS 20 full crack 64 bit on your computer?

        -

        If you have downloaded SPSS 20 full crack 64 bit for free, you need to follow these steps to install it on your computer:

        -
          -
        1. Run the setup file of SPSS 20 full crack 64 bit as administrator. You need to right-click on the setup file and select "Run as administrator" from the menu. This will start the installation process of SPSS 20 full crack 64 bit on your computer.
        2. -
        3. Follow the instructions on the screen to install SPSS 20 full crack 64 bit on your computer. You need to accept the terms and conditions, choose the destination folder, select the components, etc. The installation process may take some time depending on your computer speed and system.
        4. -
5. Copy and paste the crack file of SPSS 20 full crack 64 bit into the installation folder of SPSS 20 full crack 64 bit. You need to open the folder where you installed SPSS 20 full crack 64 bit, and then paste the crack file into it. This will replace the original file of SPSS 20 full crack 64 bit with the cracked one.
        6. -
        7. Register the software of SPSS 20 full crack 64 bit with the serial number provided. You need to open SPSS 20 full crack 64 bit on your computer, and then enter the serial number that you got from the website or link. This will activate and register the software of SPSS 20 full crack 64 bit on your computer.
        8. -
        -

        These are the steps that you need to follow to install SPSS 20 full crack 64 bit on your computer. However, we remind you again that this is an illegal and unethical way of using SPSS, and we do not recommend or endorse it. We suggest you use the official and legal version of SPSS instead.

        -

        How to use SPSS 20 full crack 64 bit for data analysis?

        -

        If you have installed SPSS 20 full crack 64 bit on your computer, you can use it for data analysis and visualization. Here are some basic steps that you can follow to use SPSS 20 full crack 64 bit for data analysis:

        -
          -
        1. Open SPSS 20 full crack 64 bit on your computer. You will see the main window of SPSS 20 full crack 64 bit, which has two views: Data View and Variable View. Data View shows the data in a spreadsheet format, while Variable View shows the properties and attributes of the variables.
        2. -
        3. Import or enter your data into SPSS 20 full crack 64 bit. You can import your data from various sources such as Excel, SAS, SQL, etc. by using the File menu and selecting Open or Import. You can also enter your data manually by typing or pasting it into the Data View cells.
        4. -
        5. Choose the appropriate analysis method from the menu or dialog box. You can choose from various types of analysis methods such as descriptive statistics, inferential statistics, regression, ANOVA, etc. by using the Analyze menu and selecting the submenus or options. You can also use the dialog boxes that appear after selecting an analysis method to specify the variables, parameters, options, etc.
        6. -
        7. Interpret and report the results using tables, charts, or graphs. You can view the results of your analysis in the Output window, which shows the tables, charts, or graphs that are generated by SPSS 20 full crack 64 bit. You can also edit, customize, or export these tables, charts, or graphs by using the Edit menu and selecting the submenus or options.
        8. -
        -

        These are some basic steps that you can follow to use SPSS 20 full crack 64 bit for data analysis and visualization. However, you should also learn more about the features and functions of SPSS 20 full crack 64 bit by reading the manuals, tutorials, guides, etc. that are available online or offline.
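As a rough illustration of the same four-step workflow (open, import, analyze, report) outside SPSS, here is a small Python sketch using pandas and scipy; the file name and column names are invented for illustration only:

```python
# Rough Python analogue of the import -> analyze -> report steps above.
# "survey_data.xlsx" and the column names are invented placeholders.
import pandas as pd
from scipy import stats

df = pd.read_excel("survey_data.xlsx")          # step 2: import the data

print(df[["age", "income"]].describe())         # descriptive statistics

r, p = stats.pearsonr(df["age"], df["income"])  # step 3: one possible method
print(f"Pearson r = {r:.2f} (p = {p:.4f})")     # step 4: report the result
```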

        -

        Conclusion

        -

        In this article, we have shown you how to download and install SPSS 20 full crack 64 bit for free on your computer. We have also explained what SPSS is and why you need it, what are the benefits and drawbacks of using SPSS 20 full crack 64 bit, and how to use SPSS 20 full crack 64 bit for data analysis and visualization. However, we have also warned you that using SPSS 20 full crack 64 bit is illegal and unethical, and we have advised you to use the official and legal version of SPSS instead. We hope that this article has been helpful and informative for you, and that you have learned something new and useful from it.

        -

        FAQs

        -

        Here are some frequently asked questions and answers about SPSS 20 full crack 64 bit:

        -

        Is SPSS 20 full crack 64 bit legal?

        -

        No, SPSS 20 full crack 64 bit is not legal. It is a cracked or modified version of SPSS that bypasses the license verification and activation process. By using SPSS 20 full crack 64 bit, you are violating the terms and conditions of SPSS and its developers, and you may face legal consequences and penalties if you are caught or reported.

        -

        What are the system requirements for SPSS 20 full crack 64 bit?

        -

        The system requirements for SPSS 20 full crack 64 bit are:

        -
          -
        • Operating system: Windows 7, 8.1, or 10 (64-bit)
        • -
        • Processor: Intel or AMD with SSE2 support
        • -
        • Memory: 1 GB RAM or more
        • -
        • Disk space: 1 GB or more
        • -
        • Display: 1024 x 768 resolution or higher
        • -
        • Internet connection: Required for download and installation
        • -
        -

        How to fix the font error in SPSS 20 full crack 64 bit?

        -

        If you encounter a font error in SPSS 20 full crack 64 bit, such as "The current font is not supported by the Unicode standard", you can try these steps to fix it:

        -
          -
        1. Close SPSS 20 full crack 64 bit.
        2. -
3. Go to the installation folder of SPSS 20 full crack 64 bit. It is usually located in C:\Program Files\IBM\SPSS\Statistics\20.
        4. -
        5. Find and delete the file named spssprod.inf.
        6. -
        7. Restart SPSS 20 full crack 64 bit.
        8. -
        -

        This should fix the font error in SPSS 20 full crack 64 bit. However, if the error persists, you may need to reinstall SPSS 20 full crack 64 bit or use a different version of SPSS.
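If you prefer to script the manual fix above, a few lines of Python can do the same thing. This sketch assumes the default installation path quoted in step 3:

```python
# Scripted version of the fix above: delete spssprod.inf, then restart SPSS.
from pathlib import Path

target = Path(r"C:\Program Files\IBM\SPSS\Statistics\20") / "spssprod.inf"

if target.exists():
    target.unlink()  # delete the file; restart SPSS afterwards, as in step 7
    print(f"Deleted {target}")
else:
    print(f"{target} not found")
```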

        -

        How to update SPSS 20 full crack 64 bit?

        -

        You cannot update SPSS 20 full crack 64 bit. Since it is a cracked or modified version of SPSS, it does not have any connection or access to the official updates, patches, fixes, or improvements from SPSS and its developers. If you want to use the latest version of SPSS with the latest features and functions, you need to use the official and legal version of SPSS instead.

        -

        How to uninstall SPSS 20 full crack 64 bit?

        -

        If you want to uninstall SPSS 20 full crack 64 bit from your computer, you can follow these steps:

        -
          -
        1. Go to the Control Panel of your computer and select Programs and Features.
        2. -
        3. Find and select IBM SPSS Statistics 20 from the list of programs and click Uninstall.
        4. -
        5. Follow the instructions on the screen to uninstall SPSS 20 full crack 64 bit from your computer.
        6. -
        7. Delete the installation folder of SPSS 20 full crack 64 bit if it still exists on your computer.
        8. -
        -

        This should uninstall SPSS 20 full crack 64 bit from your computer. However, if you encounter any problems or issues during the uninstallation process, you may need to use a third-party software or tool to remove SPSS 20 full crack 64 bit completely from your computer.

        b2dd77e56b
        -
        -
        \ No newline at end of file diff --git a/spaces/studiobrn/SplitTrack/tests/common_utils/__init__.py b/spaces/studiobrn/SplitTrack/tests/common_utils/__init__.py deleted file mode 100644 index 74ffcfef96fec35c99b2a1a053a61f44f7a8bbe9..0000000000000000000000000000000000000000 --- a/spaces/studiobrn/SplitTrack/tests/common_utils/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from .temp_utils import TempDirMixin -from .wav_utils import get_batch_white_noise, get_white_noise, save_wav diff --git a/spaces/sub314xxl/Analog-Diffusion/README.md b/spaces/sub314xxl/Analog-Diffusion/README.md deleted file mode 100644 index 00af8ea36021a31457f15db365f0fa401e93fa6b..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/Analog-Diffusion/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Analog Diffusion -emoji: 💻 -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false -duplicated_from: wavymulder/Analog-Diffusion ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/ApowerREC 1.3.2.8 Crack [CracksMind].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/ApowerREC 1.3.2.8 Crack [CracksMind].md deleted file mode 100644 index edabc3fdf634a50b15eb9d00d56e5dac71aea44b..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/ApowerREC 1.3.2.8 Crack [CracksMind].md +++ /dev/null @@ -1,5 +0,0 @@ - -

        20170125[/url]
        Fasta 25057027 513 iMGSRC.RU [url= in [url=
        Sweet 16 years girl (EZ), 12451342862 iMGSRC.RU [url= A Powerdream Beginners Guide the How To Ipods iPhone PSP Gps Android 3 [url= Taiseertaids [url= New In Model 2017, New In Model 2017 Buy New In Model 2017 iMGSRC.RU [url=
        [url= fasta 25057027 iMGSRC.RU [url= NatttureCemFrawlHem [url=
        [url= Music Mp3 Download Mp3 Song I Love Cheap Thrills (6 MB) Free Full Download All Music[/url] briletypeAbumunult [url= otro objetivo de mi pasión en la vida cracker Second download Virus free ([url= [url= Parmalat - Cipanipoza Formula [url= 13hive-hip-hop-mixtapes-download-for-free-mp3-[url= Nokia Lumia Phones 2013 - Nokia Lumia 1320 - Nokia Lumia 1520 - Nokia Lumia 1110 - Nokia Lumia 2520 - Nokia Lumia 1320 - Nokia Lumia 1520 - Nokia Lumia 2520 - Nokia Lumia 521 - Nokia Lumia 1520 Download Full Version [url= NatttureCemFrawlHem [url= blu-ray tippin [url= ReFWocheNuththegodat [url= 2 en ligne.Co http: mahu 1080p mp4 iMGSRC.RU[/url]Lineage-os-no-sound-fix [url= download demuxer taks2 taks2.zip [url=
        [url= skins-and-clocks-championships-rom-2014 [url=
        [url= hechizo-de-vela-negra-para-destruir-enemigo [url= blu-ray tippin iMGSRC.RU[/url]DrediuhIrrivataree [url= a em o armageddon [url= free direct download.or download dontobro [url= http: download nova smalltown.rar iMGSRC.RU[/url]
        3D Logo Creator v2.0.1 [url= [url= Stags Live Stream Online[/url] Emaill Not. Deutschland [url= web-x-site.ru [url=311b03d2 [url= nder den [url=
        Tracy Wonton - Funky 100pc [url= ReFWocheNuththegodat [url= 100 Gallon Solar Heater ; [url= flissinneple [url=
        sexy girl nude video [url= download Fake Fickt [url= urlremix.ru [url= http: Flissinneple [url= Stags Live Stream Online[/url] linea del emisor hd iMGSRC.

        -

        ApowerREC 1.3.2.8 Crack [CracksMind]


        Download ❤❤❤ https://cinurl.com/2uEY1B



        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Highly Compressed Porn Movies.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Highly Compressed Porn Movies.md deleted file mode 100644 index 6e4023032d8b913605f873756a9beb6619f0951c..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Highly Compressed Porn Movies.md +++ /dev/null @@ -1,44 +0,0 @@ -

        highly compressed porn movies


        Downloadhttps://cinurl.com/2uEYhO



        -
        -can often be found in a search for "tube". The "tube" website also offers many porn movies. Many porn movies today are made for downloads on a pc. In order to download, you need to have "divx" and other plugins. Some sex movie downloads are often labeled "computer video". - -Many people enjoy downloading movies and using them for a long time without any interruption. - -Some of the most popular sites for downloading are - -some kind of free porn website. The "tube" website has over a million videos available for free. Many other free sites often allow the movies to be viewed for free for a limited time. - -After the time has expired, the videos are removed. - -Some of the top free websites for downloading are - -Many people enjoy watching videos on the computer. For some people, the naked bodies of women or girls in the videos may be enough. For others, the sex scenes are the most interesting part. Most people enjoy watching nude movies. - -Some of the top free websites for downloading include - -Downloading videos is not only restricted to video porn movies. It is also used for - -If you are looking for videos of kissing, some of the top free websites for downloading include - -The best sites for downloading are the free ones. Many people appreciate the availability of videos. Many people who use the web often use a search engine. - -The "Tube" website also has a very long history of offering videos for free. Many of these movies are available to watch without cost. There are many sites that offer many naked videos. These naked videos are very popular and have more than a million videos. - -Some of the top free sites for downloading are - -Many people enjoy watching videos on the pc. If you are looking for an extensive collection of porn, you can find many different sites on the internet. Many porn movies can be downloaded for free. - -Some of the top websites for downloading are - -Free downloadable movies often have the highest quality. Many people have a membership to many porn sites. Many people will have a limited download. - -There are many good websites for downloading. They offer an extensive collection of movies. Many people enjoy watching free movies. - -There are many great sites that offer a huge collection of videos. These videos often include all types of movies. Some of the top websites for downloading include - -Some of the most popular download sites are - -Downloading videos on the computer is not only for sex movies 4fefd39f24
        -
        -
        -

        diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Hip Hop Ejay 5 ((TOP)) Crack.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Hip Hop Ejay 5 ((TOP)) Crack.md deleted file mode 100644 index 2c5f9eeb410cc36e1e0b5defc7867aeb9d9b0224..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Hip Hop Ejay 5 ((TOP)) Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

        hip hop ejay 5 crack


        Download File ===== https://cinurl.com/2uEXyS



        - - d5da3c52bf
        -
        -
        -

        diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Ahnlab V3 Internet Security 8.0 Keygen Software !FULL!.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Ahnlab V3 Internet Security 8.0 Keygen Software !FULL!.md deleted file mode 100644 index 4cfdb7e838b1a02253caf4ed5325585acbcb4076..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Ahnlab V3 Internet Security 8.0 Keygen Software !FULL!.md +++ /dev/null @@ -1,6 +0,0 @@ -

        ahnlab v3 internet security 8.0 keygen software


        Downloadhttps://urluss.com/2uCGAO



        -
        -Acronis Disk Director Build 96 + Key + BootCD - software world Freeware ... Leading ... Ahnlab V3 Internet Security 8.0 Full Crack Antivirus 1fdad05405
        -
        -
        -

        diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/nl_head.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/nl_head.py deleted file mode 100644 index 3eee424199e6aa363b564e2a3340a070db04db86..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/nl_head.py +++ /dev/null @@ -1,49 +0,0 @@ -import torch -from annotator.uniformer.mmcv.cnn import NonLocal2d - -from ..builder import HEADS -from .fcn_head import FCNHead - - -@HEADS.register_module() -class NLHead(FCNHead): - """Non-local Neural Networks. - - This head is the implementation of `NLNet - `_. - - Args: - reduction (int): Reduction factor of projection transform. Default: 2. - use_scale (bool): Whether to scale pairwise_weight by - sqrt(1/inter_channels). Default: True. - mode (str): The nonlocal mode. Options are 'embedded_gaussian', - 'dot_product'. Default: 'embedded_gaussian.'. - """ - - def __init__(self, - reduction=2, - use_scale=True, - mode='embedded_gaussian', - **kwargs): - super(NLHead, self).__init__(num_convs=2, **kwargs) - self.reduction = reduction - self.use_scale = use_scale - self.mode = mode - self.nl_block = NonLocal2d( - in_channels=self.channels, - reduction=self.reduction, - use_scale=self.use_scale, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - mode=self.mode) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - output = self.nl_block(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/embeddings/patch_embedding.py b/spaces/szukevin/VISOR-GPT/train/tencentpretrain/embeddings/patch_embedding.py deleted file mode 100644 index 7b2ffb56a72d9da3ef471be6869375e46de2625c..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/embeddings/patch_embedding.py +++ /dev/null @@ -1,31 +0,0 @@ -import torch -import torch.nn as nn - - -class PatchEmbedding(nn.Module): - """ - Image to Patch Embedding for Vision Transformer. - """ - - def __init__(self, args, _): - super(PatchEmbedding, self).__init__() - self.cls_emb = nn.Parameter(torch.zeros(1, 1, args.emb_size)) - self.image_height = args.image_height - self.image_width = args.image_width - patch_size = (args.patch_size, args.patch_size) - channels_num = args.channels_num - - self.projection = nn.Conv2d(channels_num, args.emb_size, kernel_size=patch_size, stride=patch_size, bias=False) - - def forward(self, src, _): - # batch_size, channels_num, height, width - batch_size, _, height, width = src.shape - if height != self.image_height or width != self.image_width: - raise ValueError( - f"Input image size ({height}*{width}) doesn't match model ({self.image_height}*{self.image_width})." 
- ) - patch_emb = self.projection(src).flatten(2).transpose(1, 2) - cls_emb = self.cls_emb.expand(batch_size, -1, -1).to(patch_emb.device) - patch_emb = torch.cat((cls_emb, patch_emb), dim=1) - - return patch_emb diff --git a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/layers/relative_position_embedding.py b/spaces/szukevin/VISOR-GPT/train/tencentpretrain/layers/relative_position_embedding.py deleted file mode 100644 index 3943cb4e98031dc5cd232cdaecede39004389605..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/layers/relative_position_embedding.py +++ /dev/null @@ -1,86 +0,0 @@ -import math -import torch -import torch.nn as nn - -class RelativePositionEmbedding(nn.Module): - """ Relative Position Embedding - https://arxiv.org/abs/1910.10683 - https://github.com/bojone/bert4keras/blob/db236eac110a67a587df7660f6a1337d5b2ef07e/bert4keras/layers.py#L663 - https://github.com/huggingface/transformers/blob/master/src/transformers/models/t5/modeling_t5.py#L344 - """ - def __init__(self, heads_num, bidirectional = True, num_buckets = 32, max_distance = 128): - super(RelativePositionEmbedding, self).__init__() - self.num_buckets = num_buckets - self.bidirectional = bidirectional - self.max_distance = max_distance - self.relative_attention_bias = nn.Embedding(self.num_buckets, heads_num) - - def forward(self, encoder_hidden, decoder_hidden): - """ - Compute binned relative position bias - Args: - encoder_hidden: [batch_size x seq_length x emb_size] - decoder_hidden: [batch_size x seq_length x emb_size] - Returns: - position_bias: [1 x heads_num x seq_length x seq_length] - """ - query_length = encoder_hidden.size()[1] - key_length = decoder_hidden.size()[1] - - context_position = torch.arange(query_length, dtype=torch.long)[:, None] - memory_position = torch.arange(key_length, dtype=torch.long)[None, :] - relative_position = memory_position - context_position # shape (query_length, key_length) - relative_position_bucket = self.relative_position_bucket( - relative_position, # shape (query_length, key_length) - bidirectional=self.bidirectional, - num_buckets=self.num_buckets, - max_distance=self.max_distance - ) - relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device) - values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) - values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) - return values - - def relative_position_bucket(self, relative_position, bidirectional, num_buckets, max_distance): - """ - Adapted from Mesh Tensorflow: - https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 - Translate relative position to a bucket number for relative attention. The relative position is defined as - memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to - position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for - small absolute relative_position and larger buckets for larger absolute relative_positions. All relative - positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
- This should allow for more graceful generalization to longer sequences than the model has been trained on - Args: - relative_position: an int32 Tensor - bidirectional: a boolean - whether the attention is bidirectional - num_buckets: an integer - max_distance: an integer - Returns: - a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) - """ - relative_buckets = 0 - if bidirectional: - num_buckets //= 2 - relative_buckets += (relative_position > 0).to(torch.long) * num_buckets - relative_position = torch.abs(relative_position) - else: - relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) - # now relative_position is in the range [0, inf) - - # half of the buckets are for exact increments in positions - max_exact = num_buckets // 2 - is_small = relative_position < max_exact - - # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance - relative_postion_if_large = max_exact + ( - torch.log(relative_position.float() / max_exact) - / math.log(max_distance / max_exact) - * (num_buckets - max_exact) - ).to(torch.long) - relative_postion_if_large = torch.min( - relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1) - ) - - relative_buckets += torch.where(is_small, relative_position, relative_postion_if_large) - return relative_buckets diff --git a/spaces/t13718236382/bingoGPT4/postcss.config.js b/spaces/t13718236382/bingoGPT4/postcss.config.js deleted file mode 100644 index 33ad091d26d8a9dc95ebdf616e217d985ec215b8..0000000000000000000000000000000000000000 --- a/spaces/t13718236382/bingoGPT4/postcss.config.js +++ /dev/null @@ -1,6 +0,0 @@ -module.exports = { - plugins: { - tailwindcss: {}, - autoprefixer: {}, - }, -} diff --git a/spaces/tang155/bingo/src/components/header.tsx b/spaces/tang155/bingo/src/components/header.tsx deleted file mode 100644 index dc298b722154d1ac6d7a7e148204605562d6cc58..0000000000000000000000000000000000000000 --- a/spaces/tang155/bingo/src/components/header.tsx +++ /dev/null @@ -1,12 +0,0 @@ -import * as React from 'react' -import { UserMenu } from './user-menu' - -export async function Header() { - return ( -
        -
        - -
        -
        - ) -} diff --git a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/serve/__init__.py b/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/serve/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/terfces0erbo/CollegeProjectV2/Kitab Zabur Asli.pdfl !EXCLUSIVE!.md b/spaces/terfces0erbo/CollegeProjectV2/Kitab Zabur Asli.pdfl !EXCLUSIVE!.md deleted file mode 100644 index 94888b20beebcc2388c545fa6ae831a0c73c8dd8..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Kitab Zabur Asli.pdfl !EXCLUSIVE!.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Kitab Zabur Asli.pdfl


        Download File →→→ https://bytlly.com/2uGkIZ



        - -Reading the Holy ZABUR (Psalter) Vs. Holy Taurat (Torah) Vs. Holy Bible (Injil) Vs. Holy Koran. KITAB ZABUR VS KITAB TAURAT VS KITAB INJIL VS KITAB AL KORAN. What is the difference? At first glance, it seems that all three books are similar to each other. Indeed, there are books that are similar to our own, with which we have known since childhood. For example, the Bible and the Koran, which we know as our own, although they are far from identical in content, form and style. However, there are other books that differ from those known to us in that their content and form are completely different. a book that narrates directly from the imam. - ed.) He says: “Whoever finds any of you in the desert, and he wants to kill him, let him kill him for the love of Allah. It's better for him than if he survived. And if anyone finds any of you in the city and he wants to kill him, let him kill him for the love of Allah. Indeed, Allah loves those who do good, and Allah is the best of those who do good.” (Abu Dawud; Ibn Maja; al-Nasa'i; Ahmad). We all know very well that Muslims do not like to kill, and this is true. 8a78ff9644
        -
        -
        -

        diff --git a/spaces/teven-projects/calculator/README.md b/spaces/teven-projects/calculator/README.md deleted file mode 100644 index 4865afc6ab4f8a0bd9999b82a4a2797145e6b03a..0000000000000000000000000000000000000000 --- a/spaces/teven-projects/calculator/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Calculator -emoji: 🏃 -colorFrom: purple -colorTo: indigo -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/theekshana/boardpac_chat_app_test/qaPipeline.py b/spaces/theekshana/boardpac_chat_app_test/qaPipeline.py deleted file mode 100644 index c2e41c5c40910d5db89a3f665e31e3c22f1ba527..0000000000000000000000000000000000000000 --- a/spaces/theekshana/boardpac_chat_app_test/qaPipeline.py +++ /dev/null @@ -1,352 +0,0 @@ -""" -Python Backend API to chat with private data - -08/14/2023 -D.M. Theekshana Samaradiwakara -""" - -import os -import time - -from dotenv import load_dotenv - -from langchain.chains import RetrievalQA -from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler - -from langchain.llms import GPT4All -from langchain.llms import HuggingFaceHub -from langchain.chat_models import ChatOpenAI -from langchain.chat_models import ChatAnyscale - -# from langchain.retrievers.self_query.base import SelfQueryRetriever -# from langchain.chains.query_constructor.base import AttributeInfo - -# from chromaDb import load_store -from faissDb import load_FAISS_store - -from langchain.agents import ZeroShotAgent, Tool, AgentExecutor - -from langchain.prompts import PromptTemplate -from langchain.chains import LLMChain, ConversationalRetrievalChain -from conversationBufferWindowMemory import ConversationBufferWindowMemory -from langchain.memory import ReadOnlySharedMemory - -load_dotenv() - -#gpt4 all model -gpt4all_model_path = os.environ.get('GPT4ALL_MODEL_PATH') -model_n_ctx = os.environ.get('MODEL_N_CTX') -model_n_batch = int(os.environ.get('MODEL_N_BATCH',8)) -target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS',4)) - -openai_api_key = os.environ.get('OPENAI_API_KEY') -anyscale_api_key = os.environ.get('ANYSCALE_ENDPOINT_TOKEN') - -verbose = os.environ.get('VERBOSE') - -# activate/deactivate the streaming StdOut callback for LLMs -callbacks = [StreamingStdOutCallbackHandler()] - -memory = ConversationBufferWindowMemory( - memory_key="chat_history", - input_key="question", - return_messages=True, - k=3 - ) - -readonlymemory = ReadOnlySharedMemory(memory=memory) - -class Singleton: - __instance = None - @staticmethod - def getInstance(): - """ Static access method. """ - if Singleton.__instance == None: - Singleton() - return Singleton.__instance - def __init__(self): - """ Virtually private constructor. 
""" - if Singleton.__instance != None: - raise Exception("This class is a singleton!") - else: - Singleton.__instance = QAPipeline() - -def get_local_LLAMA2(): - import torch - from transformers import AutoTokenizer, AutoModelForCausalLM - - tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-13b-chat-hf", - # use_auth_token=True, - ) - - model = AutoModelForCausalLM.from_pretrained("NousResearch/Llama-2-13b-chat-hf", - device_map='auto', - torch_dtype=torch.float16, - use_auth_token=True, - # load_in_8bit=True, - # load_in_4bit=True - ) - from transformers import pipeline - - pipe = pipeline("text-generation", - model=model, - tokenizer= tokenizer, - torch_dtype=torch.bfloat16, - device_map="auto", - max_new_tokens = 512, - do_sample=True, - top_k=30, - num_return_sequences=1, - eos_token_id=tokenizer.eos_token_id - ) - - from langchain import HuggingFacePipeline - LLAMA2 = HuggingFacePipeline(pipeline = pipe, model_kwargs = {'temperature':0}) - print(f"\n\n> torch.cuda.is_available(): {torch.cuda.is_available()}") - print("\n\n> local LLAMA2 loaded") - return LLAMA2 - -class QAPipeline: - - def __init__(self): - - print("\n\n> Initializing QAPipeline:") - self.llm_name = None - self.llm = None - - self.dataset_name = None - self.vectorstore = None - - self.qa_chain = None - self.agent = None - - - def run(self,query, model, dataset): - - if (self.llm_name != model) or (self.dataset_name != dataset) or (self.qa_chain == None): - self.set_model(model) - self.set_vectorstore(dataset) - self.set_qa_chain() - - # Get the answer from the chain - start = time.time() - res = self.qa_chain(query) - # answer, docs = res['result'],res['source_documents'] - end = time.time() - - # Print the result - print("\n\n> Question:") - print(query) - print(f"\n> Answer (took {round(end - start, 2)} s.):") - print( res) - - return res - - def run_agent(self,query, model, dataset): - - try: - - if (self.llm_name != model) or (self.dataset_name != dataset) or (self.agent == None): - self.set_model(model) - self.set_vectorstore(dataset) - self.set_qa_chain_with_agent() - - # Get the answer from the chain - start = time.time() - res = self.agent(query) - # answer, docs = res['result'],res['source_documents'] - end = time.time() - - # Print the result - print("\n\n> Question:") - print(query) - print(f"\n> Answer (took {round(end - start, 2)} s.):") - print( res) - - return res["output"] - - except Exception as e: - # logger.error(f"Answer retrieval failed with {e}") - print(f"> QAPipeline run_agent Error : {e}")#, icon=":books:") - return - - - def set_model(self,model_type): - if model_type != self.llm_name: - match model_type: - case "gpt4all": - # self.llm = GPT4All(model=gpt4all_model_path, n_ctx=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=verbose) - self.llm = GPT4All(model=gpt4all_model_path, max_tokens=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=verbose) - # self.llm = HuggingFaceHub(repo_id="nomic-ai/gpt4all-j", model_kwargs={"temperature":0.001, "max_length":1024}) - case "google/flan-t5-xxl": - self.llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.001, "max_length":1024}) - case "tiiuae/falcon-7b-instruct": - self.llm = HuggingFaceHub(repo_id=model_type, model_kwargs={"temperature":0.001, "max_length":1024}) - case "openai": - self.llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0) - case "Deci/DeciLM-6b-instruct": - self.llm = ChatOpenAI(model_name="Deci/DeciLM-6b-instruct", 
temperature=0) - case "Deci/DeciLM-6b": - self.llm = ChatOpenAI(model_name="Deci/DeciLM-6b", temperature=0) - case "local/LLAMA2": - self.llm = get_local_LLAMA2() - case "anyscale/Llama-2-13b-chat-hf": - self.llm = ChatAnyscale(anyscale_api_key=anyscale_api_key,temperature=0, model_name='meta-llama/Llama-2-13b-chat-hf', streaming=False) - case "anyscale/Llama-2-70b-chat-hf": - self.llm = ChatAnyscale(anyscale_api_key=anyscale_api_key,temperature=0, model_name='meta-llama/Llama-2-70b-chat-hf', streaming=False) - case _default: - # raise exception if model_type is not supported - raise Exception(f"Model type {model_type} is not supported. Please choose a valid one") - - self.llm_name = model_type - - - - def set_vectorstore(self, dataset): - if dataset != self.dataset_name: - # self.vectorstore = load_store(dataset) - self.vectorstore = load_FAISS_store() - print("\n\n> vectorstore loaded:") - self.dataset_name = dataset - - def set_qa_chain(self): - - self.qa_chain = RetrievalQA.from_chain_type( - llm=self.llm, - chain_type="stuff", - retriever = self.vectorstore.as_retriever(), - # retriever = self.vectorstore.as_retriever(search_kwargs={"k": target_source_chunks} - return_source_documents= True - ) - - - def set_qa_chain_with_agent(self): - - try: - - # Define a custom prompt - general_qa_template = ( - """[INST]<> You are the AI of company boardpac which provide services to company board members related to banking and financial sector. You should only continue the conversation and reply to users questions like welcomes, greetings and goodbyes. - If you dont know the answer say you dont know, dont try to makeup answers. Answer should be short and simple as possible. Start the answer with code word Boardpac AI (chat): <> - Conversation: {chat_history} - Question: {question} [/INST]""" - ) - - general_qa_chain_prompt = PromptTemplate(input_variables=["question", "chat_history"], template=general_qa_template) - - general_qa_chain = LLMChain( - llm=self.llm, - prompt=general_qa_chain_prompt, - verbose=True, - memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory - ) - - general_qa_chain_tool = Tool( - name="general qa", - func= general_qa_chain.run, - description='''use this when only you need to answer questions like welcomes, greetings and goodbyes. - Input should be a fully formed question.''', - return_direct=True, - - ) - - # Define a custom prompt - retrieval_qa_template = ( - """[INST]<> You are the AI of company boardpac which provide services to company board members. Only answer questions related to Banking and Financial Services Sector like Banking & Financial regulations, legal framework, governance framework, compliance requirements as per Central Bank regulations. - please answer the question based on the chat history and context information provided below related to central bank acts published in various years. The published year is mentioned as the metadata 'year' of each source document. - The content of a bank act of a past year can updated by a bank act from a latest year. Always try to answer with latest information and mention the year which information extracted. - If you dont know the answer say you dont know, dont try to makeup answers. Answer should be short and simple as possible. 
Start the answer with code word Boardpac AI (QA): <> - Conversation: {chat_history} - Context: {context} - Question : {question} [/INST]""" - ) - - retrieval_qa_chain_prompt = PromptTemplate( - input_variables=["question", "context", "chat_history"], - template=retrieval_qa_template - ) - - document_combine_prompt = PromptTemplate( - input_variables=["source","year", "page","page_content"], - template= - """ source: {source}, year: {year}, page: {page}, page content: {page_content} """ - ) - - bank_regulations_qa = ConversationalRetrievalChain.from_llm( - llm=self.llm, - chain_type="stuff", - retriever = self.vectorstore.as_retriever(), - # retriever = self.vectorstore.as_retriever( - # search_type="mmr", - # search_kwargs={ - # 'k': 6, - # # 'lambda_mult': 0.1, - # 'fetch_k': 50}, - # # search_type="similarity_score_threshold", - # # search_kwargs={"score_threshold": .5} - # ), - return_source_documents= True, - return_generated_question= True, - get_chat_history=lambda h : h, - combine_docs_chain_kwargs={ - "prompt": retrieval_qa_chain_prompt, - "document_prompt": document_combine_prompt, - }, - verbose=True, - memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory - ) - - bank_regulations_qa_tool = Tool( - name="bank regulations", - func= lambda question: bank_regulations_qa({"question": question}), - description='''Use this more when you need to answer questions about Banking and Financial Services Sector like Banking & Financial regulations, legal framework, governance framework, compliance requirements as per Central Bank regulations. - Input should be a fully formed question.''', - return_direct=True, - ) - - tools = [ - bank_regulations_qa_tool, - general_qa_chain_tool - ] - - prefix = """<> You are the AI of company boardpac which provide services to company board members related to banking and financial sector. Have a conversation with the user, answering the following questions as best you can. You have access to the following tools:""" - suffix = """Begin! " - {agent_scratchpad} - : {chat_history} - <> - - [INST] - : {question} - [/INST]""" - - agent_prompt = ZeroShotAgent.create_prompt( - tools, - prefix=prefix, - suffix=suffix, - input_variables=["question", "chat_history", "agent_scratchpad"], - ) - - llm_chain = LLMChain(llm=self.llm, prompt=agent_prompt) - - agent = ZeroShotAgent( - llm_chain=llm_chain, - tools=tools, - verbose=True, - ) - - agent_chain = AgentExecutor.from_agent_and_tools( - agent=agent, - tools=tools, - verbose=True, - memory=memory, - handle_parsing_errors=True, - ) - - self.agent = agent_chain - - print(f"\n> agent_chain created") - - except Exception as e: - # logger.error(f"Answer retrieval failed with {e}") - print(f"> QAPipeline set_qa_chain_with_agent Error : {e}")#, icon=":books:") - return diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (ram jaane full movie hd 720p) - Find Out What Ram Knows in this Gripping Drama.md b/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (ram jaane full movie hd 720p) - Find Out What Ram Knows in this Gripping Drama.md deleted file mode 100644 index 521c2ec6fbd331f30fbf160d8f6756eb5f3688de..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (ram jaane full movie hd 720p) - Find Out What Ram Knows in this Gripping Drama.md +++ /dev/null @@ -1,52 +0,0 @@ -
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (ram jaane full movie hd 720p) - Find Out What Ram Knows in this Gripping Drama.md b/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (ram jaane full movie hd 720p) - Find Out What Ram Knows in this Gripping Drama.md
deleted file mode 100644
index 521c2ec6fbd331f30fbf160d8f6756eb5f3688de..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (ram jaane full movie hd 720p) - Find Out What Ram Knows in this Gripping Drama.md
+++ /dev/null
@@ -1,52 +0,0 @@
-

        How to Watch Ram Jaane Full Movie Online in HD Quality

        -

        Ram Jaane is a 1995 Hindi action crime drama film starring Shah Rukh Khan, Juhi Chawla and Vivek Mushran. The film tells the story of an orphan who grows up to become a ruthless gangster in the city. The film was directed by Rajiv Mehra and produced by Parvesh C. Mehra.

        -

        If you are a fan of Shah Rukh Khan or Bollywood movies, you might be interested in watching Ram Jaane online in HD quality. However, finding a reliable and legal source to stream or download the movie can be challenging. Here are some tips on how to watch Ram Jaane full movie online in HD quality.

        -

        -
          -
        • One option is to use a paid subscription service like Netflix, Amazon Prime Video or Hotstar. These platforms offer a wide range of movies and shows in high definition and with subtitles. You can also download the movie for offline viewing on your device. However, you will need to pay a monthly or annual fee to access these services.
        • -
        • Another option is to use a free streaming site like Movies-Watch.com.pk or Watch-Movies.pk. These sites claim to offer Ram Jaane full movie online in HD quality for free. However, you should be careful when using these sites as they may contain ads, pop-ups, malware or viruses that can harm your device or compromise your privacy. You should also check the legality and quality of the content before streaming or downloading it.
        • -
        • A third option is to use a torrent site like QuirkyByte.com or Opearveli.mystrikingly.com. These sites allow you to download Ram Jaane full movie in 720p high definition BluRay format using a peer-to-peer network. However, you should be aware that torrenting is illegal in many countries and can expose you to legal issues or cyber threats. You should also use a VPN service to protect your identity and data when using torrent sites.
        • -
        -

        These are some of the ways you can watch Ram Jaane full movie online in HD quality. However, you should always respect the rights of the creators and distributors of the movie and avoid piracy or illegal streaming. You can also buy or rent the DVD or BluRay of the movie from a legitimate source if you want to enjoy it in the best quality possible.

        - -

        Ram Jaane is a movie that has a lot of action, drama and romance. The movie is inspired by the 1938 Hollywood movie 'Angels with Dirty Faces'[^1^]. The movie shows the contrast between the lives of two orphans who take different paths in life. Ram Jaane becomes a notorious gangster who is feared by everyone, while Murli becomes a social worker who runs an orphanage and helps the poor. Bela is the childhood friend of both Ram Jaane and Murli, who loves Murli but is also attracted to Ram Jaane's charisma.

        -


        -

        The movie has some memorable scenes and dialogues that showcase Shah Rukh Khan's acting skills. He plays the role of Ram Jaane with intensity and emotion, making the audience sympathize with his character despite his flaws. He also displays his trademark style and charm in the songs and dances. Juhi Chawla plays the role of Bela with grace and innocence, while Vivek Mushran plays the role of Murli with sincerity and conviction. The movie also has some comic relief from Pankaj Kapur as Pannu, Ram Jaane's sidekick, and Deven Verma as Daddu Uncle, Bela's guardian.

        -

        Ram Jaane is a movie that has a lot of entertainment value for the fans of Shah Rukh Khan and Bollywood movies. The movie has a good mix of action, drama, romance and comedy. The movie also has a message about the importance of choosing the right path in life and the consequences of one's actions. The movie is available online in HD quality on various platforms, but one should be careful about the legality and safety of the sources. Ram Jaane is a movie that can be enjoyed by anyone who likes a good masala entertainer.

        -
        -
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Play Minecraft Bedrock Edition for Free with a Cracked Launcher.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Play Minecraft Bedrock Edition for Free with a Cracked Launcher.md
deleted file mode 100644
index 7e93a3f521ab840119bd87039ae21fa6a8d90215..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Play Minecraft Bedrock Edition for Free with a Cracked Launcher.md
+++ /dev/null
@@ -1,19 +0,0 @@
-
        -

        How to Download and Install Minecraft Bedrock Cracked Launcher

        -

        Minecraft Bedrock Edition is a popular version of the sandbox game that runs on various platforms, including Windows 10, Xbox One, Nintendo Switch, and mobile devices. However, if you want to play the game without buying it from the official store, you will need a cracked launcher that can bypass the authentication process and let you access the game for free.

        -




        -

        In this article, we will show you how to download and install Minecraft Bedrock cracked launcher on your PC. Follow these steps carefully and enjoy the game!

        -

        Step 1: Download the Cracked Launcher

        -

        The first thing you need to do is to download the cracked launcher from a reliable source. There are many websites that offer this kind of software, but some of them may contain viruses or malware that can harm your computer. Therefore, we recommend you to use this link: https://example.com/minecraft-bedrock-cracked-launcher.zip. This is a safe and tested download link that will give you the latest version of the cracked launcher.

        -

        Once you click on the link, you will be redirected to a download page where you need to complete a short survey or offer to unlock the file. This is a necessary step to prevent bots and spam from abusing the service. After you complete the survey or offer, the download will start automatically. Save the file to your preferred location on your PC.

        -

        -

        Step 2: Extract and Run the Cracked Launcher

        -

        After you download the file, you need to extract it using a program like WinRAR or 7-Zip. You will get a folder named "Minecraft Bedrock Cracked Launcher" that contains several files and folders. Open the folder and double-click on the file named "MinecraftLauncher.exe". This will run the cracked launcher and open a window like this:

-[Image: screenshot of the Minecraft launcher window] -

        As you can see, the launcher has a simple and user-friendly interface that allows you to customize your game settings, such as your username, language, resolution, and more. You can also choose between different versions of Minecraft Bedrock Edition, such as 1.17.10, 1.16.221, or 1.14.60. To play the game, simply click on the "Play" button at the bottom right corner of the window.

        -

        Step 3: Enjoy Minecraft Bedrock Edition for Free!

        -

        Congratulations! You have successfully downloaded and installed Minecraft Bedrock cracked launcher on your PC. Now you can enjoy the game for free without any limitations or restrictions. You can explore different worlds, build amazing structures, play with friends online, and more.

        -

        However, please note that using a cracked launcher may have some drawbacks, such as not being able to access official servers or updates, encountering bugs or errors, or risking your account being banned. Therefore, we advise you to use this method at your own risk and discretion. If you like the game and want to support its developers, we suggest you to buy it from the official store.

        -

        We hope this article was helpful and informative for you. If you have any questions or problems regarding Minecraft Bedrock cracked launcher, feel free to leave a comment below or contact us via email. We will try our best to assist you. Thank you for reading and have fun playing Minecraft!

        -
        -
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/IDT 92HD81B1X Intel 82801GBM ICH7 M - High Definition Audio Driver Download and Installation Guide.md b/spaces/tialenAdioni/chat-gpt-api/logs/IDT 92HD81B1X Intel 82801GBM ICH7 M - High Definition Audio Driver Download and Installation Guide.md
deleted file mode 100644
index 70b0981b8ad868c56e8c5982c91626d4caa6276c..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/IDT 92HD81B1X Intel 82801GBM ICH7 M - High Definition Audio Driver Download and Installation Guide.md
+++ /dev/null
@@ -1,56 +0,0 @@
-

        How to Install IDT 92HD81B1X Audio Driver on Intel 82801GBM ICH7 M Chipset

        -

        If you have a laptop or desktop computer with an Intel 82801GBM ICH7 M chipset and you want to enjoy high definition audio, you may need to install the IDT 92HD81B1X audio driver. This driver is compatible with Windows 8 (32/64-bit) and supports the IDT 92HD87B1 High Definition Audio Codec. Here are the steps to install the driver:

        -

        -
          -
        1. Download the driver file from Dell's website. The file name is Audio_IDT_W8_X02_A00_Setup-CGH70_ZPE.exe and the file size is 22.51 MB.
        2. -
        3. Extract the file to a folder on your hard drive.
        4. -
        5. Open the folder and run the setup.exe file.
        6. -
        7. Follow the on-screen instructions to complete the installation.
        8. -
        9. Restart your computer and enjoy high definition audio.
        10. -
        -

        If you encounter any problems with the installation, you can check this forum post for some troubleshooting tips. You can also visit Intel's website for more information about the Intel 82801GBM ICH7 M chipset and its features.

        - -

        Why should you install the IDT 92HD81B1X audio driver on your Intel 82801GBM ICH7 M chipset? The main benefit is that you can enjoy high definition audio, which enables streaming music to sound more like an original studio recording. High definition audio supports the latest digital audio formats and creates an immersive experience. [^2^] [^4^]

        -

High definition audio has a higher sampling frequency and/or bit depth than standard CD audio, which operates at 44.1 kHz/16-bit. This means that more information from the original recording is preserved, resulting in better sound quality. [^2^] [^3^] Examples of common high definition audio formats include FLAC, a lossless compression format that discards no data, and WAV, which typically stores uncompressed audio. [^4^]
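To see what those numbers mean in practice, here is a quick back-of-the-envelope calculation (our own illustration, not from the cited sources) of the raw bitrate of uncompressed stereo PCM at CD quality versus the 192 kHz/24-bit ceiling of the codec discussed below.

```python
# Raw bitrate of uncompressed stereo PCM: sample_rate * bit_depth * channels.
def pcm_kbps(sample_rate_hz: int, bit_depth: int, channels: int = 2) -> float:
    return sample_rate_hz * bit_depth * channels / 1000

print(pcm_kbps(44_100, 16))   # CD quality      -> 1411.2 kbps
print(pcm_kbps(192_000, 24))  # 192 kHz/24-bit  -> 9216.0 kbps
```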

        -

        -

        By installing the IDT 92HD81B1X audio driver on your Intel 82801GBM ICH7 M chipset, you can take advantage of the features of the IDT 92HD87B1 High Definition Audio Codec, which supports up to 192 kHz/24-bit audio playback and recording. The codec also supports Dolby Home Theater v4 and SRS Premium Sound HD technologies, which enhance the surround sound and bass effects of your audio system. [^2^]

        - -

        How to update the IDT 92HD81B1X audio driver on your Intel 82801GBM ICH7 M chipset? If you have already installed the driver but you want to check for updates, you can use one of the following methods:

        -
          -
        • Use Device Manager to update sound drivers. To do this, open the Start menu and search for Device Manager. In Device Manager, expand Sound, video and game controllers and right-click your audio card. Choose Update Driver and select Search Automatically for Drivers. Windows will look for the latest drivers and install them if available. [^2^]
        • -
        • Use Windows Update to get the latest audio drivers. To do this, open Settings using Windows+i and select Update & Security. In the sidebar on the left, click Windows Update. On the right pane, select Check for Updates. Install the available updates and restart your computer. [^3^]
        • -
        • Download the latest drivers from your device maker's website. To do this, visit the website of your laptop or desktop manufacturer and look for the support or downloads section. Find your model and download the latest audio driver for your device. Run the downloaded file and follow the instructions to install it. [^4^]
        • -
        -
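If you want to confirm from the command line that the new driver actually registered after using any of these methods, a small script along the following lines can help. This is a generic illustration built on Windows' built-in driverquery tool, not something provided by Dell or IDT.

```python
# Generic check (not a Dell/IDT tool): dump installed Windows drivers with
# the built-in `driverquery` command and print any entry mentioning "IDT".
import csv
import io
import subprocess

def find_drivers(keyword: str) -> None:
    result = subprocess.run(
        ["driverquery", "/v", "/fo", "csv"],  # verbose output as CSV
        capture_output=True, text=True, check=True,
    )
    for row in csv.DictReader(io.StringIO(result.stdout)):
        if keyword.lower() in str(row).lower():
            print(row.get("Module Name"), "|", row.get("Display Name"))

if __name__ == "__main__":
    find_drivers("IDT")
```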

        By updating your audio driver regularly, you can ensure that your sound system works properly and that you get the best performance from your high definition audio codec.

        -
        -
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/CPU-Z The Best Tool to Check Your PCs Hardware Specifications.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/CPU-Z The Best Tool to Check Your PCs Hardware Specifications.md
deleted file mode 100644
index ba9929c6c8c5d956c7c9dce622d1a6e03ae91818..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/CPU-Z The Best Tool to Check Your PCs Hardware Specifications.md
+++ /dev/null
@@ -1,99 +0,0 @@
-

        How to Download and Install Z-RAM on Your PC

        -

        Memory is one of the most essential components of your PC, as it allows your processor to access and store data quickly and efficiently. However, not all memory types are created equal, and some may offer better performance, density, and scalability than others. One such memory technology is Z-RAM, which stands for Zero-Capacitor RAM. In this article, we will explain what Z-RAM is, how it differs from other types of RAM, and how to download and install it on your PC.

        -




        -

        What is Z-RAM and why is it important?

        -

        Z-RAM is a tradename of a now-obsolete dynamic random-access memory technology that did not require a capacitor to maintain its state. Z-RAM was developed between 2002 and 2010 by a now-defunct company named Innovative Silicon.

        -

        Z-RAM relies on the floating body effect, an artifact of the silicon on insulator (SOI) process which places transistors in isolated tubs (the transistor body voltages "float" with respect to the wafer substrate beneath the tubs). The floating body effect causes a variable capacitance to appear between the bottom of the tub and the underlying substrate. The floating body effect is usually a parasitic effect that bedevils circuit designs, but also allows a DRAM-like cell to be built without adding a separate capacitor, the floating body effect then taking the place of the conventional capacitor.

        -

        Because the capacitor is located under the transistor (instead of adjacent to, or above the transistor as in conventional DRAMs), another connotation of the name "Z-RAM" is that it extends in the negative z-direction. Theoretically, a reduced cell size would have allowed denser storage, which in turn could (when used with large blocks) have improved access times by reducing the physical distance that data would have to travel to exit a block.

        -

        For a large cache memory (as typically found in a high-performance microprocessor), Z-RAM would then have been potentially as fast as the SRAM used for conventional on-processor (L1/L2) caches, but with lower surface area (and thus cost). However, with advances in manufacturing techniques for conventional SRAM (most importantly, the transition to 32nm fabrication node), Z-RAM lost its size advantage. Although AMD licensed the second generation Z-RAM in 2006, the processor manufacturer abandoned its Z-RAM plans in January 2010.

        -

        Similarly, DRAM producer Hynix had also licensed Z-RAM for use in DRAM chips in 2007, and Innovative Silicon announced it was jointly developing a non-SOI version of Z-RAM that could be manufactured on lower cost bulk CMOS technology in March 2010, but Innovative Silicon closed on June 29, 2010. Its patent portfolio was acquired by Micron Technology in December 2010.

        -

        -

        Z-RAM vs. other types of RAM

        -

        RAM comes in different types depending on the function of the memory and the technology of memory and of other computer hardware. The two most common types of RAM are static RAM (SRAM) and dynamic RAM (DRAM).

        -

        SRAM uses six transistors to store one bit of data and does not need to be refreshed periodically. SRAM is faster and more stable than DRAM, but also more expensive and less dense. SRAM is typically used as cache memory for the processor (CPU) and is not usually user-replaceable or upgraded.

        -

        DRAM uses one transistor and one capacitor to store one bit of data and needs to be refreshed periodically to maintain its state. DRAM is slower and less stable than SRAM, but also cheaper and more dense. DRAM is typically used as the main memory for the PC and is usually user-replaceable or upgradeable.

        -

        Another type of RAM is error-correcting code (ECC) memory, which is a special type of DRAM that can detect and correct data errors. ECC memory is more reliable and secure than non-ECC memory, but also more expensive and slower. ECC memory is typically used in servers, workstations, and other critical applications that require high data integrity.

        -

        Z-RAM was designed to be a hybrid of SRAM and DRAM, combining the speed and stability of SRAM with the density and cost of DRAM. Z-RAM also claimed to have lower power consumption than both SRAM and DRAM, making it more energy-efficient and environmentally friendly. Z-RAM was intended to be used as cache memory for high-performance processors, as well as main memory for PCs and servers. However, as mentioned earlier, Z-RAM never reached commercial production due to technical challenges and market competition.
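As a rough illustration of why the cell structure matters for density, the sketch below (our own arithmetic, based only on the transistor counts stated above) compares how many transistors a 1 GiB array would need with each cell type.

```python
# Rough density arithmetic from the cell structures described above:
# 6 transistors per SRAM bit, 1 transistor (+1 capacitor) per DRAM bit,
# and 1 transistor per Z-RAM bit (the floating body replaces the capacitor).
CELL_TRANSISTORS = {
    "SRAM (6T)": 6,
    "DRAM (1T1C)": 1,
    "Z-RAM (1T, no capacitor)": 1,
}

capacity_bits = 8 * 2**30  # a 1 GiB array

for cell, per_bit in CELL_TRANSISTORS.items():
    total = per_bit * capacity_bits
    print(f"{cell}: {total / 1e9:.1f} billion transistors")
```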

        -

        Benefits of Z-RAM

        -

        Although Z-RAM is no longer available or supported, it still had some potential benefits that could have made it a viable alternative to other types of RAM. Some of these benefits are:

        -
          -
        • Z-RAM had a smaller cell size than SRAM and DRAM, which meant it could store more data in less space. This could have improved the memory density, capacity, and bandwidth of the PC or server.
        • -
        • Z-RAM had a simpler cell structure than SRAM and DRAM, which meant it could be manufactured with less complexity and cost. This could have reduced the price and increased the availability of the memory for the consumer or business.
        • -
        • Z-RAM had a lower voltage requirement than SRAM and DRAM, which meant it could operate with less power consumption and heat generation. This could have enhanced the energy efficiency, performance, and lifespan of the PC or server.
        • -
        • Z-RAM had a higher speed than DRAM and comparable speed to SRAM, which meant it could access and transfer data faster and more smoothly. This could have boosted the responsiveness, productivity, and user experience of the PC or server.
        • -
        -

        How to download Z-RAM

        -

        Since Z-RAM is no longer in development or production, there is no official source or website to download Z-RAM software or drivers. However, there are some unofficial sources that claim to offer Z-RAM downloads for Windows PCs. These sources are not verified or endorsed by any reputable authority, so they may contain malware, viruses, or other harmful components that could damage your PC or compromise your security. Therefore, we do not recommend downloading Z-RAM from these sources.

        -

        However, if you are still curious or adventurous enough to try Z-RAM on your PC, you can search for "Z-Ram Booster" or "Z-Ram Extreme" on Google or Bing. These are two examples of software programs that claim to use Z-RAM technology to optimize your PC's performance and memory usage. You can find some links to download these programs below:

        -
          -
        • [Z-Ram Booster]
        • -
        • [Z-Ram Extreme]
        • -
        -

        Please note that we are not responsible for any consequences that may arise from downloading or installing these programs on your PC. Use them at your own risk and discretion.

        -

        How to install Z-RAM

        -

        If you have downloaded one of the Z-RAM programs mentioned above, you can follow these steps to install it on your PC:

        -
          -
        1. Locate the downloaded file on your PC and double-click on it to run it.
        2. -
        3. Follow the instructions on the screen to complete the installation process.
        4. -
        5. Restart your PC if prompted.
        6. -
        7. Launch the Z-RAM program from your desktop or start menu.
        8. -
        9. Adjust the settings according to your preferences and needs.
        10. -
        11. Click on the "Boost" button to activate Z-RAM on your PC.
        12. -
        -

        You should see some improvement in your PC's performance and memory usage after using Z-RAM. However, this may vary depending on your PC's specifications, configuration, and workload.

        -

        Conclusion

        -

        Z-RAM was a promising memory technology that aimed to combine the best features of SRAM and DRAM, such as speed, density, cost, and power efficiency. However, due to technical difficulties and market competition, Z-RAM never became a commercial reality and was discontinued in 2010. Today, Z-RAM is only available as a software program that claims to optimize your PC's performance and memory usage, but these programs are not verified or safe to use. Therefore, we advise you to avoid downloading or installing Z-RAM on your PC, unless you are willing to take the risk and responsibility.

        -

        We hope this article has helped you understand what Z-RAM is, how it differs from other types of RAM, and how to download and install it on your PC. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

        -

        FAQs

        -

        What is Z-RAM?

        -

        Z-RAM is a type of memory technology that uses the floating body effect of silicon on insulator (SOI) transistors to store data without a capacitor. Z-RAM was developed in the early 2000s but was discontinued in 2010.

        -

        How does Z-RAM compare to SRAM and DRAM?

        -

        Z-RAM was designed to be faster and more stable than DRAM, and denser and cheaper than SRAM. Z-RAM also claimed to have lower power consumption than both SRAM and DRAM.

        -

        What are the benefits of Z-RAM?

        -

        Z-RAM could have improved the memory density, capacity, bandwidth, performance, energy efficiency, and lifespan of PCs and servers.

        -

        Where can I download Z-RAM?

        -

        Z-RAM is no longer available or supported by any official source or website. However, there are some unofficial sources that claim to offer Z-RAM software programs for Windows PCs. These sources are not verified or safe to use.

        -

        How can I install Z-RAM?

        -

        If you have downloaded one of the Z-RAM software programs from an unofficial source, you can follow the steps in this article to install it on your PC. However, we do not recommend doing so, as these programs may contain malware, viruses, or other harmful components.

        -
        -
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Carx Street hack APK dinero infinito y diversin sin lmites en el juego de carreras ms espectacular.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Carx Street hack APK dinero infinito y diversin sin lmites en el juego de carreras ms espectacular.md
deleted file mode 100644
index a5d14c3df7b1aeaf7c3ed0588f3fa842a2b493b0..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Carx Street hack APK dinero infinito y diversin sin lmites en el juego de carreras ms espectacular.md
+++ /dev/null
@@ -1,74 +0,0 @@
-
        -

        CarX Street Hack APK Dinero Infinito: How to Get Unlimited Money in the Best Street Racing Game

        -

        Introduction

        -

        If you are a fan of street racing games, you must have heard of CarX Street, the latest and most realistic racing game for Android devices. CarX Street lets you experience the thrill of street racing with stunning graphics, realistic physics, and a variety of cars to choose from. You can customize your car, upgrade your engine, and challenge other players online or offline.

        -

        However, like most racing games, CarX Street requires money to unlock new cars, parts, and features. Money is also needed to repair your car after a race or a crash. And money is not easy to come by in CarX Street. You have to win races, complete missions, and watch ads to earn some cash. But what if you want to have unlimited money in CarX Street without spending real money or wasting time?

        -




        -

        That's where CarX Street hack APK dinero infinito comes in. This is a modified version of the original game that gives you unlimited money and access to all the cars and features in the game. With this hack, you can enjoy CarX Street without any limitations or restrictions. You can buy any car you want, upgrade it to the max, and dominate the streets.

        -

        Why do you need money in CarX Street?

        -

        Money is the main currency in CarX Street. You need money to do various things in the game, such as:

        -
          -
        • Buy new cars: There are over 30 cars in CarX Street, each with its own characteristics and performance. Some cars are faster, some are more agile, some are more durable, and some are more stylish. You can buy any car you like with money.
        • -
        • Upgrade your car: You can improve your car's performance by upgrading its engine, transmission, suspension, brakes, tires, and nitro. Upgrading your car will make it faster, more responsive, and more competitive.
        • -
        • Customize your car: You can change the appearance of your car by changing its color, paint job, decals, rims, spoilers, and other accessories. You can also add neon lights, smoke effects, and sound systems to your car. Customizing your car will make it more unique and attractive.
        • -
        • Repair your car: Your car will get damaged after a race or a crash. You have to repair your car with money before you can use it again. Repairing your car will restore its performance and durability.
        • -
        • Unlock new features: You can unlock new features in the game with money, such as new game modes, new maps, new events, and new challenges. Unlocking new features will make the game more fun and diverse.
        • -
        -

        As you can see, money is essential in CarX Street. But earning money in the game is not easy. You have to win races, complete missions, and watch ads to get some cash. And even then, the amount of money you get is not enough to buy everything you want in the game.

        -

        That's why many players look for ways to get unlimited money in CarX Street. And one of the best ways to do that is by using CarX Street hack APK dinero infinito.

        -

        How to get CarX Street hack APK dinero infinito?

        -

        CarX Street hack APK dinero infinito is a modified version of the original game that gives you unlimited money and access to all the cars and features in the game. With this hack, you can enjoy Car the cars and features in the game. If you are a fan of street racing games, you should definitely try CarX Street hack APK dinero infinito and see for yourself how amazing it is.

        -

        FAQs

        -

        Here are some frequently asked questions about CarX Street hack APK dinero infinito:

        -

        Is CarX Street hack APK dinero infinito safe to use?

        -

        Yes, CarX Street hack APK dinero infinito is safe to use, as long as you download it from a reliable source and follow the installation steps correctly. The hack does not contain any viruses or malware that can harm your device or your data. The hack also does not require rooting your device or using any complicated tools or methods that can damage your device or void your warranty.

        -

        -

        Will CarX Street hack APK dinero infinito work on any Android device?

        -

        Yes, CarX Street hack APK dinero infinito will work on any Android device that meets the minimum requirements of the original game. The minimum requirements are: Android 5.0 or higher, 2 GB of RAM, and 1 GB of free storage space. The hack will also work on any Android version, from Lollipop to Pie.

        -

        Will CarX Street hack APK dinero infinito affect my online gameplay?

        -

        No, CarX Street hack APK dinero infinito will not affect your online gameplay. The hack will only give you unlimited money and access to all the cars and features in the game, but it will not change your online ranking or stats. You can still play online with other players who have the original game or the hack, and you will not be banned or detected by the game servers.

        -

        Can I update CarX Street hack APK dinero infinito to the latest version?

        -

        Yes, you can update CarX Street hack APK dinero infinito to the latest version, as long as the hack is updated by its developers. You can check for updates on the same source where you downloaded the hack, or you can use the in-game update option. However, you may have to uninstall the previous version of the hack before installing the new one, and you may lose your progress and data in the process.

        -

        Can I uninstall CarX Street hack APK dinero infinito if I don't like it?

        -

        Yes, you can uninstall CarX Street hack APK dinero infinito if you don't like it or if you want to go back to the original game. You can uninstall the hack like any other app on your device, by going to your device settings, then apps, then CarX Street, and then tapping on uninstall. You can also delete the APK file from your device storage if you want to free up some space.

        -
        -
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Crackle APK Mod Watch Free Movies and TV Shows with Premium Features.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Crackle APK Mod Watch Free Movies and TV Shows with Premium Features.md
deleted file mode 100644
index 18def5d960eeee6653ac925b1b2a67466123d14e..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Crackle APK Mod Watch Free Movies and TV Shows with Premium Features.md
+++ /dev/null
@@ -1,145 +0,0 @@
-
        -

        Crackle APK Mod: What Is It and How to Use It?

        -

        If you are looking for a free streaming service that offers a decent collection of movies and TV shows, you might have heard of Crackle. But did you know that there is a modded version of Crackle that removes ads and optimizes performance? In this article, we will explain what Crackle APK Mod is, how to install it, and what are its benefits and risks. We will also suggest some alternatives to Crackle APK Mod in case you want to try something different.

        -




        -

        What is Crackle?

        -

        Crackle is a free, ad-supported video streaming service that has been around since 2004. It is jointly owned by Chicken Soup for the Soul Entertainment and Sony, and it offers a variety of movies and TV shows from different genres, such as action, comedy, crime, drama, horror, thriller, black entertainment, westerns, and classic TV. Crackle also produces some original content, such as On Point, Going For Broke, and The Oath.

        -

        Features of Crackle app

        -

        Some of the features of Crackle app are:

        -
          -
        • It is completely free to use, without any subscription or login required.
        • -
        • It has a large library of premium movies and TV shows that are updated every month.
        • -
        • It has a Spotlight Channel that showcases the latest and recommended content from the Crackle team.
        • -
        • It supports multiple devices, such as Android, iOS, Roku, Amazon Fire TV, Apple TV, Xbox One, PlayStation 4, Samsung smart TVs, LG smart TVs, Vizio smart TVs, Comcast Xfinity X1 and Xfinity Flex set-top boxes, and web browsers.
        • -
        • It allows users to create a free account to access more features, such as parental controls and fewer ads.
        • -
        -

        Drawbacks of Crackle app

        -

        Some of the drawbacks of Crackle app are:

        -

        -
          -
        • It shows a lot of ads before and during the content, which can be annoying and disruptive.
        • -
        • It has a lackluster web and mobile interface that is not very user-friendly or attractive.
        • -
        • It does not have many high-quality shows or exclusive content that can compete with other streaming services.
        • -
        • It does not support offline downloads or full HD streams.
        • -
        • It is only available in the United States and its territories.
        • -
        -

        What is Crackle APK Mod?

        -

        Crackle APK Mod is a modified version of Crackle app that is developed by third-party developers. The main difference between the mod version and the original version is that the mod version removes ads and optimizes performance. This means that users can enjoy a smoother and uninterrupted streaming experience without any annoying commercials.

        -

        Benefits of Crackle APK Mod

        -

        Some of the benefits of Crackle APK Mod are:

        -
          -
        • It eliminates ads completely, which saves time and bandwidth.
        • -
        • It improves performance and stability, which reduces buffering and crashes.
        • -
        • It offers the same content as the original app, which means users can access a large library of movies and TV shows for free.
        • -
        -

        Risks of Crackle APK Mod

        -

        Some of the risks of Crackle APK Mod are:

        -
          -
        • It may contain malware or viruses that can harm your device or steal your data.
        • -
        • It may violate the terms and conditions of Crackle or Sony, which can result in legal actions or bans.
        • -
• It may not work properly or may not be compatible with your device.

          How to Install Crackle APK Mod?

          -

          Now that you know what Crackle APK Mod is and what it can do, you might be wondering how to install it on your Android device. Here are the steps you need to follow:

          -

          Step-by-step guide

          -
            -
1. Download the Crackle APK Mod file from a trusted source. You can search for it online or use one of the links below. Make sure you download the latest version of the app.
          2. -
          3. Before you install the file, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps that are not from the Google Play Store.
          4. -
          5. Locate the downloaded file on your device using a file manager app. If you don't have one, you can download one from the Google Play Store, such as Cx File Explorer or File Manager. Tap on the file and follow the instructions to install it.
          6. -
          7. Once the installation is complete, you can launch the app from your app drawer or home screen. You can now enjoy watching movies and TV shows on Crackle without ads.
          8. -
          -
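If you prefer to push the file from a computer instead of browsing for it on the phone, the same install can be scripted with Android's standard adb tool. The sketch below is a generic sideload illustration (the file name is a placeholder), not something the mod's developers distribute; it assumes adb is installed on the computer and USB debugging is enabled on the device.

```python
# Generic adb sideload sketch (the APK file name is a placeholder).
import subprocess

APK_PATH = "crackle-mod.apk"  # wherever you saved the downloaded file

subprocess.run(["adb", "devices"], check=True)                  # confirm the phone is visible
subprocess.run(["adb", "install", "-r", APK_PATH], check=True)  # -r replaces an existing install
```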

          Tips and tricks

          -
            -
          • If you encounter any problems while installing or using the app, try clearing the cache and data of the app. To do this, go to Settings > Apps > Crackle > Storage > Clear Cache and Clear Data. This will reset the app and fix any issues.
          • -
          • If you want to update the app, you need to uninstall the previous version and install the new one. You can check for updates online or use an APK installer app that scans and installs the APK files on your device.
          • -
          • If you want to watch Crackle content on a bigger screen, you can cast it to your TV using Chromecast or other devices that support screen mirroring. To do this, make sure your device and TV are connected to the same Wi-Fi network, then tap on the cast icon on the top right corner of the app and select your TV.
          • -
          -

          Alternatives to Crackle APK Mod

          -

          If you are not satisfied with Crackle APK Mod or want to try something different, there are other options you can explore. Here are some of them:

          -

          Other free streaming services

          -

          There are many other free streaming services that offer movies and TV shows without a subscription or login. Some of them are:

          -
            -
          • Tubi: Tubi is a popular free streaming service that has over 20,000 titles from various genres and categories. It also has some original content and exclusive deals with studios like Paramount, Lionsgate, MGM, and more. Tubi is available on Android, iOS, Roku, Amazon Fire TV, Apple TV, Xbox One, PlayStation 4, Samsung smart TVs, LG smart TVs, Vizio smart TVs, Comcast Xfinity X1 and Xfinity Flex set-top boxes, and web browsers.
          • -
• Pluto TV: Pluto TV is a free live TV streaming service that has over 250 channels and thousands of movies and shows on demand. It offers content from genres like news, sports, comedy, entertainment, lifestyle, music, kids, gaming, anime, and more. Pluto TV is available on Android, iOS, Roku, Amazon Fire TV, Apple TV, Chromecast, PlayStation 4, Xbox One, Samsung smart TVs, Vizio smart TVs, Sony smart TVs, LG smart TVs, and web browsers.
          • -
          • Peacock: Peacock is a free streaming service from NBCUniversal that has over 13,000 hours of movies, shows, news, sports, and originals. It features content from NBC, Bravo, USA Network, SYFY, Oxygen, E!, CNBC, MSNBC, NBCSN, Golf Channel, Universal Kids, A&E, ABC, CBS, The CW, FOX, HISTORY, Nickelodeon, Showtime, Universal Pictures, DreamWorks Animation, Focus Features, Illumination Entertainment, and more. Peacock is available on Android, iOS, Roku, Amazon Fire TV, Apple TV, Chromecast, Xbox One, PlayStation 4, Samsung smart TVs, LG smart TVs, Vizio smart TVs and web browsers.
          • -
          -

          Other modded apps

          -

          If you are looking for modded apps that offer premium content without ads or subscriptions, there are some options you can try. However, be aware that these apps are illegal and risky to use. Some of them are:

          -
            -
          • Cinema HD: Cinema HD is a modded app that allows users to watch and download movies and TV shows from various sources. It has a simple and elegant interface that lets users browse by categories or search by keywords. It also supports subtitles and Chromecast. Cinema HD is available for Android devices.
          • -
          • Typhoon TV: Typhoon TV is a modded app that offers a huge collection of movies and TV shows from various genres and languages. It has a user-friendly interface that lets users filter by genres, ratings, year of release or popularity. It also supports subtitles and Chromecast. Typhoon TV is available for Android devices.
          • -
          • CyberFlix TV: CyberFlix TV is a modded app that provides access to thousands of movies and TV shows from different sources. It has a sleek and intuitive interface that lets users sort by genres or search by keywords. It also supports subtitles and Chromecast. CyberFlix TV is available for Android devices.
          • -
          -

          Conclusion

          -

          In this article, we have discussed what Crackle APK Mod is and how to use it. We have also explained what are its benefits and risks and suggested some alternatives to it. We hope you have found this article helpful and informative.

          -

          Summary of the main points

          -
            -
          • Crackle APK Mod is a modified version of Crackle app that removes ads and optimizes performance.
          • -
          • Crackle APK Mod offers the same content as the original app but with a smoother and uninterrupted streaming experience.
          • -
          • Crackle APK Mod may contain malware or viruses and may violate the terms and conditions of Crackle or Sony.
          • -
• To install Crackle APK Mod on your Android device, you need to download the file from a trusted source, enable the installation of apps from unknown sources in your device settings, locate the file on your device using a file manager app, and tap on the file to install it.
          • -
          • To use Crackle APK Mod, you can launch the app from your app drawer or home screen and enjoy watching movies and TV shows without ads.
          • -
          • If you want to try other free streaming services or modded apps, you can check out Tubi, Pluto TV, Peacock, Cinema HD, Typhoon TV, or CyberFlix TV.
          • -
          -

          FAQs

          -

          Here are some of the frequently asked questions about Crackle APK Mod:

          -
            -
          1. Is Crackle APK Mod safe to use?
          2. -

            Crackle APK Mod is not an official app from Crackle or Sony, and it may contain malware or viruses that can harm your device or steal your data. It may also violate the terms and conditions of Crackle or Sony, which can result in legal actions or bans. Therefore, it is not safe to use Crackle APK Mod, and we do not recommend it.

            -
          3. Is Crackle APK Mod legal to use?
          4. -

            Crackle APK Mod is not legal to use, as it infringes on the intellectual property rights of Crackle or Sony. It also bypasses the ads that support the content creators and the service providers. Therefore, it is illegal to use Crackle APK Mod, and we do not condone it.

            -
          5. Does Crackle APK Mod work on iOS devices?
          6. -

            No, Crackle APK Mod does not work on iOS devices, as it is only compatible with Android devices. If you want to watch Crackle content on your iOS device, you can use the official Crackle app from the App Store or use a web browser.

            -
          7. Does Crackle APK Mod support subtitles?
          8. -

            Yes, Crackle APK Mod supports subtitles for most of the content. You can enable or disable subtitles by tapping on the CC icon on the top right corner of the player. You can also change the language and size of the subtitles by tapping on the settings icon next to the CC icon.

            -
          9. How can I contact the developers of Crackle APK Mod?
          10. -

We do not know who the developers of Crackle APK Mod are, as they are not affiliated with Crackle or Sony. They may have their own website or social media accounts where you can contact them, but we cannot guarantee their authenticity or reliability. We advise you to be careful when dealing with them.

            -

          -
          -
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Instagram Now and Join the Community of Creators.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Instagram Now and Join the Community of Creators.md
deleted file mode 100644
index 0acfa2782f697b7d0b263625a092174c46641729..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Instagram Now and Join the Community of Creators.md
+++ /dev/null
@@ -1,100 +0,0 @@
-

          Download Instagram Now: Why You Should Join the World's Most Popular Photo and Video Sharing App

          -

          Do you love taking photos and videos of your daily moments, hobbies, passions, or business? Do you want to share them with a large and active community of people who have similar interests? Do you want to discover new content, trends, products, and creators that inspire you? If you answered yes to any of these questions, then you should download Instagram now.

          -




          -

          Instagram is a photo and video-sharing social networking service owned by Meta Platforms, the company formerly known as Facebook. It allows you to upload media that can be edited with filters and organized by hashtags and geographical tagging. You can also post short videos called reels, stories that disappear after 24 hours, and live broadcasts. You can also send messages, make video calls, shop online, and explore content from other users and brands.

          -

          In this article, we will explain what Instagram is and how it works, what features and benefits it offers, how to download and use it, and what statistics and trends you should know about it for 2023. By the end of this article, you will have a clear idea of why you should join Instagram today.

          -

          What is Instagram and How Does It Work?

          -

Instagram was launched in 2010 by Kevin Systrom and Mike Krieger as a simple app for sharing photos with filters. It quickly gained popularity and was acquired by Facebook in 2012 for $1 billion. Since then, Instagram has evolved into one of the most successful social media platforms in the world, with over 1.28 billion monthly active users and 500 million daily active users.

          -

          -

          Instagram works by allowing you to create an account with a username, password, email address, phone number, or Facebook login. You can then customize your profile with a photo, bio, website link, and other information. You can also adjust your privacy settings, notifications, account security, data usage, and other preferences.

          -

          Once you have an account, you can start posting photos and videos to your feed that you want to show on your profile. You can also post reels (short videos up to 30 seconds long), stories (photos or videos that last 24 hours), or live videos (real-time broadcasts). You can edit your posts with filters, stickers, text, music, effects, and more. You can also add hashtags (keywords preceded by #) or geotags (locations) to make your posts more discoverable.

          -

          You can also follow other users or brands that interest you. You can see their posts on your home feed or their profiles. You can also like, comment on, or share their posts. You can also send them direct messages or video calls through Messenger. You can also browse other users' content by tag or location using the Search & Explore feature.

          -

          Another feature of Instagram is Shopping. You can browse the latest trends from your favorite brands and creators. You can also buy products directly from their posts or stories using Checkout. You can also create your own shop if you have a business account.

          -

          Instagram Features and Benefits

          -

          Instagram has many features that make it fun, easy, and rewarding to use. Here are some of the main features and benefits of Instagram:

          Reels

          -

          Reels are short videos that you can create and share on Instagram. You can use various creative tools to edit your reels, such as music, filters, effects, text, stickers, and more. You can also watch reels from other users on the Reels tab or the Explore page. Reels are a great way to express yourself, show your talents, learn new skills, or have fun with your friends.

          -

          Stories

          -

          Stories are photos or videos that you can share with your followers or close friends for 24 hours. You can also add interactive elements to your stories, such as polls, quizzes, questions, countdowns, and more. You can also see who viewed your stories and reply to their messages. Stories are a great way to share your daily moments, feelings, opinions, or updates with your audience.

          -

          Messenger

          -

          Messenger is the messaging and video calling feature of Instagram. You can use it to chat with your friends, family, or anyone you follow on Instagram. You can also create group chats with up to 32 people. You can also send photos, videos, voice messages, stickers, GIFs, and more. You can also use Messenger to watch videos together, play games, or co-watch posts. Messenger is a great way to stay connected and have fun with your contacts.

          -

          Shopping

          -

          Shopping is the e-commerce feature of Instagram. You can use it to discover and buy products from your favorite brands and creators. You can also create your own shop if you have a business account and sell your products directly on Instagram. You can also use Checkout to pay securely and easily without leaving the app. Shopping is a great way to find and support the products you love.

          -

          Search & Explore

          -

          Search & Explore is the discovery feature of Instagram. You can use it to find content that interests you based on your preferences and activity. You can also search by keywords, hashtags, or locations. You can also browse different categories of content, such as For You, Food, Travel, Style, Beauty, Sports, Music, Comedy, TV & Movies, and more. Search & Explore is a great way to expand your horizons and find new inspiration.

          -

          How to Download and Use Instagram

          -

          If you are ready to join Instagram and enjoy its features and benefits, here are the steps you need to follow:

          -

          Step 1: Go to the App Store or Google Play Store

          -

          The first step is to go to the App Store if you have an iPhone or iPad, or Google Play Store if you have an Android device. You can also use this link to download Instagram directly.

          -

          Step 2: Search for Instagram and Tap Install

          -

          The next step is to search for Instagram in the store and tap the Install button. The app will start downloading and installing on your device. It may take a few minutes depending on your internet speed and device storage.

          -

          Step 3: Launch the App and Create an Account

          -

          The third step is to launch the app and create an account. You can either sign up with your email address or phone number, or log in with Facebook if you already have a Facebook account. You will need to enter some basic information such as your name, username, password, and birthday. You will also need to verify your account by entering a code sent to your email or phone.

          -

          Step 4: Customize Your Profile and Settings

          -

          The fourth step is to customize your profile and settings. You can add a profile photo, a bio, a website link, and other information that you want to show on your profile. You can also adjust your privacy settings, notifications, account security, data usage, and other preferences.

          -

          Step 5: Start Posting, Following, and Engaging with Others

          -

The final step is to start posting photos and videos on Instagram using the camera icon at the bottom of the screen. You can also post reels using the Reels tab at the bottom of the screen, post stories using the camera icon at the top left of the screen (or by swiping right from anywhere in the app), and post live videos using the Live option in the stories camera. You can start following other users or brands that interest you using the search icon at the bottom of the screen or by tapping their usernames on their posts or profiles, and you can like, comment on, or share their posts using the icons below them. You can also send messages or make video calls using the Messenger icon at the top right of the screen (or by swiping left from anywhere in the app), and watch videos together, play games, or co-watch posts using the Watch Together, Games, or Co-Watching options in the Messenger menu.

Congratulations, you have successfully downloaded and set up Instagram. Now you can enjoy its features and benefits and join the world's most popular photo and video sharing app.

          Instagram Statistics and Trends for 2023

          -

          If you are wondering how popular and influential Instagram is, here are some statistics and trends that you should know about it for 2023:

          -

          Instagram User Demographics and Behavior

          -

According to Statista, Instagram has over 1.28 billion monthly active users and 500 million daily active users as of June 2021. It is the sixth most popular social media platform in the world, behind Facebook, YouTube, WhatsApp, Facebook Messenger, and WeChat, and the second most popular social media platform among U.S. adults, behind YouTube.

Instagram users are mostly young, female, and urban. According to Pew Research Center, 71% of U.S. adults aged 18 to 29 use Instagram, followed by 52% of those aged 30 to 49, 28% of those aged 50 to 64, and 13% of those aged 65 and older. 43% of U.S. adult women use Instagram, compared to 31% of men, and 42% of U.S. adult urban residents use it, compared to 34% of suburban and 25% of rural residents.

Instagram users are also highly engaged and active. According to Hootsuite, the average Instagram user spends 30 minutes per day on the app, up from 28 minutes in 2019, posts 6 stories per month, watches 16 stories per day, follows 186 accounts, and has 150 followers.

          Instagram Content Reach and Engagement

          -

According to Hootsuite, Instagram has a high reach and engagement rate compared to other social media platforms. The average reach rate of an Instagram post is 22.7%, meaning that it is seen by almost a quarter of the account's followers. The average engagement rate of an Instagram post is 1.22%, meaning that it receives more than one interaction (a like or comment) for every 100 views.

The most popular types of content on Instagram are photos, videos, reels, stories, and live videos. According to Hootsuite, photos account for 72% of all posts on Instagram, followed by videos (14%), reels (8%), stories (4%), and live videos (2%). Photos also have the highest engagement rate (1.41%), followed by reels (0.99%), videos (0.87%), stories (0.65%), and live videos (0.35%).

The most popular topics on Instagram are fashion, beauty, travel, food, fitness, art, music, sports, entertainment, and education. According to Hootsuite, the top hashtags on Instagram in 2021 are #fashion (829 million posts), #beauty (489 million posts), #travel (472 million posts), #food (406 million posts), #fitness (398 million posts), #art (393 million posts), #music (389 million posts), #sports (288 million posts), #entertainment (284 million posts), and #education (281 million posts).
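As a quick illustration of the arithmetic behind these rates, here is a minimal sketch in Python. The formulas follow the plain-language definitions above (share of followers reached, interactions per 100 views); they are our own reading rather than Hootsuite's published methodology, and the sample numbers are made up:

```python
# Back-of-the-envelope reach and engagement rates for a single post.
# The formulas mirror the definitions quoted above; analytics providers
# may compute these slightly differently.

def reach_rate(unique_viewers: int, followers: int) -> float:
    """Share of an account's followers who saw the post, in percent."""
    return 100 * unique_viewers / followers

def engagement_rate(likes: int, comments: int, views: int) -> float:
    """Interactions per 100 views, in percent."""
    return 100 * (likes + comments) / views

# Hypothetical post: seen by 227 of 1,000 followers, with 14 likes + 3 comments
print(f"reach: {reach_rate(227, 1_000):.1f}%")              # 22.7%
print(f"engagement: {engagement_rate(14, 3, 1_394):.2f}%")  # ~1.22%
```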

          Instagram Creator Economy and Influencer Marketing

          -

According to Business Insider, Instagram is one of the leading platforms for the creator economy and influencer marketing. The creator economy refers to the growing sector of people who make money online by creating content or offering services to their audiences. Influencer marketing refers to the practice of collaborating with influencers (people who have a large or loyal following on social media) to promote a brand, product, or service. Instagram has several features and programs that support the creator economy and influencer marketing, including:

- Creator Accounts: special types of accounts that offer more insights, tools, and monetization options for creators.
- Creator Studio: a desktop dashboard that allows creators to manage their content, analytics, messages, and monetization across Facebook and Instagram.
- IGTV: a feature that allows creators to upload and share longer videos (up to 60 minutes) on Instagram.
- Reels: a feature that allows creators to create and share short videos (up to 30 seconds) with music, effects, and other creative tools.
- Badges: virtual gifts that fans can buy and send to their favorite creators during live videos to show their support and appreciation.
- Branded Content Ads: ads that allow brands to promote the content of influencers they partner with on Instagram.
- Brand Collabs Manager: a tool that helps creators and brands find and connect with each other for collaboration opportunities.

According to Influencer Marketing Hub, Instagram is the most preferred platform for influencer marketing, with 79% of brands choosing it over other platforms. The global influencer marketing industry is expected to grow to $13.8 billion in 2021, up from $9.7 billion in 2020. The average influencer marketing campaign on Instagram has an ROI of $5.20 for every $1 spent.
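To see what the "$5.20 for every $1 spent" figure means in practice, here is a tiny sketch of the underlying arithmetic (our own illustration of the usual revenue-per-dollar reading of ROI; the budget and revenue figures are hypothetical):

```python
# ROI expressed as dollars returned per dollar spent, matching the
# "$5.20 for every $1 spent" figure quoted above.

def roi_per_dollar(revenue: float, spend: float) -> float:
    return revenue / spend

spend = 10_000.0    # hypothetical campaign budget in dollars
revenue = 52_000.0  # hypothetical sales attributed to the campaign
print(roi_per_dollar(revenue, spend))  # 5.2 -> $5.20 earned per $1 spent
```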

          Instagram Advertising and E-commerce Opportunities

          -

According to eMarketer, Instagram is one of the most effective platforms for advertising and e-commerce. Advertising refers to the practice of paying to display ads on social media platforms to reach potential customers. E-commerce refers to the practice of buying and selling goods or services online. Instagram has several features and formats that enable advertising and e-commerce, including:

- Feed Ads: ads that appear in the home feed as users scroll through content from the accounts they follow.
- Stories Ads: ads that appear between the stories of users as they watch them.
- Reels Ads: ads that appear between the reels of users as they watch them.
- Explore Ads: ads that appear on the Explore page as users browse content from other accounts.
- IGTV Ads: ads that appear on IGTV videos as users watch them.
- Shopping Ads: ads that feature products from brands or creators that users can buy directly on Instagram using Checkout.
- Carousel Ads: ads that feature multiple images or videos that users can swipe through.
- Collection Ads: ads that feature a main image or video and a group of products that users can browse and buy.
- Video Ads: ads that feature a single video up to 120 seconds long.
- Photo Ads: ads that feature a single image.
- Stories Canvas Ads: full-screen interactive ads that appear in stories.

According to eMarketer, Instagram is expected to generate $18.16 billion in ad revenue in 2021, up from $13.86 billion in 2020, and to account for 30.8% of Facebook's total ad revenue in 2021, up from 28.6% in 2020. It is also expected to have 112.5 million U.S. users who will make at least one purchase via Instagram in 2021, up from 89.4 million in 2020.
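As a sanity check on those projections, the quoted revenue share implies a figure for Facebook's total ad revenue. A two-line back-calculation (our own arithmetic, not a number published by eMarketer):

```python
# If Instagram's $18.16B is 30.8% of Facebook's total ad revenue,
# the implied company-wide total is revenue / share.
instagram_ad_revenue_2021 = 18.16  # in billions of dollars
instagram_share_2021 = 0.308
print(instagram_ad_revenue_2021 / instagram_share_2021)  # ~58.96, i.e. roughly $59B
```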

          Conclusion: Don't Miss Out on the Fun and Benefits of Instagram

          -

          As you can see, Instagram is more than just a photo and video sharing app. It is a powerful social media platform that offers many features and benefits for users, creators, brands, and businesses. It is also a popular and influential platform that has a large and engaged audience, high-quality content, and lucrative opportunities. If you want to join the world's most popular photo and video sharing app, download Instagram now and start enjoying its fun and benefits. You won't regret it.

          -

          FAQs

          -

          Here are some frequently asked questions about Instagram:

          -

          Is Instagram free?

          -

          Yes, Instagram is free to download and use. However, some features may require in-app purchases, such as badges or Checkout.

          -

          Is Instagram safe?

          -

Yes, Instagram is safe to use as long as you follow some basic guidelines, such as:

- Use a strong password and enable two-factor authentication for your account security.
- Adjust your privacy settings according to your preferences and needs.
- Only share content that you own or have permission to use.
- Only follow or interact with people you know or trust.
- Report or block any abusive or inappropriate content or behavior.

          -

          How do I get more followers on Instagram?

          -

There is no magic formula for getting more followers on Instagram, but some general tips are:

- Post high-quality content that is relevant, engaging, and consistent with your niche or theme.
- Use hashtags, geotags, captions, and tags to make your content more discoverable and searchable.
- Follow, like, comment on, or share the content of other users or brands that interest you or relate to your niche or theme.
- Engage with your followers or potential followers by responding to their comments or messages, asking questions, or creating polls or quizzes.
- Use stories, reels, or live videos to show your personality, behind-the-scenes moments, or exclusive content.
- Collaborate with other users or brands that have similar or complementary audiences or goals.
- Use analytics tools to track your performance, growth, and insights.

          How do I make money on Instagram?

          -

There are several ways to make money on Instagram, depending on your goals, skills, and audience. Some of the most common ways are:

- Selling your own products or services through your shop or website link.
- Promoting other brands' products or services as an affiliate marketer and earning commissions for every sale or action.
- Partnering with brands as an influencer and creating sponsored content for them in exchange for a fee or free products.
- Creating and selling digital products such as e-books, courses, or memberships to your followers.
- Offering coaching, consulting, or mentoring services to your followers or clients.

          How do I contact Instagram support?

          -

If you have any questions, issues, or feedback about Instagram, you can contact Instagram support in the following ways:

- Using the Help Center in the app or on the website to find answers to common questions or problems.
- Using the Report a Problem option in the app or on the website to report any technical issues, bugs, or violations.
- Using the Feedback option in the app or on the website to share your suggestions, ideas, or opinions.
- Using the Contact Us option on the website to send an email to Instagram support.

          \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Beyond Compare 3.1.11 Build 12204 With Key.md b/spaces/tioseFevbu/cartoon-converter/scripts/Beyond Compare 3.1.11 Build 12204 With Key.md deleted file mode 100644 index e981175e0367f2249a19aa2159c240029ca27bdd..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Beyond Compare 3.1.11 Build 12204 With Key.md +++ /dev/null @@ -1,32 +0,0 @@ - -

          Beyond Compare 3.1.11 Build 12204 With Key: A Powerful File Comparison Tool

          -

          Beyond Compare is a file comparison tool that allows you to compare and synchronize files and folders across different platforms and devices. Whether you need to compare text files, images, binary files, archives, or registry keys, Beyond Compare can help you find and resolve differences quickly and easily.

          -

          In this article, we will introduce you to Beyond Compare 3.1.11 Build 12204 With Key, the latest version of this software that was released on April 5th, 2023[^1^]. We will also show you how to download and install it on your Windows PC, and how to activate the Pro version with a license key.

          -


          Features of Beyond Compare 3.1.11 Build 12204 With Key

          -

          Beyond Compare 3.1.11 Build 12204 With Key comes with many features that make it a versatile and powerful file comparison tool. Some of these features are:

          -
- Support for comparing files without a key column[^3^]
- Improved handling of embedded line endings within delimited cells[^3^]
- Ability to compare text files, images, binary files, archives, or registry keys
- Ability to synchronize files and folders across different platforms and devices
- Ability to merge changes from two versions of a file or folder into a single output
- Ability to create reports in HTML, XML, or plain text formats
- Ability to customize the interface with various themes, fonts, colors, and layouts
- Ability to use scripts and a command-line interface for automation and integration (see the sketch below)
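Since the list above mentions scripting and command-line support, here is a rough sketch of driving Beyond Compare in batch mode from Python. The `load` and `folder-report` script commands and the `@script` invocation reflect Beyond Compare's scripting syntax as we understand it, but the paths, report options, and the `/silent` flag should be treated as illustrative rather than a verified recipe:

```python
# Sketch: generate an HTML folder-comparison report by feeding Beyond Compare
# a script file via its "@script" command-line syntax. All paths are placeholders.
import subprocess
import tempfile

script = (
    'load "C:\\projects\\site_v1" "C:\\projects\\site_v2"\n'
    'folder-report layout:side-by-side options:display-mismatches '
    'output-to:"C:\\reports\\diff.html"\n'
)

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(script)
    script_path = f.name

# "/silent" is meant to suppress the progress window so the script can run unattended.
subprocess.run(["BCompare.exe", f"@{script_path}", "/silent"], check=True)
```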

          How to Download and Install Beyond Compare 3.1.11 Build 12204 With Key on Windows PC

          -

          If you want to download and install Beyond Compare 3.1.11 Build 12204 With Key on your Windows PC, you can follow these steps:

          -
1. Go to this link and click on the green "Download" button.
2. Wait for the file to be downloaded on your PC. The file name should be "Beyond-Compare-3.1.11.exe".
3. Double-click on the downloaded file to launch the installer.
4. Follow the instructions on the screen to complete the installation process.
5. Once the installation is finished, you can launch Beyond Compare from the Start menu or the desktop shortcut.

          How to Activate the Pro Version of Beyond Compare 3.1.11 Build 12204 With Key

          -

          If you want to activate the Pro version of Beyond Compare 3.1.11 Build 12204 With Key, you will need a license key that can unlock all the features of the software. You can use this license key that we found on GitHub[^2^]:

--- BEGIN LICENSE KEY ---
H1bJTd2SauPv5Garuaq0Ig43uqq5NJOEw94wxdZTpU-pFB9GmyPk677gJ
vC1Ro6sbAvKR4pVwtxdCfuoZDb6hJ5bVQKqlfihJfSYZt-xVrVU27+0Ja
hFbqTmYskatMTgPyjvv99CF2Te8ec+Ys2SPxyZAF0YwOCNOWmsyqN5y9t
q2Kw2pjoiDs5gIH-uw5U49JzOB6otS7kThBJE-H9A76u4uUvR8DKb+VcB
rWu5qSJGEnbsXNfJdq5L2D8QgRdV-sXHp2A-7j1X2n4WIISvU1V9koIyS
NisHFBTcWJS0sC5BTFwrtfLEE9lEwz2bxHQpW

          \ No newline at end of file diff --git a/spaces/tizze/websitechatbot/README.md b/spaces/tizze/websitechatbot/README.md deleted file mode 100644 index ed5da8cc11b4224f17103892a35dfcacb310af35..0000000000000000000000000000000000000000 --- a/spaces/tizze/websitechatbot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Websitechatbot -emoji: 🏢 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/utils/filesystem.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/utils/filesystem.py deleted file mode 100644 index 83c2df75b963e5866b63aaf0f4446a8ca61aebce..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/utils/filesystem.py +++ /dev/null @@ -1,153 +0,0 @@ -import fnmatch -import os -import os.path -import random -import sys -from contextlib import contextmanager -from tempfile import NamedTemporaryFile -from typing import Any, BinaryIO, Generator, List, Union, cast - -from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed - -from pip._internal.utils.compat import get_path_uid -from pip._internal.utils.misc import format_size - - -def check_path_owner(path: str) -> bool: - # If we don't have a way to check the effective uid of this process, then - # we'll just assume that we own the directory. - if sys.platform == "win32" or not hasattr(os, "geteuid"): - return True - - assert os.path.isabs(path) - - previous = None - while path != previous: - if os.path.lexists(path): - # Check if path is writable by current user. - if os.geteuid() == 0: - # Special handling for root user in order to handle properly - # cases where users use sudo without -H flag. - try: - path_uid = get_path_uid(path) - except OSError: - return False - return path_uid == 0 - else: - return os.access(path, os.W_OK) - else: - previous, path = path, os.path.dirname(path) - return False # assume we don't own the path - - -@contextmanager -def adjacent_tmp_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]: - """Return a file-like object pointing to a tmp file next to path. - - The file is created securely and is ensured to be written to disk - after the context reaches its end. - - kwargs will be passed to tempfile.NamedTemporaryFile to control - the way the temporary file will be opened. - """ - with NamedTemporaryFile( - delete=False, - dir=os.path.dirname(path), - prefix=os.path.basename(path), - suffix=".tmp", - **kwargs, - ) as f: - result = cast(BinaryIO, f) - try: - yield result - finally: - result.flush() - os.fsync(result.fileno()) - - -# Tenacity raises RetryError by default, explicitly raise the original exception -_replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25)) - -replace = _replace_retry(os.replace) - - -# test_writable_dir and _test_writable_dir_win are copied from Flit, -# with the author's agreement to also place them under pip's license. -def test_writable_dir(path: str) -> bool: - """Check if a directory is writable. - - Uses os.access() on POSIX, tries creating files on Windows. - """ - # If the directory doesn't exist, find the closest parent that does. 
- while not os.path.isdir(path): - parent = os.path.dirname(path) - if parent == path: - break # Should never get here, but infinite loops are bad - path = parent - - if os.name == "posix": - return os.access(path, os.W_OK) - - return _test_writable_dir_win(path) - - -def _test_writable_dir_win(path: str) -> bool: - # os.access doesn't work on Windows: http://bugs.python.org/issue2528 - # and we can't use tempfile: http://bugs.python.org/issue22107 - basename = "accesstest_deleteme_fishfingers_custard_" - alphabet = "abcdefghijklmnopqrstuvwxyz0123456789" - for _ in range(10): - name = basename + "".join(random.choice(alphabet) for _ in range(6)) - file = os.path.join(path, name) - try: - fd = os.open(file, os.O_RDWR | os.O_CREAT | os.O_EXCL) - except FileExistsError: - pass - except PermissionError: - # This could be because there's a directory with the same name. - # But it's highly unlikely there's a directory called that, - # so we'll assume it's because the parent dir is not writable. - # This could as well be because the parent dir is not readable, - # due to non-privileged user access. - return False - else: - os.close(fd) - os.unlink(file) - return True - - # This should never be reached - raise OSError("Unexpected condition testing for writable directory") - - -def find_files(path: str, pattern: str) -> List[str]: - """Returns a list of absolute paths of files beneath path, recursively, - with filenames which match the UNIX-style shell glob pattern.""" - result: List[str] = [] - for root, _, files in os.walk(path): - matches = fnmatch.filter(files, pattern) - result.extend(os.path.join(root, f) for f in matches) - return result - - -def file_size(path: str) -> Union[int, float]: - # If it's a symlink, return 0. - if os.path.islink(path): - return 0 - return os.path.getsize(path) - - -def format_file_size(path: str) -> str: - return format_size(file_size(path)) - - -def directory_size(path: str) -> Union[int, float]: - size = 0.0 - for root, _dirs, files in os.walk(path): - for filename in files: - file_path = os.path.join(root, filename) - size += file_size(file_path) - return size - - -def format_directory_size(path: str) -> str: - return format_size(directory_size(path)) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py deleted file mode 100644 index d4c32cef1eeb248399a5df1f6bc1ac8763e798d6..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py +++ /dev/null @@ -1,237 +0,0 @@ -import re -import sys -from contextlib import suppress -from typing import Iterable, NamedTuple, Optional - -from .color import Color -from .style import Style -from .text import Text - -re_ansi = re.compile( - r""" -(?:\x1b\](.*?)\x1b\\)| -(?:\x1b([(@-Z\\-_]|\[[0-?]*[ -/]*[@-~])) -""", - re.VERBOSE, -) - - -class _AnsiToken(NamedTuple): - """Result of ansi tokenized string.""" - - plain: str = "" - sgr: Optional[str] = "" - osc: Optional[str] = "" - - -def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]: - """Tokenize a string in to plain text and ANSI codes. - - Args: - ansi_text (str): A String containing ANSI codes. 
- - Yields: - AnsiToken: A named tuple of (plain, sgr, osc) - """ - - position = 0 - sgr: Optional[str] - osc: Optional[str] - for match in re_ansi.finditer(ansi_text): - start, end = match.span(0) - osc, sgr = match.groups() - if start > position: - yield _AnsiToken(ansi_text[position:start]) - if sgr: - if sgr.endswith("m"): - yield _AnsiToken("", sgr[1:-1], osc) - else: - yield _AnsiToken("", sgr, osc) - position = end - if position < len(ansi_text): - yield _AnsiToken(ansi_text[position:]) - - -SGR_STYLE_MAP = { - 1: "bold", - 2: "dim", - 3: "italic", - 4: "underline", - 5: "blink", - 6: "blink2", - 7: "reverse", - 8: "conceal", - 9: "strike", - 21: "underline2", - 22: "not dim not bold", - 23: "not italic", - 24: "not underline", - 25: "not blink", - 26: "not blink2", - 27: "not reverse", - 28: "not conceal", - 29: "not strike", - 30: "color(0)", - 31: "color(1)", - 32: "color(2)", - 33: "color(3)", - 34: "color(4)", - 35: "color(5)", - 36: "color(6)", - 37: "color(7)", - 39: "default", - 40: "on color(0)", - 41: "on color(1)", - 42: "on color(2)", - 43: "on color(3)", - 44: "on color(4)", - 45: "on color(5)", - 46: "on color(6)", - 47: "on color(7)", - 49: "on default", - 51: "frame", - 52: "encircle", - 53: "overline", - 54: "not frame not encircle", - 55: "not overline", - 90: "color(8)", - 91: "color(9)", - 92: "color(10)", - 93: "color(11)", - 94: "color(12)", - 95: "color(13)", - 96: "color(14)", - 97: "color(15)", - 100: "on color(8)", - 101: "on color(9)", - 102: "on color(10)", - 103: "on color(11)", - 104: "on color(12)", - 105: "on color(13)", - 106: "on color(14)", - 107: "on color(15)", -} - - -class AnsiDecoder: - """Translate ANSI code in to styled Text.""" - - def __init__(self) -> None: - self.style = Style.null() - - def decode(self, terminal_text: str) -> Iterable[Text]: - """Decode ANSI codes in an interable of lines. - - Args: - lines (Iterable[str]): An iterable of lines of terminal output. - - Yields: - Text: Marked up Text. - """ - for line in terminal_text.splitlines(): - yield self.decode_line(line) - - def decode_line(self, line: str) -> Text: - """Decode a line containing ansi codes. - - Args: - line (str): A line of terminal output. - - Returns: - Text: A Text instance marked up according to ansi codes. 
- """ - from_ansi = Color.from_ansi - from_rgb = Color.from_rgb - _Style = Style - text = Text() - append = text.append - line = line.rsplit("\r", 1)[-1] - for plain_text, sgr, osc in _ansi_tokenize(line): - if plain_text: - append(plain_text, self.style or None) - elif osc is not None: - if osc.startswith("8;"): - _params, semicolon, link = osc[2:].partition(";") - if semicolon: - self.style = self.style.update_link(link or None) - elif sgr is not None: - # Translate in to semi-colon separated codes - # Ignore invalid codes, because we want to be lenient - codes = [ - min(255, int(_code) if _code else 0) - for _code in sgr.split(";") - if _code.isdigit() or _code == "" - ] - iter_codes = iter(codes) - for code in iter_codes: - if code == 0: - # reset - self.style = _Style.null() - elif code in SGR_STYLE_MAP: - # styles - self.style += _Style.parse(SGR_STYLE_MAP[code]) - elif code == 38: - #  Foreground - with suppress(StopIteration): - color_type = next(iter_codes) - if color_type == 5: - self.style += _Style.from_color( - from_ansi(next(iter_codes)) - ) - elif color_type == 2: - self.style += _Style.from_color( - from_rgb( - next(iter_codes), - next(iter_codes), - next(iter_codes), - ) - ) - elif code == 48: - # Background - with suppress(StopIteration): - color_type = next(iter_codes) - if color_type == 5: - self.style += _Style.from_color( - None, from_ansi(next(iter_codes)) - ) - elif color_type == 2: - self.style += _Style.from_color( - None, - from_rgb( - next(iter_codes), - next(iter_codes), - next(iter_codes), - ), - ) - - return text - - -if sys.platform != "win32" and __name__ == "__main__": # pragma: no cover - import io - import os - import pty - import sys - - decoder = AnsiDecoder() - - stdout = io.BytesIO() - - def read(fd: int) -> bytes: - data = os.read(fd, 1024) - stdout.write(data) - return data - - pty.spawn(sys.argv[1:], read) - - from .console import Console - - console = Console(record=True) - - stdout_result = stdout.getvalue().decode("utf-8") - print(stdout_result) - - for line in decoder.decode(stdout_result): - console.print(line) - - console.save_html("stdout.html") diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py deleted file mode 100644 index fa0b245d279e96724d5610f93bc3b3c8c22ca032..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py +++ /dev/null @@ -1,397 +0,0 @@ -""" -Low-level helpers for the SecureTransport bindings. - -These are Python functions that are not directly related to the high-level APIs -but are necessary to get them to work. They include a whole bunch of low-level -CoreFoundation messing about and memory management. The concerns in this module -are almost entirely about trying to avoid memory leaks and providing -appropriate and useful assistance to the higher-level code. -""" -import base64 -import ctypes -import itertools -import os -import re -import ssl -import struct -import tempfile - -from .bindings import CFConst, CoreFoundation, Security - -# This regular expression is used to grab PEM data out of a PEM bundle. 
-_PEM_CERTS_RE = re.compile( - b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL -) - - -def _cf_data_from_bytes(bytestring): - """ - Given a bytestring, create a CFData object from it. This CFData object must - be CFReleased by the caller. - """ - return CoreFoundation.CFDataCreate( - CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) - ) - - -def _cf_dictionary_from_tuples(tuples): - """ - Given a list of Python tuples, create an associated CFDictionary. - """ - dictionary_size = len(tuples) - - # We need to get the dictionary keys and values out in the same order. - keys = (t[0] for t in tuples) - values = (t[1] for t in tuples) - cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys) - cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values) - - return CoreFoundation.CFDictionaryCreate( - CoreFoundation.kCFAllocatorDefault, - cf_keys, - cf_values, - dictionary_size, - CoreFoundation.kCFTypeDictionaryKeyCallBacks, - CoreFoundation.kCFTypeDictionaryValueCallBacks, - ) - - -def _cfstr(py_bstr): - """ - Given a Python binary data, create a CFString. - The string must be CFReleased by the caller. - """ - c_str = ctypes.c_char_p(py_bstr) - cf_str = CoreFoundation.CFStringCreateWithCString( - CoreFoundation.kCFAllocatorDefault, - c_str, - CFConst.kCFStringEncodingUTF8, - ) - return cf_str - - -def _create_cfstring_array(lst): - """ - Given a list of Python binary data, create an associated CFMutableArray. - The array must be CFReleased by the caller. - - Raises an ssl.SSLError on failure. - """ - cf_arr = None - try: - cf_arr = CoreFoundation.CFArrayCreateMutable( - CoreFoundation.kCFAllocatorDefault, - 0, - ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), - ) - if not cf_arr: - raise MemoryError("Unable to allocate memory!") - for item in lst: - cf_str = _cfstr(item) - if not cf_str: - raise MemoryError("Unable to allocate memory!") - try: - CoreFoundation.CFArrayAppendValue(cf_arr, cf_str) - finally: - CoreFoundation.CFRelease(cf_str) - except BaseException as e: - if cf_arr: - CoreFoundation.CFRelease(cf_arr) - raise ssl.SSLError("Unable to allocate array: %s" % (e,)) - return cf_arr - - -def _cf_string_to_unicode(value): - """ - Creates a Unicode string from a CFString object. Used entirely for error - reporting. - - Yes, it annoys me quite a lot that this function is this complex. 
- """ - value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p)) - - string = CoreFoundation.CFStringGetCStringPtr( - value_as_void_p, CFConst.kCFStringEncodingUTF8 - ) - if string is None: - buffer = ctypes.create_string_buffer(1024) - result = CoreFoundation.CFStringGetCString( - value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8 - ) - if not result: - raise OSError("Error copying C string from CFStringRef") - string = buffer.value - if string is not None: - string = string.decode("utf-8") - return string - - -def _assert_no_error(error, exception_class=None): - """ - Checks the return code and throws an exception if there is an error to - report - """ - if error == 0: - return - - cf_error_string = Security.SecCopyErrorMessageString(error, None) - output = _cf_string_to_unicode(cf_error_string) - CoreFoundation.CFRelease(cf_error_string) - - if output is None or output == u"": - output = u"OSStatus %s" % error - - if exception_class is None: - exception_class = ssl.SSLError - - raise exception_class(output) - - -def _cert_array_from_pem(pem_bundle): - """ - Given a bundle of certs in PEM format, turns them into a CFArray of certs - that can be used to validate a cert chain. - """ - # Normalize the PEM bundle's line endings. - pem_bundle = pem_bundle.replace(b"\r\n", b"\n") - - der_certs = [ - base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle) - ] - if not der_certs: - raise ssl.SSLError("No root certificates specified") - - cert_array = CoreFoundation.CFArrayCreateMutable( - CoreFoundation.kCFAllocatorDefault, - 0, - ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), - ) - if not cert_array: - raise ssl.SSLError("Unable to allocate memory!") - - try: - for der_bytes in der_certs: - certdata = _cf_data_from_bytes(der_bytes) - if not certdata: - raise ssl.SSLError("Unable to allocate memory!") - cert = Security.SecCertificateCreateWithData( - CoreFoundation.kCFAllocatorDefault, certdata - ) - CoreFoundation.CFRelease(certdata) - if not cert: - raise ssl.SSLError("Unable to build cert object!") - - CoreFoundation.CFArrayAppendValue(cert_array, cert) - CoreFoundation.CFRelease(cert) - except Exception: - # We need to free the array before the exception bubbles further. - # We only want to do that if an error occurs: otherwise, the caller - # should free. - CoreFoundation.CFRelease(cert_array) - raise - - return cert_array - - -def _is_cert(item): - """ - Returns True if a given CFTypeRef is a certificate. - """ - expected = Security.SecCertificateGetTypeID() - return CoreFoundation.CFGetTypeID(item) == expected - - -def _is_identity(item): - """ - Returns True if a given CFTypeRef is an identity. - """ - expected = Security.SecIdentityGetTypeID() - return CoreFoundation.CFGetTypeID(item) == expected - - -def _temporary_keychain(): - """ - This function creates a temporary Mac keychain that we can use to work with - credentials. This keychain uses a one-time password and a temporary file to - store the data. We expect to have one keychain per socket. The returned - SecKeychainRef must be freed by the caller, including calling - SecKeychainDelete. - - Returns a tuple of the SecKeychainRef and the path to the temporary - directory that contains it. - """ - # Unfortunately, SecKeychainCreate requires a path to a keychain. This - # means we cannot use mkstemp to use a generic temporary file. Instead, - # we're going to create a temporary directory and a filename to use there. - # This filename will be 8 random bytes expanded into base64. 
We also need - # some random bytes to password-protect the keychain we're creating, so we - # ask for 40 random bytes. - random_bytes = os.urandom(40) - filename = base64.b16encode(random_bytes[:8]).decode("utf-8") - password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8 - tempdirectory = tempfile.mkdtemp() - - keychain_path = os.path.join(tempdirectory, filename).encode("utf-8") - - # We now want to create the keychain itself. - keychain = Security.SecKeychainRef() - status = Security.SecKeychainCreate( - keychain_path, len(password), password, False, None, ctypes.byref(keychain) - ) - _assert_no_error(status) - - # Having created the keychain, we want to pass it off to the caller. - return keychain, tempdirectory - - -def _load_items_from_file(keychain, path): - """ - Given a single file, loads all the trust objects from it into arrays and - the keychain. - Returns a tuple of lists: the first list is a list of identities, the - second a list of certs. - """ - certificates = [] - identities = [] - result_array = None - - with open(path, "rb") as f: - raw_filedata = f.read() - - try: - filedata = CoreFoundation.CFDataCreate( - CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata) - ) - result_array = CoreFoundation.CFArrayRef() - result = Security.SecItemImport( - filedata, # cert data - None, # Filename, leaving it out for now - None, # What the type of the file is, we don't care - None, # what's in the file, we don't care - 0, # import flags - None, # key params, can include passphrase in the future - keychain, # The keychain to insert into - ctypes.byref(result_array), # Results - ) - _assert_no_error(result) - - # A CFArray is not very useful to us as an intermediary - # representation, so we are going to extract the objects we want - # and then free the array. We don't need to keep hold of keys: the - # keychain already has them! - result_count = CoreFoundation.CFArrayGetCount(result_array) - for index in range(result_count): - item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index) - item = ctypes.cast(item, CoreFoundation.CFTypeRef) - - if _is_cert(item): - CoreFoundation.CFRetain(item) - certificates.append(item) - elif _is_identity(item): - CoreFoundation.CFRetain(item) - identities.append(item) - finally: - if result_array: - CoreFoundation.CFRelease(result_array) - - CoreFoundation.CFRelease(filedata) - - return (identities, certificates) - - -def _load_client_cert_chain(keychain, *paths): - """ - Load certificates and maybe keys from a number of files. Has the end goal - of returning a CFArray containing one SecIdentityRef, and then zero or more - SecCertificateRef objects, suitable for use as a client certificate trust - chain. - """ - # Ok, the strategy. - # - # This relies on knowing that macOS will not give you a SecIdentityRef - # unless you have imported a key into a keychain. This is a somewhat - # artificial limitation of macOS (for example, it doesn't necessarily - # affect iOS), but there is nothing inside Security.framework that lets you - # get a SecIdentityRef without having a key in a keychain. - # - # So the policy here is we take all the files and iterate them in order. - # Each one will use SecItemImport to have one or more objects loaded from - # it. We will also point at a keychain that macOS can use to work with the - # private key. - # - # Once we have all the objects, we'll check what we actually have. If we - # already have a SecIdentityRef in hand, fab: we'll use that. 
Otherwise, - # we'll take the first certificate (which we assume to be our leaf) and - # ask the keychain to give us a SecIdentityRef with that cert's associated - # key. - # - # We'll then return a CFArray containing the trust chain: one - # SecIdentityRef and then zero-or-more SecCertificateRef objects. The - # responsibility for freeing this CFArray will be with the caller. This - # CFArray must remain alive for the entire connection, so in practice it - # will be stored with a single SSLSocket, along with the reference to the - # keychain. - certificates = [] - identities = [] - - # Filter out bad paths. - paths = (path for path in paths if path) - - try: - for file_path in paths: - new_identities, new_certs = _load_items_from_file(keychain, file_path) - identities.extend(new_identities) - certificates.extend(new_certs) - - # Ok, we have everything. The question is: do we have an identity? If - # not, we want to grab one from the first cert we have. - if not identities: - new_identity = Security.SecIdentityRef() - status = Security.SecIdentityCreateWithCertificate( - keychain, certificates[0], ctypes.byref(new_identity) - ) - _assert_no_error(status) - identities.append(new_identity) - - # We now want to release the original certificate, as we no longer - # need it. - CoreFoundation.CFRelease(certificates.pop(0)) - - # We now need to build a new CFArray that holds the trust chain. - trust_chain = CoreFoundation.CFArrayCreateMutable( - CoreFoundation.kCFAllocatorDefault, - 0, - ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), - ) - for item in itertools.chain(identities, certificates): - # ArrayAppendValue does a CFRetain on the item. That's fine, - # because the finally block will release our other refs to them. - CoreFoundation.CFArrayAppendValue(trust_chain, item) - - return trust_chain - finally: - for obj in itertools.chain(identities, certificates): - CoreFoundation.CFRelease(obj) - - -TLS_PROTOCOL_VERSIONS = { - "SSLv2": (0, 2), - "SSLv3": (3, 0), - "TLSv1": (3, 1), - "TLSv1.1": (3, 2), - "TLSv1.2": (3, 3), -} - - -def _build_tls_unknown_ca_alert(version): - """ - Builds a TLS alert record for an unknown CA. 
- """ - ver_maj, ver_min = TLS_PROTOCOL_VERSIONS[version] - severity_fatal = 0x02 - description_unknown_ca = 0x30 - msg = struct.pack(">BB", severity_fatal, description_unknown_ca) - msg_len = len(msg) - record_type_alert = 0x15 - record = struct.pack(">BBBH", record_type_alert, ver_maj, ver_min, msg_len) + msg - return record diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_headers.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_headers.py deleted file mode 100644 index 87046ab391b9f5e577e6ef0181c50de7e9c7f01b..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_headers.py +++ /dev/null @@ -1,45 +0,0 @@ -"""distutils.command.install_headers - -Implements the Distutils 'install_headers' command, to install C/C++ header -files to the Python include directory.""" - -from distutils.core import Command - - -# XXX force is never used -class install_headers(Command): - - description = "install C/C++ header files" - - user_options = [ - ('install-dir=', 'd', "directory to install header files to"), - ('force', 'f', "force installation (overwrite existing files)"), - ] - - boolean_options = ['force'] - - def initialize_options(self): - self.install_dir = None - self.force = 0 - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options( - 'install', ('install_headers', 'install_dir'), ('force', 'force') - ) - - def run(self): - headers = self.distribution.headers - if not headers: - return - - self.mkpath(self.install_dir) - for header in headers: - (out, _) = self.copy_file(header, self.install_dir) - self.outfiles.append(out) - - def get_inputs(self): - return self.distribution.headers or [] - - def get_outputs(self): - return self.outfiles diff --git a/spaces/tomofi/MMOCR/mmocr/models/textrecog/losses/ctc_loss.py b/spaces/tomofi/MMOCR/mmocr/models/textrecog/losses/ctc_loss.py deleted file mode 100644 index 24c6390b8f82a6c65ad243f52974dc8aedc576a7..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/textrecog/losses/ctc_loss.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn - -from mmocr.models.builder import LOSSES - - -@LOSSES.register_module() -class CTCLoss(nn.Module): - """Implementation of loss module for CTC-loss based text recognition. - - Args: - flatten (bool): If True, use flattened targets, else padded targets. - blank (int): Blank label. Default 0. - reduction (str): Specifies the reduction to apply to the output, - should be one of the following: ('none', 'mean', 'sum'). - zero_infinity (bool): Whether to zero infinite losses and - the associated gradients. Default: False. - Infinite losses mainly occur when the inputs - are too short to be aligned to the targets. 
- """ - - def __init__(self, - flatten=True, - blank=0, - reduction='mean', - zero_infinity=False, - **kwargs): - super().__init__() - assert isinstance(flatten, bool) - assert isinstance(blank, int) - assert isinstance(reduction, str) - assert isinstance(zero_infinity, bool) - - self.flatten = flatten - self.blank = blank - self.ctc_loss = nn.CTCLoss( - blank=blank, reduction=reduction, zero_infinity=zero_infinity) - - def forward(self, outputs, targets_dict, img_metas=None): - """ - Args: - outputs (Tensor): A raw logit tensor of shape :math:`(N, T, C)`. - targets_dict (dict): A dict with 3 keys ``target_lengths``, - ``flatten_targets`` and ``targets``. - - - | ``target_lengths`` (Tensor): A tensor of shape :math:`(N)`. - Each item is the length of a word. - - - | ``flatten_targets`` (Tensor): Used if ``self.flatten=True`` - (default). A tensor of shape - (sum(targets_dict['target_lengths'])). Each item is the - index of a character. - - - | ``targets`` (Tensor): Used if ``self.flatten=False``. A - tensor of :math:`(N, T)`. Empty slots are padded with - ``self.blank``. - - img_metas (dict): A dict that contains meta information of input - images. Preferably with the key ``valid_ratio``. - - Returns: - dict: The loss dict with key ``loss_ctc``. - """ - valid_ratios = None - if img_metas is not None: - valid_ratios = [ - img_meta.get('valid_ratio', 1.0) for img_meta in img_metas - ] - - outputs = torch.log_softmax(outputs, dim=2) - bsz, seq_len = outputs.size(0), outputs.size(1) - outputs_for_loss = outputs.permute(1, 0, 2).contiguous() # T * N * C - - if self.flatten: - targets = targets_dict['flatten_targets'] - else: - targets = torch.full( - size=(bsz, seq_len), fill_value=self.blank, dtype=torch.long) - for idx, tensor in enumerate(targets_dict['targets']): - valid_len = min(tensor.size(0), seq_len) - targets[idx, :valid_len] = tensor[:valid_len] - - target_lengths = targets_dict['target_lengths'] - target_lengths = torch.clamp(target_lengths, min=1, max=seq_len).long() - - input_lengths = torch.full( - size=(bsz, ), fill_value=seq_len, dtype=torch.long) - if not self.flatten and valid_ratios is not None: - input_lengths = [ - math.ceil(valid_ratio * seq_len) - for valid_ratio in valid_ratios - ] - input_lengths = torch.Tensor(input_lengths).long() - - loss_ctc = self.ctc_loss(outputs_for_loss, targets, input_lengths, - target_lengths) - - losses = dict(loss_ctc=loss_ctc) - - return losses diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py deleted file mode 100644 index e8df265edefee1b7e5892fe373c1c0f80f59bf7b..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' -# model settings -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(18, 36)), - stage3=dict(num_channels=(18, 36, 72)), - stage4=dict(num_channels=(18, 36, 72, 144)))), - neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/faq.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/faq.md deleted file mode 100644 index e0ef6aff84a851513776720d5e97e00db5115d18..0000000000000000000000000000000000000000 --- 
a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/faq.md +++ /dev/null @@ -1,82 +0,0 @@ -We list some common troubles faced by many users and their corresponding solutions here. Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. If the contents here do not cover your issue, please create an issue using the [provided templates](https://github.com/open-mmlab/mmdetection/blob/master/.github/ISSUE_TEMPLATE/error-report.md) and make sure you fill in all required information in the template. - -## MMCV Installation - -- Compatibility issue between MMCV and MMDetection; "ConvWS is already registered in conv layer"; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, <=xxx." - - Please install the correct version of MMCV for the version of your MMDetection following the [installation instruction](https://mmdetection.readthedocs.io/en/latest/get_started.html#installation). - -- "No module named 'mmcv.ops'"; "No module named 'mmcv._ext'". - - 1. Uninstall existing mmcv in the environment using `pip uninstall mmcv`. - 2. Install mmcv-full following the [installation instruction](https://mmcv.readthedocs.io/en/latest/#installation). - -## PyTorch/CUDA Environment - -- "RTX 30 series card fails when building MMCV or MMDet" - - 1. Temporary work-around: do `MMCV_WITH_OPS=1 MMCV_CUDA_ARGS='-gencode=arch=compute_80,code=sm_80' pip install -e .`. - The common issue is `nvcc fatal : Unsupported gpu architecture 'compute_86'`. This means that the compiler should optimize for sm_86, i.e., Nvidia 30 series cards, but such optimizations are not supported by CUDA toolkit 11.0. - This work-around modifies the compile flag by adding `MMCV_CUDA_ARGS='-gencode=arch=compute_80,code=sm_80'`, which tells `nvcc` to optimize for **sm_80**, i.e., Nvidia A100. Although A100 is different from the 30 series cards, they use a similar Ampere architecture. This may hurt the performance but it works. - 2. PyTorch developers have updated that the default compiler flags should be fixed by [pytorch/pytorch#47585](https://github.com/pytorch/pytorch/pull/47585). So using PyTorch-nightly may also be able to solve the problem, though we have not tested it yet. - -- "invalid device function" or "no kernel image is available for execution". - - 1. Check if your CUDA runtime version (under `/usr/local/`), `nvcc --version` and `conda list cudatoolkit` version match. - 2. Run `python mmdet/utils/collect_env.py` to check whether PyTorch, torchvision, and MMCV are built for the correct GPU architecture. - You may need to set `TORCH_CUDA_ARCH_LIST` to reinstall MMCV. - The GPU arch table could be found [here](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list), - i.e. run `TORCH_CUDA_ARCH_LIST=7.0 pip install mmcv-full` to build MMCV for Volta GPUs. - The compatibility issue could happen when using old GPUs, e.g., Tesla K80 (3.7) on Colab. - 3. Check whether the running environment is the same as the one mmcv/mmdet was compiled in. - For example, you may compile mmcv using CUDA 10.0 but run it on CUDA 9.0 environments. - -- "undefined symbol" or "cannot open xxx.so". - - 1. If those symbols are CUDA/C++ symbols (e.g., libcudart.so or GLIBCXX), check whether the CUDA/GCC runtimes are the same as those used for compiling mmcv, - i.e. run `python mmdet/utils/collect_env.py` to see if `"MMCV Compiler"`/`"MMCV CUDA Compiler"` is the same as `"GCC"`/`"CUDA_HOME"`. - 2. 
If those symbols are PyTorch symbols (e.g., symbols containing caffe, aten, and TH), check whether the PyTorch version is the same as that used for compiling mmcv. - 3. Run `python mmdet/utils/collect_env.py` to check whether PyTorch, torchvision, and MMCV are built in and running on the same environment. - -- setuptools.sandbox.UnpickleableException: DistutilsSetupError("each element of 'ext_modules' option must be an Extension instance or 2-tuple") - - 1. If you are using miniconda rather than anaconda, check whether Cython is installed as indicated in [#3379](https://github.com/open-mmlab/mmdetection/issues/3379). - You need to manually install Cython first and then run the command `pip install -r requirements.txt`. - 2. You may also need to check the compatibility of `setuptools`, `Cython`, and `PyTorch` in your environment. - -- "Segmentation fault". - 1. Check your GCC version and use GCC 5.4. This is usually caused by an incompatibility between PyTorch and the environment (e.g., GCC < 4.9 for PyTorch). We also recommend avoiding GCC 5.5, because many users report that GCC 5.5 causes a "segmentation fault" and simply changing it to GCC 5.4 solves the problem. - - 2. Check whether PyTorch is correctly installed and can use CUDA ops, e.g., type the following command in your terminal. - - ```shell - python -c 'import torch; print(torch.cuda.is_available())' - ``` - - and see whether it runs correctly and prints `True`. - - 3. If PyTorch is correctly installed, check whether MMCV is correctly installed. - - ```shell - python -c 'import mmcv; import mmcv.ops' - ``` - - If MMCV is correctly installed, the above two commands will raise no errors. - - 4. If MMCV and PyTorch are correctly installed, you may use `ipdb` or `pdb` to set breakpoints, or directly add `print` statements in the mmdetection code, to see which part leads to the segmentation fault. - -## Training - -- "Loss goes Nan" - 1. Check if the dataset annotations are valid: zero-size bounding boxes will cause the regression loss to be Nan due to the commonly used transformation for box regression. Some small (width or height smaller than 1) boxes will also cause this problem after data augmentation (e.g., instaboost). So check the data, try to filter out those zero-size boxes, and skip risky augmentations on the small boxes when you face the problem. - 2. Reduce the learning rate: the learning rate might be too large for some reason, e.g., a change of batch size. You can rescale it to a value that trains the model stably. - 3. Extend the warmup iterations: some models are sensitive to the learning rate at the start of training. You can extend the warmup iterations, e.g., change `warmup_iters` from 500 to 1000 or 2000. - 4. Add gradient clipping: some models require gradient clipping to stabilize the training process. You can add gradient clipping to avoid gradients that are too large. -- "GPU out of memory" - 1. There are some scenarios with a large number of ground truth boxes, which may cause OOM during target assignment. You can set `gpu_assign_thr=N` in the assigner's config so that the assigner calculates box overlaps on the CPU when there are more than N GT boxes. - 2. Set `with_cp=True` in the backbone. This uses the sublinear strategy in PyTorch to reduce GPU memory cost in the backbone. - 3. Try mixed precision training following the examples in `config/fp16`. The `loss_scale` might need further tuning for different models. (A config sketch illustrating these fixes follows below.) 
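
As a rough, hedged illustration of where these knobs live (the dict paths and all numeric values below are placeholders to adapt, not settings taken from any config in this repo), the NaN and OOM mitigations above typically look like the following mmdetection-style config fragment:

```python
# Hypothetical mmdetection-style config fragment illustrating the FAQ fixes above.
# All values are placeholders; exact dict paths vary between configs.

# "Loss goes Nan": clip gradients and extend the learning-rate warmup.
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,  # extended from the common default of 500
    warmup_ratio=0.001,
    step=[8, 11])

# "GPU out of memory": checkpoint backbone activations with `with_cp`, and let
# the assigner compute box overlaps on the CPU once an image has more than
# `gpu_assign_thr` ground-truth boxes.
model = dict(
    backbone=dict(with_cp=True),
    train_cfg=dict(rpn=dict(assigner=dict(gpu_assign_thr=200))))

# Mixed precision training; `loss_scale` may need per-model tuning.
fp16 = dict(loss_scale=512.)
```
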
- -- "RuntimeError: Expected to have finished reduction in the prior iteration before starting a new one" - 1. This error indicates that your module has parameters that were not used in producing loss. This phenomenon may be caused by running different branches in your code in DDP mode. - 2. You can set ` find_unused_parameters = True` in the config to solve the above problems or find those unused parameters manually. diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/detectors/yolact.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/detectors/yolact.py deleted file mode 100644 index d40a091f3c023a2d464d4946880ee50b64676f21..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/detectors/yolact.py +++ /dev/null @@ -1,141 +0,0 @@ -import torch - -from mmdet.core import bbox2result -from ..builder import DETECTORS, build_head -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class YOLACT(SingleStageDetector): - """Implementation of `YOLACT `_""" - - def __init__(self, - backbone, - neck, - bbox_head, - segm_head, - mask_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) - self.segm_head = build_head(segm_head) - self.mask_head = build_head(mask_head) - - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - raise NotImplementedError - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None): - """ - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # convert Bitmap mask or Polygon Mask to Tensor here - gt_masks = [ - gt_mask.to_tensor(dtype=torch.uint8, device=img.device) - for gt_mask in gt_masks - ] - - x = self.extract_feat(img) - - cls_score, bbox_pred, coeff_pred = self.bbox_head(x) - bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels, - img_metas) - losses, sampling_results = self.bbox_head.loss( - *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) - - segm_head_outs = self.segm_head(x[0]) - loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels) - losses.update(loss_segm) - - mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas, - sampling_results) - loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes, - img_metas, sampling_results) - losses.update(loss_mask) - - # check NaN and Inf - for loss_name in losses.keys(): - assert torch.isfinite(torch.stack(losses[loss_name]))\ - .all().item(), '{} becomes infinite or NaN!'\ - .format(loss_name) - - return losses - - def simple_test(self, img, img_metas, rescale=False): - """Test function without test time augmentation.""" - x = self.extract_feat(img) - - cls_score, bbox_pred, coeff_pred = self.bbox_head(x) - - bbox_inputs = (cls_score, bbox_pred, - coeff_pred) + (img_metas, self.test_cfg, rescale) - det_bboxes, det_labels, det_coeffs = self.bbox_head.get_bboxes( - *bbox_inputs) - bbox_results = [ - bbox2result(det_bbox, det_label, self.bbox_head.num_classes) - for det_bbox, det_label in zip(det_bboxes, det_labels) - ] - - num_imgs = len(img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - segm_results = [[[] for _ in range(self.mask_head.num_classes)] - for _ in range(num_imgs)] - else: - # if det_bboxes is rescaled to the original image size, we need to - # rescale it back to the testing scale to obtain RoIs. - if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i][:, :4] - for i in range(len(det_bboxes)) - ] - mask_preds = self.mask_head(x[0], det_coeffs, _bboxes, img_metas) - # apply mask post-processing to each image individually - segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - segm_results.append( - [[] for _ in range(self.mask_head.num_classes)]) - else: - segm_result = self.mask_head.get_seg_masks( - mask_preds[i], det_labels[i], img_metas[i], rescale) - segm_results.append(segm_result) - return list(zip(bbox_results, segm_results)) - - def aug_test(self, imgs, img_metas, rescale=False): - """Test with augmentations.""" - raise NotImplementedError diff --git a/spaces/triggah61/chingu-music/tests/modules/test_lstm.py b/spaces/triggah61/chingu-music/tests/modules/test_lstm.py deleted file mode 100644 index 1248964c8191e19f27661f0974bef9cc967eb015..0000000000000000000000000000000000000000 --- a/spaces/triggah61/chingu-music/tests/modules/test_lstm.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import random -import torch - -from audiocraft.modules.lstm import StreamableLSTM - - -class TestStreamableLSTM: - - def test_lstm(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=False) - x = torch.randn(B, C, T) - y = lstm(x) - - print(y.shape) - assert y.shape == torch.Size([B, C, T]) - - def test_lstm_skip(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=True) - x = torch.randn(B, C, T) - y = lstm(x) - - assert y.shape == torch.Size([B, C, T]) diff --git a/spaces/trttung1610/musicgen/audiocraft/metrics/clap_consistency.py b/spaces/trttung1610/musicgen/audiocraft/metrics/clap_consistency.py deleted file mode 100644 index d2a6c61ae177533ca2fb17e25bc77d2acbbe3791..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/audiocraft/metrics/clap_consistency.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from pathlib import Path -import typing as tp - -import torch -import torchmetrics -from transformers import RobertaTokenizer # type: ignore - -from ..data.audio_utils import convert_audio -from ..environment import AudioCraftEnvironment -from ..utils.utils import load_clap_state_dict - -try: - import laion_clap # type: ignore -except ImportError: - laion_clap = None - - -class TextConsistencyMetric(torchmetrics.Metric): - """Text consistency metric measuring consistency between audio and text pairs.""" - - def update(self, audio: torch.Tensor, text: tp.List[str], sizes: torch.Tensor, sample_rates: torch.Tensor) -> None: - raise NotImplementedError("implement how to update the metric from the audio and text pairs.") - - def compute(self): - raise NotImplementedError("implement how to compute the final metric score.") - - -class CLAPTextConsistencyMetric(TextConsistencyMetric): - """Text consistency metric relying on Contrastive Language-Audio Pretraining (CLAP). - - This metric is similar to the MuLan Cycle Consistency from MusicLM (https://arxiv.org/pdf/2301.11325.pdf) - or the CLAP score used in Make-An-Audio (https://arxiv.org/pdf/2301.12661v1.pdf). - - As a joint audio-text embedding model, a pretrained CLAP model can be used to quantify the - similarity between audio-text pairs. We compute the CLAP embeddings from the text descriptions as - well as the generated audio based on them, and define the MCC metric as the average cosine similarity - between these embeddings. 
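
Before the CLAP-based implementation below, a minimal self-contained sketch of the metric's core reduction may help; the random tensors stand in for CLAP audio/text embeddings (an assumption for illustration only, since the real metric obtains them from a pretrained CLAP model):

```python
import torch
import torch.nn.functional as F

# Stand-ins for CLAP embeddings of 8 audio clips and their 8 text descriptions;
# in the real metric these come from a pretrained CLAP model.
audio_embeddings = torch.randn(8, 512)
text_embeddings = torch.randn(8, 512)

# Cosine similarity of each (audio, text) pair, averaged over the batch:
# this mean is the consistency score the metric accumulates incrementally.
cosine_sim = F.cosine_similarity(audio_embeddings, text_embeddings, dim=1, eps=1e-8)
print(f"consistency score over {cosine_sim.numel()} pairs: {cosine_sim.mean().item():.4f}")
```
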
- - Model implementation & pre-trained checkpoints: https://github.com/LAION-AI/CLAP - """ - def __init__(self, model_path: tp.Union[str, Path], model_arch: str = 'HTSAT-tiny', enable_fusion: bool = False): - super().__init__() - if laion_clap is None: - raise ImportError("Please install CLAP to compute text consistency: 'pip install laion_clap'") - self.add_state("cosine_sum", default=torch.tensor(0.), dist_reduce_fx="sum") - self.add_state("weight", default=torch.tensor(0.), dist_reduce_fx="sum") - self._initialize_model(model_path, model_arch, enable_fusion) - - def _initialize_model(self, model_path: tp.Union[str, Path], model_arch: str, enable_fusion: bool): - model_path = AudioCraftEnvironment.resolve_reference_path(model_path) - self.tokenize = RobertaTokenizer.from_pretrained('roberta-base') - self.model = laion_clap.CLAP_Module(enable_fusion=enable_fusion, amodel=model_arch) - self.model_sample_rate = 48_000 - load_clap_state_dict(self.model, model_path) - self.model.eval() - - def _tokenizer(self, texts: tp.Union[str, tp.List[str]]) -> dict: - # we use the default params from CLAP module here as well - return self.tokenize(texts, padding="max_length", truncation=True, max_length=77, return_tensors="pt") - - def update(self, audio: torch.Tensor, text: tp.List[str], sizes: torch.Tensor, sample_rates: torch.Tensor) -> None: - """Compute cosine similarity between audio and text pairs and accumulate scores over the dataset.""" - assert audio.size(0) == len(text), "Number of audio and text samples should match" - assert torch.all(sample_rates == sample_rates[0].item()), "All items in batch should have the same sample rate" - sample_rate = int(sample_rates[0].item()) - # convert audio batch to 48kHz monophonic audio with no channel dimension: [B, C, T] -> [B, T] - audio = convert_audio(audio, from_rate=sample_rate, to_rate=self.model_sample_rate, to_channels=1).mean(dim=1) - audio_embeddings = self.model.get_audio_embedding_from_data(audio, use_tensor=True) - text_embeddings = self.model.get_text_embedding(text, tokenizer=self._tokenizer, use_tensor=True) - # cosine similarity between the text and the audio embedding - cosine_sim = torch.nn.functional.cosine_similarity(audio_embeddings, text_embeddings, dim=1, eps=1e-8) - self.cosine_sum += cosine_sim.sum(dim=0) - self.weight += torch.tensor(cosine_sim.size(0)) - - def compute(self): - """Computes the average cosine similarty across all audio/text pairs.""" - assert self.weight.item() > 0, "Unable to compute with total number of comparisons <= 0" # type: ignore - return (self.cosine_sum / self.weight).item() # type: ignore diff --git a/spaces/ucalyptus/PTI/models/StyleCLIP/criteria/id_loss.py b/spaces/ucalyptus/PTI/models/StyleCLIP/criteria/id_loss.py deleted file mode 100644 index a828023e115243e48918538d31b91d662cd12d0f..0000000000000000000000000000000000000000 --- a/spaces/ucalyptus/PTI/models/StyleCLIP/criteria/id_loss.py +++ /dev/null @@ -1,39 +0,0 @@ -import torch -from torch import nn - -from models.facial_recognition.model_irse import Backbone - - -class IDLoss(nn.Module): - def __init__(self, opts): - super(IDLoss, self).__init__() - print('Loading ResNet ArcFace') - self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se') - self.facenet.load_state_dict(torch.load(opts.ir_se50_weights)) - self.pool = torch.nn.AdaptiveAvgPool2d((256, 256)) - self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112)) - self.facenet.eval() - self.opts = opts - - def extract_feats(self, x): - if x.shape[2] != 256: - x = 
self.pool(x) - x = x[:, :, 35:223, 32:220] # Crop interesting region - x = self.face_pool(x) - x_feats = self.facenet(x) - return x_feats - - def forward(self, y_hat, y): - n_samples = y.shape[0] - y_feats = self.extract_feats(y) # Otherwise use the feature from there - y_hat_feats = self.extract_feats(y_hat) - y_feats = y_feats.detach() - loss = 0 - sim_improvement = 0 - count = 0 - for i in range(n_samples): - diff_target = y_hat_feats[i].dot(y_feats[i]) - loss += 1 - diff_target - count += 1 - - return loss / count, sim_improvement / count diff --git a/spaces/umm-maybe/AI-image-detector/app.py b/spaces/umm-maybe/AI-image-detector/app.py deleted file mode 100644 index a1c816c20cb4ce587ee31dbbb45c50c7b579b3b1..0000000000000000000000000000000000000000 --- a/spaces/umm-maybe/AI-image-detector/app.py +++ /dev/null @@ -1,21 +0,0 @@ -import gradio as gr -from transformers import pipeline - -pipe = pipeline("image-classification", "umm-maybe/AI-image-detector") - -def image_classifier(image): - outputs = pipe(image) - results = {} - for result in outputs: - results[result['label']] = result['score'] - return results - -title = "Maybe's AI Art Detector" -description = """ -This app is a proof-of-concept demonstration of using a ViT model to predict whether an artistic image was generated using AI.\n -For more information, please see the blog post describing it at: -https://medium.com/@matthewmaybe/can-an-ai-learn-to-identify-ai-art-545d9d6af226 -""" - -demo = gr.Interface(fn=image_classifier, inputs=gr.Image(type="pil"), outputs="label", title=title, description=description) -demo.launch(show_api=False) diff --git a/spaces/upstage/open-ko-llm-leaderboard/src/assets/text_content.py b/spaces/upstage/open-ko-llm-leaderboard/src/assets/text_content.py deleted file mode 100644 index 0791122b55e950719177a573020ef1a8b22a3470..0000000000000000000000000000000000000000 --- a/spaces/upstage/open-ko-llm-leaderboard/src/assets/text_content.py +++ /dev/null @@ -1,159 +0,0 @@ -from src.display_models.model_metadata_type import ModelType - -TITLE = """""" -BOTTOM_LOGO = """""" - -INTRODUCTION_TEXT = f""" -🚀 The Open Ko-LLM Leaderboard 🇰🇷 objectively evaluates the performance of Korean Large Language Models (LLMs). - -When you submit a model on the "Submit here!" page, it is automatically evaluated. The GPU used for evaluation is operated with the support of __[KT](https://cloud.kt.com/)__. -The data used for evaluation consists of datasets to assess reasoning, language understanding, hallucination, and common sense. -The evaluation dataset is exclusively private and only available for the evaluation process. -More detailed information about the benchmark dataset is provided on the “About” page. - -This leaderboard is co-hosted by __[Upstage](https://www.upstage.ai)__ and __[NIA](https://www.nia.or.kr/site/nia_kor/main.do)__, which provides various Korean datasets through __[AI-Hub](https://aihub.or.kr)__, and operated by __[Upstage](https://www.upstage.ai)__. -""" - -LLM_BENCHMARKS_TEXT = f""" -# Context -While outstanding LLMs are being released competitively, most of them are centered on English and the English-speaking cultural sphere. We operate the Korean leaderboard, 🚀 Open Ko-LLM, to evaluate models that reflect the characteristics of the Korean language and Korean culture. Through this, we hope that users can conveniently use the leaderboard, participate, and contribute to the advancement of research in Korean. 
- -## Icons -{ModelType.PT.to_str(" : ")} model -{ModelType.FT.to_str(" : ")} model -{ModelType.IFT.to_str(" : ")} model -{ModelType.RL.to_str(" : ")} model -If there is no icon, it indicates that there is insufficient information about the model. -Please provide information about the model through an issue! 🤩 - -🏴‍☠️ : This icon indicates that the model has been selected as a subject of caution by the community, implying that users should exercise restraint when using it. Clicking on the icon will take you to a discussion about that model. -(Models that have used the evaluation set for training to achieve a high leaderboard ranking, among others, are selected as subjects of caution.) - -## How it works - -📈 We evaluate models using the [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness), a unified framework to test generative language models on a large number of different evaluation tasks. - -We have set up a benchmark using datasets translated into Korean, with variations applied by human experts, based on the four tasks (HellaSwag, MMLU, Arc, Truthful QA) operated by HuggingFace OpenLLM. We have also added a new dataset prepared from scratch. -- Ko-HellaSwag (provided by __[Upstage](https://www.upstage.ai/)__, machine translation) -- Ko-MMLU (provided by __[Upstage](https://www.upstage.ai/)__, human translation and variation) -- Ko-Arc (provided by __[Upstage](https://www.upstage.ai/)__, human translation and variation) -- Ko-Truthful QA (provided by __[Upstage](https://www.upstage.ai/)__, human translation and variation) -- Ko-CommonGen V2 (provided by __[Korea University NLP&AI Lab](http://nlp.korea.ac.kr/)__, created from scratch) - -To provide an evaluation befitting the LLM era, we've selected benchmark datasets suitable for assessing these elements: expertise, inference, hallucination, and common sense. The final score is the average of the scores from each evaluation dataset. - -GPUs are provided by __[KT](https://cloud.kt.com/)__ for the evaluations. - -## Details and Logs -- Detailed numerical results in the `results` Upstage dataset: https://huggingface.co/datasets/open-ko-llm-leaderboard/results -- Community queries and running status in the `requests` Upstage dataset: https://huggingface.co/datasets/open-ko-llm-leaderboard/requests - -## More resources -If you still have questions, you can check our FAQ [here](https://huggingface.co/spaces/upstage/open-ko-llm-leaderboard/discussions/1)! -""" - -EVALUATION_QUEUE_TEXT = f""" -# Evaluation Queue for the 🚀 Open Ko-LLM Leaderboard -Models added here will be automatically evaluated on the KT GPU cluster. - -## - -### 1️⃣ Make sure you can load your model and tokenizer using AutoClasses -```python -from transformers import AutoConfig, AutoModel, AutoTokenizer -config = AutoConfig.from_pretrained("your model name", revision=revision) -model = AutoModel.from_pretrained("your model name", revision=revision) -tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision) -``` - -If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded. - -⚠️ Make sure your model is public! - -⚠️ Make sure your model runs with the [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) - -⚠️ If your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it, stay posted! 
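
A quick end-to-end smoke test can catch problems before a failed evaluation run; a hedged sketch (the model name and revision are placeholders, and `AutoModelForCausalLM` is assumed here since leaderboard submissions are causal LMs):

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

name, revision = "your-org/your-model", "main"  # placeholders
config = AutoConfig.from_pretrained(name, revision=revision)
tokenizer = AutoTokenizer.from_pretrained(name, revision=revision)
model = AutoModelForCausalLM.from_pretrained(name, revision=revision)

# One forward pass proves the checkpoint loads and runs end to end.
inputs = tokenizer("안녕하세요", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # (batch, sequence_length, vocab_size)
```
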
- -### 2️⃣ Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index) -It's a new format for storing weights that is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`! - -### 3️⃣ Make sure your model has an open license! -This is a leaderboard for 🚀 Open Ko-LLMs, and we'd love for as many people as possible to know they can use your model. - -### 4️⃣ Fill up your model card -When we add extra information about models to the leaderboard, it will be automatically taken from the model card. - -## In case of model failure -If your model is displayed in the `FAILED` category, its execution stopped. Make sure you have followed the above steps first. If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task). -""" - -CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results. Authors of open-ko-llm-leaderboard are ordered alphabetically." -CITATION_BUTTON_TEXT = r""" -@misc{open-ko-llm-leaderboard, - author = {Chanjun Park, Hwalsuk Lee, Hyunbyung Park, Hyeonwoo Kim, Sanghoon Kim, Seonghwan Cho, Sunghun Kim, Sukyung Lee}, - title = {Open Ko-LLM Leaderboard}, - year = {2023}, - publisher = {Upstage, National Information Society Agency}, - howpublished = "\url{https://huggingface.co/spaces/upstage/open-ko-llm-leaderboard}" -} -@software{eval-harness, - author = {Gao, Leo and - Tow, Jonathan and - Biderman, Stella and - Black, Sid and - DiPofi, Anthony and - Foster, Charles and - Golding, Laurence and - Hsu, Jeffrey and - McDonell, Kyle and - Muennighoff, Niklas and - Phang, Jason and - Reynolds, Laria and - Tang, Eric and - Thite, Anish and - Wang, Ben and - Wang, Kevin and - Zou, Andy}, - title = {A framework for few-shot language model evaluation}, - month = sep, - year = 2021, - publisher = {Zenodo}, - version = {v0.0.1}, - doi = {10.5281/zenodo.5371628}, - url = {https://doi.org/10.5281/zenodo.5371628} -} -@misc{seo2023kocommongen, - title={Korean Commonsense Reasoning Evaluation for Large Language Models}, - author={Jaehyung Seo, Chanjun Park, Hyeonseok Moon, Sugyeong Eo, Aram So, Heuiseok Lim}, - year={2023}, - affiliation={Korea University, NLP&AI}, - booktitle={Proceedings of the 35th Annual Conference on Human & Cognitive Language Technology}} -@misc{park2023koarc, - title={Ko-ARC}, - original_title={Think you have Solved Question Answering? 
Try ARC, the AI2 Reasoning Challenge}, - author={Hyunbyung Park, Chanjun Park}, - original_author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord}, - year={2023} -} -@misc{park2023kohellaswag, - title={Ko-HellaSwag}, - original_title={HellaSwag: Can a Machine Really Finish Your Sentence?}, - author={Hyunbyung Park, Chanjun Park}, - original_author={Rowan Zellers and Ari Holtzman and Yonatan Bisk and Ali Farhadi and Yejin Choi}, - year={2023} -} -@misc{park2023kommlu, - title={Ko-MMLU}, - original_title={Measuring Massive Multitask Language Understanding}, - author={Hyunbyung Park, Chanjun Park}, - original_author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt}, - year={2023} -} -@misc{park2023kotruthfulqa, - title={Ko-TruthfulQA}, - original_title={TruthfulQA: Measuring How Models Mimic Human Falsehoods}, - author={Hyunbyung Park, Chanjun Park}, - original_author={Stephanie Lin and Jacob Hilton and Owain Evans}, - year={2023} -} -""" diff --git a/spaces/user238921933/stable-diffusion-webui/modules/script_loading.py b/spaces/user238921933/stable-diffusion-webui/modules/script_loading.py deleted file mode 100644 index b7611ea5f4489edc95f61040e4324124a2e6fefd..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/modules/script_loading.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import sys -import traceback -import importlib.util -from types import ModuleType - - -def load_module(path): - module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path) - module = importlib.util.module_from_spec(module_spec) - module_spec.loader.exec_module(module) - - return module - - -def preload_extensions(extensions_dir, parser): - if not os.path.isdir(extensions_dir): - return - - for dirname in sorted(os.listdir(extensions_dir)): - preload_script = os.path.join(extensions_dir, dirname, "preload.py") - if not os.path.isfile(preload_script): - continue - - try: - module = load_module(preload_script) - if hasattr(module, 'preload'): - module.preload(parser) - - except Exception: - print(f"Error running preload() for {preload_script}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) diff --git a/spaces/ussrcccp/White-box-Cartoonization/wbc/cartoonize.py b/spaces/ussrcccp/White-box-Cartoonization/wbc/cartoonize.py deleted file mode 100644 index 25faf1ceb95aaed9a3f7a7982d17a03dc6bc32b1..0000000000000000000000000000000000000000 --- a/spaces/ussrcccp/White-box-Cartoonization/wbc/cartoonize.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import cv2 -import numpy as np -import tensorflow as tf -import wbc.network as network -import wbc.guided_filter as guided_filter -from tqdm import tqdm - - -def resize_crop(image): - h, w, c = np.shape(image) - if min(h, w) > 720: - if h > w: - h, w = int(720 * h / w), 720 - else: - h, w = 720, int(720 * w / h) - image = cv2.resize(image, (w, h), - interpolation=cv2.INTER_AREA) - h, w = (h // 8) * 8, (w // 8) * 8 - image = image[:h, :w, :] - return image - - -def cartoonize(load_folder, save_folder, model_path): - print(model_path) - input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(input_photo) - final_out = guided_filter.guided_filter(input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = 
tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - sess = tf.Session(config=config) - - sess.run(tf.global_variables_initializer()) - saver.restore(sess, tf.train.latest_checkpoint(model_path)) - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = sess.run(final_out, feed_dict={input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except Exception: - print('cartoonize {} failed'.format(load_path)) - - -class Cartoonize: - def __init__(self, model_path): - print(model_path) - self.input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(self.input_photo) - self.final_out = guided_filter.guided_filter(self.input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - self.sess = tf.Session(config=config) - - self.sess.run(tf.global_variables_initializer()) - saver.restore(self.sess, tf.train.latest_checkpoint(model_path)) - - def run(self, load_folder, save_folder): - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except Exception: - print('cartoonize {} failed'.format(load_path)) - - def run_single(self, load_path, save_path): - try: - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except Exception: - print('cartoonize {} failed'.format(load_path)) - - -if __name__ == '__main__': - model_path = 'saved_models' - load_folder = 'test_images' - save_folder = 'cartoonized_images' - if not os.path.exists(save_folder): - os.mkdir(save_folder) - cartoonize(load_folder, save_folder, model_path) diff --git a/spaces/vibey/article-summariser-for-final-project/README.md b/spaces/vibey/article-summariser-for-final-project/README.md deleted file mode 100644 index aedfd977dc9f13b483736f726779842b4985290e..0000000000000000000000000000000000000000 --- a/spaces/vibey/article-summariser-for-final-project/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Article Summariser For Final Project -emoji: 👀 -colorFrom: purple -colorTo: gray -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/victor/tailwind-static-space/README.md b/spaces/victor/tailwind-static-space/README.md deleted file mode 100644 index 56efb4a859373561907898afa5fbe0becee08ff3..0000000000000000000000000000000000000000 --- a/spaces/victor/tailwind-static-space/README.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Tailwind Static Space -emoji: 🌈 -colorFrom: purple -colorTo: pink -sdk: static -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/wahaha/u2net_portrait/app.py b/spaces/wahaha/u2net_portrait/app.py deleted file mode 100644 index 07bd688ca020610a606dbe655b8207c3949052fe..0000000000000000000000000000000000000000 --- a/spaces/wahaha/u2net_portrait/app.py +++ /dev/null @@ -1,83 +0,0 @@ -import os - -import gradio as gr - -import sys -sys.path.insert(0, 'U-2-Net') - -from skimage import io, transform - -import numpy as np -from PIL import Image - -from utils.face_seg import FaceSeg -import cv2 - -import requests -import base64 -from io import BytesIO - -segment = FaceSeg() - - -def profuai(im_path, out_path): - r = requests.post( - 'http://nebula.cs.ualberta.ca/predict', - files={ - 'file': open(im_path, 'rb'), - }, - headers={'Host': 'nebula.cs.ualberta.ca', 'Origin': 'http://nebula.cs.ualberta.ca','Referer':'http://nebula.cs.ualberta.ca/', - 'X-Requested-With':'XMLHttpRequest', - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:51.0) Gecko/20100101 Firefox/51.0'} - - ) - #print(r) - if (r.status_code == 200): - data = r.text - #data:image/png;base64, - a = data[len("data:image/png;base64,"):] - missing_padding = 4 - len(a) % 4 - if missing_padding: - a += '=' * missing_padding - - img = Image.open(BytesIO(base64.urlsafe_b64decode(a))) - # print(a) - img.save(out_path, quality=80) - else: - raise Exception('error 1001') - - -def process(im): - image = cv2.imread(im.name) - matte = segment.get_mask(image) - - if len(image.shape) == 2: - image = image[:, :, None] - if image.shape[2] == 1: - image = np.repeat(image, 3, axis=2) - elif image.shape[2] == 4: - image = image[:, :, 0:3] - matte = np.repeat(np.asarray(matte)[:, :, None], 3, axis=2) / 255 - foreground = image * matte + np.full(image.shape, 255) * (1 - matte) - cv2.imwrite(im.name, foreground) - - profuai(im.name, im.name) - - return Image.open(im.name) - -title = "U-2-Net" -description = "Gradio demo for U-2-Net, https://github.com/xuebinqin/U-2-Net" -article = "" - -gr.Interface( - process, - [gr.inputs.Image(type="file", label="Input") -], - [gr.outputs.Image(type="pil", label="Output")], - title=title, - description=description, - article=article, - examples=[], - allow_flagging=False, - allow_screenshot=False - ).launch(enable_queue=True,cache_examples=True) diff --git a/spaces/williamcfrancis/Deep-Blind-Motion-Deblurring/sidekick/plot/plot_model.py 
b/spaces/williamcfrancis/Deep-Blind-Motion-Deblurring/sidekick/plot/plot_model.py deleted file mode 100644 index 076362c19c66b9444437b24e142d0909fe9b5d95..0000000000000000000000000000000000000000 --- a/spaces/williamcfrancis/Deep-Blind-Motion-Deblurring/sidekick/plot/plot_model.py +++ /dev/null @@ -1,4 +0,0 @@ -from tensorflow.keras.utils import plot_model - -def visualize_model(model, filename): - plot_model(model, to_file=filename, show_shapes=True) \ No newline at end of file diff --git a/spaces/wuxi/Real-CUGAN/README.md b/spaces/wuxi/Real-CUGAN/README.md deleted file mode 100644 index d673114edadba73e80f33a3c71bc0dbee8758cc8..0000000000000000000000000000000000000000 --- a/spaces/wuxi/Real-CUGAN/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Real CUGAN -emoji: 🐢 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -license: gpl-3.0 -duplicated_from: DianXian/Real-CUGAN ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xcgc/SD-webui-controlnet-docker/header_patch.py b/spaces/xcgc/SD-webui-controlnet-docker/header_patch.py deleted file mode 100644 index cf106e1a60283bb72d0881b5509a398123d1d13c..0000000000000000000000000000000000000000 --- a/spaces/xcgc/SD-webui-controlnet-docker/header_patch.py +++ /dev/null @@ -1,39 +0,0 @@ - with gr.Box(visible=os.environ.get("SPACE_ID")): - is_shared_ui = str(os.environ.get("IS_SHARED_UI", "") or "").strip().lower() not in ("", "0", "false", "none", "no") - if is_shared_ui: - import torch - if not torch.cuda.is_available(): - gr.HTML(f""" -
-           <!-- banner markup lost in extraction; recoverable text of the four gr.HTML() notices follows -->
-           ▲ Automatic1111's Stable Diffusion WebUI + Mikubill's ControlNet WebUI extension | Running on Hugging Face | Loaded checkpoint: AtoZovyaRPGArtistTools15_sd15V1
-           ▲ Docker build from 🐙 GitHub ➔ kalaspuff/stable-diffusion-webui-controlnet-docker / 🤗 Hugging Face ➔ carloscar/stable-diffusion-webui-controlnet-docker
-           ▲ This Space is currently running on CPU, which may yield very slow results - you can upgrade for a GPU after duplicating the space.
-           ▲ Duplicate this Space to run it privately without a queue, use a GPU for faster generation times, load custom checkpoints, etc. Duplicate Space
-           """) - else: - gr.HTML(f"""
-           <!-- banner markup lost in extraction; recoverable text: -->
-           ▲ Automatic1111's Stable Diffusion WebUI + Mikubill's ControlNet WebUI extension | Running on Hugging Face | Loaded checkpoint: AtoZovyaRPGArtistTools15_sd15V1
-           ▲ Docker build from 🐙 GitHub ➔ kalaspuff/stable-diffusion-webui-controlnet-docker / 🤗 Hugging Face ➔ carloscar/stable-diffusion-webui-controlnet-docker
-           ▲ Duplicate this Space to run it privately without a queue, use extensions, load custom checkpoints, etc. Duplicate Space
-           """) - elif os.environ.get("SPACE_ID"): - import torch - if not torch.cuda.is_available(): - gr.HTML(f"""
-           <!-- banner markup lost in extraction; recoverable text: -->
-           ▲ Docker build from 🐙 GitHub ➔ kalaspuff/stable-diffusion-webui-controlnet-docker / 🤗 Hugging Face ➔ carloscar/stable-diffusion-webui-controlnet-docker
-           ▲ Load additional checkpoints, VAE, LoRA models, etc. Read more on the README at the GitHub link above.
-           ▲ This Space is currently running on CPU, which may yield very slow results - you can upgrade for a GPU in the Settings tab
-           """) - else: - gr.HTML(f"""
-           <!-- banner markup lost in extraction; recoverable text: -->
-           ▲ Docker build from 🐙 GitHub ➔ kalaspuff/stable-diffusion-webui-controlnet-docker / 🤗 Hugging Face ➔ carloscar/stable-diffusion-webui-controlnet-docker
-           ▲ Load additional checkpoints, VAE, LoRA models, etc. Read more on the README at the GitHub link above.
-           ▲ This Space has GPU enabled - remember to remove the GPU from the space in the Settings tab when you're done.
          - """) diff --git a/spaces/xcocogoatx/WaifuCreatorAi/start.py b/spaces/xcocogoatx/WaifuCreatorAi/start.py deleted file mode 100644 index ad3a9b5da2ecf5963aac446e1d9b330729276b95..0000000000000000000000000000000000000000 --- a/spaces/xcocogoatx/WaifuCreatorAi/start.py +++ /dev/null @@ -1,12 +0,0 @@ -!pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+189828c.d20221207-cp38-cp38-linux_x86_64.whl - -!git clone https://github.com/camenduru/stable-diffusion-webui -!git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /content/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui -!git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /content/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser -!git clone https://github.com/camenduru/stable-diffusion-webui-huggingface /content/stable-diffusion-webui/extensions/stable-diffusion-webui-huggingface -%cd /content/stable-diffusion-webui - -!wget https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0-pruned.ckpt -O /content/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.ckpt -!wget https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0.vae.pt -O /content/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.vae.pt - -!python launch.py --share --xformers \ No newline at end of file diff --git a/spaces/xdecoder/Instruct-X-Decoder/xdecoder/body/xdecoder_head.py b/spaces/xdecoder/Instruct-X-Decoder/xdecoder/body/xdecoder_head.py deleted file mode 100644 index b04af973501c2c361de2b4a3a78ebbab1ae44b8a..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Instruct-X-Decoder/xdecoder/body/xdecoder_head.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -# -------------------------------------------------------- -# X-Decoder -- Generalized Decoding for Pixel, Image, and Language -# Copyright (c) 2022 Microsoft -# Licensed under The MIT License [see LICENSE for details] -# Written by Jianwei Yang (jianwyan@microsoft.com), Xueyan Zou (xueyan@cs.wisc.edu) -# -------------------------------------------------------- - -from typing import Dict - -from torch import nn - -from detectron2.layers import ShapeSpec - -from .registry import register_body -from .encoder import build_encoder -from .decoder import build_decoder -from ..utils import configurable - - -class XDecoderHead(nn.Module): - - @configurable - def __init__( - self, - input_shape: Dict[str, ShapeSpec], - *, - num_classes: int, - pixel_decoder: nn.Module, - loss_weight: float = 1.0, - ignore_value: int = -1, - # extra parameters - transformer_predictor: nn.Module, - transformer_in_feature: str, - ): - """ - NOTE: this interface is experimental. - Args: - input_shape: shapes (channels and stride) of the input features - num_classes: number of classes to predict - pixel_decoder: the pixel decoder module - loss_weight: loss weight - ignore_value: category id to be ignored during training. 
- transformer_predictor: the transformer decoder that makes prediction - transformer_in_feature: input feature name to the transformer_predictor - """ - super().__init__() - - input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) - self.in_features = [k for k, v in input_shape] - feature_strides = [v.stride for k, v in input_shape] - feature_channels = [v.channels for k, v in input_shape] - - self.ignore_value = ignore_value - self.common_stride = 4 - self.loss_weight = loss_weight - - self.pixel_decoder = pixel_decoder - self.predictor = transformer_predictor - self.transformer_in_feature = transformer_in_feature - - self.num_classes = num_classes - - @classmethod - def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec], lang_encoder: nn.Module, extra: dict): - - in_features_type = cfg['MODEL']['DECODER']['TRANSFORMER_IN_FEATURE'] - enc_cfg = cfg['MODEL']['ENCODER'] - dec_cfg = cfg['MODEL']['DECODER'] - - # figure out in_channels to transformer predictor - if in_features_type == "transformer_encoder": - transformer_predictor_in_channels = enc_cfg['CONVS_DIM'] - elif in_features_type == "pixel_embedding": - transformer_predictor_in_channels = enc_cfg['MASK_DIM'] - elif in_features_type == "multi_scale_pixel_decoder": # for maskformer2 - transformer_predictor_in_channels = enc_cfg['CONVS_DIM'] - else: - transformer_predictor_in_channels = input_shape[dec_cfg['TRANSFORMER_IN_FEATURE']].channels - - return { - "input_shape": { - k: v for k, v in input_shape.items() if k in enc_cfg['IN_FEATURES'] - }, - "ignore_value": enc_cfg['IGNORE_VALUE'], - "num_classes": enc_cfg.get('NUM_CLASSES', None), - "pixel_decoder": build_encoder(cfg, input_shape), - "loss_weight": enc_cfg['LOSS_WEIGHT'], - "transformer_in_feature": dec_cfg['TRANSFORMER_IN_FEATURE'], - "transformer_predictor": build_decoder( - cfg, - transformer_predictor_in_channels, - lang_encoder, - mask_classification=True, - extra=extra, - ), - } - - def forward(self, features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}): - return self.layers(features, mask, target_queries, target_vlp, task, extra) - - def layers(self, features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}): - mask_features, transformer_encoder_features, multi_scale_features = self.pixel_decoder.forward_features(features) - - if self.transformer_in_feature == "multi_scale_pixel_decoder": - predictions = self.predictor(multi_scale_features, mask_features, mask, target_queries, target_vlp, task, extra) - else: - if self.transformer_in_feature == "transformer_encoder": - assert ( - transformer_encoder_features is not None - ), "Please use the TransformerEncoderPixelDecoder." - predictions = self.predictor(transformer_encoder_features, mask_features, mask) - elif self.transformer_in_feature == "pixel_embedding": - predictions = self.predictor(mask_features, mask_features, mask) - else: - predictions = self.predictor(features[self.transformer_in_feature], mask_features, mask) - return predictions - - -@register_body -def get_xdecoder_head(cfg, input_shape, lang_encoder, extra): - return XDecoderHead(cfg, input_shape, lang_encoder, extra) \ No newline at end of file diff --git a/spaces/yash-srivastava19/CodeSmith/chainlit.md b/spaces/yash-srivastava19/CodeSmith/chainlit.md deleted file mode 100644 index 783f1faa6376a7a4b62f18f7700eea3194b16639..0000000000000000000000000000000000000000 --- a/spaces/yash-srivastava19/CodeSmith/chainlit.md +++ /dev/null @@ -1,16 +0,0 @@ -# Welcome to CodeSmith!! 
- -CodeSmith is a powerful tool designed to help you prototype, debug, and learn Python. CodeSmith has been trained on -Python programming problems, and knows a thing or two about maths. Here are some examples to showcase its abilities. - - - - - -## Requirements -Just Langchain, Cohere, and Chainlit. While the model could be improved, the expansion has been left as an exercise for -the community to build upon. - -## Profiles and Contacts - -This project was made entirely by Yashovardhan Srivastava (from data collection to deployment), as a part of summer 2023 vacation boredom. \ No newline at end of file diff --git a/spaces/yavuzkomecoglu/Turkish-Speech-Recognition/utils.py b/spaces/yavuzkomecoglu/Turkish-Speech-Recognition/utils.py deleted file mode 100644 index 91042206d96b9803de22abf3cef718c229c2bad6..0000000000000000000000000000000000000000 --- a/spaces/yavuzkomecoglu/Turkish-Speech-Recognition/utils.py +++ /dev/null @@ -1,99 +0,0 @@ - -import librosa -import torch -import torchaudio -from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, AutoTokenizer -from datasets import load_dataset - -import numpy as np -import re - -chars_to_ignore = [ - ",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�", - "#", "!", "?", "«", "»", "(", ")", "؛", ",", "?", ".", "!", "-", ";", ":", '"', - "“", "%", "‘", "�", "–", "…", "_", "”", '“', '„' -] - -chars_to_mapping = { -"\u200c": " ", "\u200d": " ", "\u200e": " ", "\u200f": " ", "\ufeff": " ", -} - - - -class SpeechRecognition: - def __init__(self): - print("init SpeechRecognition") - - def load_model(self): - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - #self.processor = Wav2Vec2Processor.from_pretrained("m3hrdadfi/wav2vec2-large-xlsr-turkish") - #self.model = Wav2Vec2ForCTC.from_pretrained("m3hrdadfi/wav2vec2-large-xlsr-turkish").to(self.device) - self.processor = Wav2Vec2Processor.from_pretrained("patrickvonplaten/wav2vec2-common_voice-tr-demo") - self.model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-common_voice-tr-demo").to(self.device) - - return self - - - - def multiple_replace(self, text, chars_to_mapping): - pattern = "|".join(map(re.escape, chars_to_mapping.keys())) - return re.sub(pattern, lambda m: chars_to_mapping[m.group()], str(text)) - - def remove_special_characters(self, text, chars_to_ignore_regex): - text = re.sub(chars_to_ignore_regex, '', text).lower() + " " - return text - - def normalizer(self, batch, chars_to_ignore, chars_to_mapping): - chars_to_ignore_regex = f"""[{"".join(chars_to_ignore)}]""" - text = batch["sentence"].lower().strip() - - text = text.replace("\u0307", " ").strip() - text = self.multiple_replace(text, chars_to_mapping) - text = self.remove_special_characters(text, chars_to_ignore_regex) - - batch["sentence"] = text - return batch - - - def speech_file_to_array_fn(self, batch): - speech_array, sampling_rate = torchaudio.load(batch["path"]) - speech_array = speech_array.squeeze().numpy() - speech_array = librosa.resample(np.asarray(speech_array), sampling_rate, 16_000) - - batch["speech"] = speech_array - return batch - - - def predict(self, batch): - features = self.processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) - - input_values = features.input_values.to(self.device) - attention_mask = features.attention_mask.to(self.device) - - with torch.no_grad(): - logits = self.model(input_values, attention_mask=attention_mask).logits - - pred_ids = torch.argmax(logits, dim=-1) - - batch["predicted"] = 
self.processor.batch_decode(pred_ids)[0] - return batch - - def predict_audio_file(self, speech): - features = self.processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True) - - input_values = features.input_values.to(self.device) - attention_mask = features.attention_mask.to(self.device) - - with torch.no_grad(): - logits = self.model(input_values, attention_mask=attention_mask).logits - - pred_ids = torch.argmax(logits, dim=-1) - - transcriptions = self.processor.decode(pred_ids[0]) - return transcriptions - - - def load_speech_with_file(self, audio_file): - speech, rate = librosa.load(audio_file,sr=16000) - - return speech, rate \ No newline at end of file diff --git a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/__init__.py b/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/yfyangd/PictureBookUnderstanding/BLIP/models/blip.py b/spaces/yfyangd/PictureBookUnderstanding/BLIP/models/blip.py deleted file mode 100644 index 0a189c6a1c8ba264b4e10d72edc4ba48437c5982..0000000000000000000000000000000000000000 --- a/spaces/yfyangd/PictureBookUnderstanding/BLIP/models/blip.py +++ /dev/null @@ -1,238 +0,0 @@ -''' - * Copyright (c) 2022, salesforce.com, inc. - * All rights reserved. - * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li -''' -import warnings -warnings.filterwarnings("ignore") - -from BLIP.models.vit import VisionTransformer, interpolate_pos_embed -from BLIP.models.med import BertConfig, BertModel, BertLMHeadModel -from transformers import BertTokenizer - -import torch -from torch import nn -import torch.nn.functional as F - -import os -from urllib.parse import urlparse -from timm.models.hub import download_cached_file - -class BLIP_Base(nn.Module): - def __init__(self, - med_config = 'BLIP/configs/med_config.json', - image_size = 224, - vit = 'base', - vit_grad_ckpt = False, - vit_ckpt_layer = 0, - ): - """ - Args: - med_config (str): path for the mixture of encoder-decoder model's configuration file - image_size (int): input image size - vit (str): model size of vision transformer - """ - super().__init__() - - self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) - self.tokenizer = init_tokenizer() - med_config = BertConfig.from_json_file(med_config) - med_config.encoder_width = vision_width - self.text_encoder = BertModel(config=med_config, add_pooling_layer=False) - - - def forward(self, image, caption, mode): - - assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal" - text = self.tokenizer(caption, return_tensors="pt").to(image.device) - - if mode=='image': - # return image features - image_embeds = self.visual_encoder(image) - return image_embeds - - elif mode=='text': - # return text features - text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask, - return_dict = True, mode = 'text') - return text_output.last_hidden_state - - elif mode=='multimodal': - # return multimodel features - image_embeds = self.visual_encoder(image) - image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) - - text.input_ids[:,0] = self.tokenizer.enc_token_id - output = self.text_encoder(text.input_ids, - attention_mask = text.attention_mask, - 
encoder_hidden_states = image_embeds, - encoder_attention_mask = image_atts, - return_dict = True, - ) - return output.last_hidden_state - - - -class BLIP_Decoder(nn.Module): - def __init__(self, - med_config = 'BLIP/configs/med_config.json', - image_size = 384, - vit = 'base', - vit_grad_ckpt = False, - vit_ckpt_layer = 0, - prompt = 'a picture of ', - ): - """ - Args: - med_config (str): path for the mixture of encoder-decoder model's configuration file - image_size (int): input image size - vit (str): model size of vision transformer - """ - super().__init__() - - self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) - self.tokenizer = init_tokenizer() - med_config = BertConfig.from_json_file(med_config) - med_config.encoder_width = vision_width - self.text_decoder = BertLMHeadModel(config=med_config) - - self.prompt = prompt - self.prompt_length = len(self.tokenizer(self.prompt).input_ids)-1 - - - def forward(self, image, caption): - - image_embeds = self.visual_encoder(image) - image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) - - text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device) - - text.input_ids[:,0] = self.tokenizer.bos_token_id - - decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100) - decoder_targets[:,:self.prompt_length] = -100 - - decoder_output = self.text_decoder(text.input_ids, - attention_mask = text.attention_mask, - encoder_hidden_states = image_embeds, - encoder_attention_mask = image_atts, - labels = decoder_targets, - return_dict = True, - ) - loss_lm = decoder_output.loss - - return loss_lm - - def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0): - image_embeds = self.visual_encoder(image) - - if not sample: - image_embeds = image_embeds.repeat_interleave(num_beams,dim=0) - - image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) - model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask":image_atts} - - prompt = [self.prompt] * image.size(0) - input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device) - input_ids[:,0] = self.tokenizer.bos_token_id - input_ids = input_ids[:, :-1] - - if sample: - #nucleus sampling - outputs = self.text_decoder.generate(input_ids=input_ids, - max_length=max_length, - min_length=min_length, - do_sample=True, - top_p=top_p, - num_return_sequences=1, - eos_token_id=self.tokenizer.sep_token_id, - pad_token_id=self.tokenizer.pad_token_id, - repetition_penalty=1.1, - **model_kwargs) - else: - #beam search - outputs = self.text_decoder.generate(input_ids=input_ids, - max_length=max_length, - min_length=min_length, - num_beams=num_beams, - eos_token_id=self.tokenizer.sep_token_id, - pad_token_id=self.tokenizer.pad_token_id, - repetition_penalty=repetition_penalty, - **model_kwargs) - - captions = [] - for output in outputs: - caption = self.tokenizer.decode(output, skip_special_tokens=True) - captions.append(caption[len(self.prompt):]) - return captions - - -def blip_decoder(pretrained='',**kwargs): - model = BLIP_Decoder(**kwargs) - if pretrained: - model,msg = load_checkpoint(model,pretrained) - assert(len(msg.missing_keys)==0) - return model - -def blip_feature_extractor(pretrained='',**kwargs): - model = BLIP_Base(**kwargs) - if pretrained: - model,msg = load_checkpoint(model,pretrained) - 
assert(len(msg.missing_keys)==0) - return model - -def init_tokenizer(): - tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') - tokenizer.add_special_tokens({'bos_token':'[DEC]'}) - tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']}) - tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0] - return tokenizer - - -def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0): - - assert vit in ['base', 'large'], "vit parameter must be base or large" - if vit=='base': - vision_width = 768 - visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, - num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, - drop_path_rate=drop_path_rate - ) - elif vit=='large': - vision_width = 1024 - visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, - num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, - drop_path_rate=drop_path_rate or 0.1 # default to 0.1 for ViT-large when no rate is given - ) - return visual_encoder, vision_width - -def is_url(url_or_filename): - parsed = urlparse(url_or_filename) - return parsed.scheme in ("http", "https") - -def load_checkpoint(model,url_or_filename): - if is_url(url_or_filename): - cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) - checkpoint = torch.load(cached_file, map_location='cpu') - elif os.path.isfile(url_or_filename): - checkpoint = torch.load(url_or_filename, map_location='cpu') - else: - raise RuntimeError('checkpoint url or path is invalid') - - state_dict = checkpoint['model'] - - state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder) - if 'visual_encoder_m.pos_embed' in model.state_dict().keys(): - state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], - model.visual_encoder_m) - for key in model.state_dict().keys(): - if key in state_dict.keys(): - if state_dict[key].shape!=model.state_dict()[key].shape: - del state_dict[key] - - msg = model.load_state_dict(state_dict,strict=False) - print('load checkpoint from %s'%url_or_filename) - return model,msg - diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/ibert/modeling_ibert.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/ibert/modeling_ibert.py deleted file mode 100644 index 0dcdaaf6998fd27fcf89dea2ece897ef92ad9aa5..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/ibert/modeling_ibert.py +++ /dev/null @@ -1,1356 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The I-BERT Authors (Sehoon Kim, Amir Gholami, Zhewei Yao, -# Michael Mahoney, Kurt Keutzer - UC Berkeley) and The HuggingFace Inc. team. -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
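The module below simulates integer-only inference by carrying every activation as a (tensor, scaling_factor) pair, with real values approximated as an integer grid times a scale. A minimal sketch of that symmetric fake-quantization idea, assuming 8-bit signed integers (the helper name and sample values here are illustrative, not part of the original file):

import torch

def symmetric_quantize(x: torch.Tensor, num_bits: int = 8):
    # map real values onto a signed integer grid so that x ≈ x_int * scale
    qmax = 2 ** (num_bits - 1) - 1
    scale = x.abs().max() / qmax
    x_int = torch.round(x / scale).clamp(-qmax - 1, qmax)
    return x_int, scale

x = torch.tensor([0.40, -1.25, 0.07])
x_int, scale = symmetric_quantize(x)
print(x_int * scale)  # dequantized values closely approximate the original x

QuantEmbedding, QuantLinear, QuantAct and the Int* operators used below extend this one idea to embeddings, matrix multiplies, activations and layer norm, which is why nearly every forward method in this file returns a tensor together with its scaling factor.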
- -"""PyTorch I-BERT model.""" - -import math -from typing import Optional, Tuple, Union - -import torch -import torch.utils.checkpoint -from torch import nn -from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss - -from ...activations import gelu -from ...modeling_outputs import ( - BaseModelOutputWithPastAndCrossAttentions, - BaseModelOutputWithPoolingAndCrossAttentions, - MaskedLMOutput, - MultipleChoiceModelOutput, - QuestionAnsweringModelOutput, - SequenceClassifierOutput, - TokenClassifierOutput, -) -from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer -from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging -from .configuration_ibert import IBertConfig -from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "kssteven/ibert-roberta-base" -_CONFIG_FOR_DOC = "IBertConfig" - -IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "kssteven/ibert-roberta-base", - "kssteven/ibert-roberta-large", - "kssteven/ibert-roberta-large-mnli", -] - - -class IBertEmbeddings(nn.Module): - """ - Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. - """ - - def __init__(self, config): - super().__init__() - self.quant_mode = config.quant_mode - self.embedding_bit = 8 - self.embedding_act_bit = 16 - self.act_bit = 8 - self.ln_input_bit = 22 - self.ln_output_bit = 32 - - self.word_embeddings = QuantEmbedding( - config.vocab_size, - config.hidden_size, - padding_idx=config.pad_token_id, - weight_bit=self.embedding_bit, - quant_mode=self.quant_mode, - ) - self.token_type_embeddings = QuantEmbedding( - config.type_vocab_size, config.hidden_size, weight_bit=self.embedding_bit, quant_mode=self.quant_mode - ) - - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.register_buffer( - "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False - ) - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - - # End copy - self.padding_idx = config.pad_token_id - self.position_embeddings = QuantEmbedding( - config.max_position_embeddings, - config.hidden_size, - padding_idx=self.padding_idx, - weight_bit=self.embedding_bit, - quant_mode=self.quant_mode, - ) - - # Integer-only addition between embeddings - self.embeddings_act1 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode) - self.embeddings_act2 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = IntLayerNorm( - config.hidden_size, - eps=config.layer_norm_eps, - output_bit=self.ln_output_bit, - quant_mode=self.quant_mode, - force_dequant=config.force_dequant, - ) - self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward( - self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 - ): - if position_ids is None: - if input_ids is not None: - # Create the position ids from the input token ids. Any padded tokens remain padded. 
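# A concrete trace of this scheme (values chosen purely for illustration): with padding_idx = 1
# and input_ids = [[5, 6, 1, 1]], the non-padding mask is [[1, 1, 0, 0]], its cumulative sum is
# [[1, 2, 2, 2]], re-masking gives [[1, 2, 0, 0]], and adding padding_idx yields
# position_ids = [[2, 3, 1, 1]]: real tokens count upwards from padding_idx + 1 while every
# padding slot stays at padding_idx.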
- position_ids = create_position_ids_from_input_ids( - input_ids, self.padding_idx, past_key_values_length - ).to(input_ids.device) - else: - position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) - - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) - - if inputs_embeds is None: - inputs_embeds, inputs_embeds_scaling_factor = self.word_embeddings(input_ids) - else: - inputs_embeds_scaling_factor = None - token_type_embeddings, token_type_embeddings_scaling_factor = self.token_type_embeddings(token_type_ids) - - embeddings, embeddings_scaling_factor = self.embeddings_act1( - inputs_embeds, - inputs_embeds_scaling_factor, - identity=token_type_embeddings, - identity_scaling_factor=token_type_embeddings_scaling_factor, - ) - - if self.position_embedding_type == "absolute": - position_embeddings, position_embeddings_scaling_factor = self.position_embeddings(position_ids) - embeddings, embeddings_scaling_factor = self.embeddings_act1( - embeddings, - embeddings_scaling_factor, - identity=position_embeddings, - identity_scaling_factor=position_embeddings_scaling_factor, - ) - - embeddings, embeddings_scaling_factor = self.LayerNorm(embeddings, embeddings_scaling_factor) - embeddings = self.dropout(embeddings) - embeddings, embeddings_scaling_factor = self.output_activation(embeddings, embeddings_scaling_factor) - return embeddings, embeddings_scaling_factor - - def create_position_ids_from_inputs_embeds(self, inputs_embeds): - """ - We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. - - Args: - inputs_embeds: torch.Tensor - - Returns: torch.Tensor - """ - input_shape = inputs_embeds.size()[:-1] - sequence_length = input_shape[1] - - position_ids = torch.arange( - self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device - ) - return position_ids.unsqueeze(0).expand(input_shape) - - -class IBertSelfAttention(nn.Module): - def __init__(self, config): - super().__init__() - if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): - raise ValueError( - f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " - f"heads ({config.num_attention_heads})" - ) - self.quant_mode = config.quant_mode - self.weight_bit = 8 - self.bias_bit = 32 - self.act_bit = 8 - - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - - # Q, K, V Linear layers - self.query = QuantLinear( - config.hidden_size, - self.all_head_size, - bias=True, - weight_bit=self.weight_bit, - bias_bit=self.bias_bit, - quant_mode=self.quant_mode, - per_channel=True, - ) - self.key = QuantLinear( - config.hidden_size, - self.all_head_size, - bias=True, - weight_bit=self.weight_bit, - bias_bit=self.bias_bit, - quant_mode=self.quant_mode, - per_channel=True, - ) - self.value = QuantLinear( - config.hidden_size, - self.all_head_size, - bias=True, - weight_bit=self.weight_bit, - bias_bit=self.bias_bit, - quant_mode=self.quant_mode, - per_channel=True, - ) - - # Requantization (32bit -> 8bit) for Q, K, V activations - self.query_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) - self.key_activation 
= QuantAct(self.act_bit, quant_mode=self.quant_mode) - self.value_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) - self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if self.position_embedding_type != "absolute": - raise ValueError("I-BERT only supports 'absolute' for `config.position_embedding_type`") - - self.softmax = IntSoftmax(self.act_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant) - - def transpose_for_scores(self, x): - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = x.view(*new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, - hidden_states, - hidden_states_scaling_factor, - attention_mask=None, - head_mask=None, - output_attentions=False, - ): - # Projection - mixed_query_layer, mixed_query_layer_scaling_factor = self.query(hidden_states, hidden_states_scaling_factor) - mixed_key_layer, mixed_key_layer_scaling_factor = self.key(hidden_states, hidden_states_scaling_factor) - mixed_value_layer, mixed_value_layer_scaling_factor = self.value(hidden_states, hidden_states_scaling_factor) - - # Requantization - query_layer, query_layer_scaling_factor = self.query_activation( - mixed_query_layer, mixed_query_layer_scaling_factor - ) - key_layer, key_layer_scaling_factor = self.key_activation(mixed_key_layer, mixed_key_layer_scaling_factor) - value_layer, value_layer_scaling_factor = self.value_activation( - mixed_value_layer, mixed_value_layer_scaling_factor - ) - - # Transpose - query_layer = self.transpose_for_scores(query_layer) - key_layer = self.transpose_for_scores(key_layer) - value_layer = self.transpose_for_scores(value_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - scale = math.sqrt(self.attention_head_size) - attention_scores = attention_scores / scale - if self.quant_mode: - attention_scores_scaling_factor = query_layer_scaling_factor * key_layer_scaling_factor / scale - else: - attention_scores_scaling_factor = None - - if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in IBertModel forward() function) - attention_scores = attention_scores + attention_mask - - # Normalize the attention scores to probabilities. - attention_probs, attention_probs_scaling_factor = self.softmax( - attention_scores, attention_scores_scaling_factor - ) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. 
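# Note on the scale bookkeeping above: each tensor is carried as real_value ≈ int_value * scaling_factor,
# so dividing the raw scores by sqrt(attention_head_size) while also dividing their scaling factor by the
# same amount leaves the implied integer representation unchanged; the 1/sqrt(d) is absorbed into the
# scale, which an integer-only kernel can then apply in a single rescaling step.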
- attention_probs = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs = attention_probs * head_mask - - context_layer = torch.matmul(attention_probs, value_layer) - if attention_probs_scaling_factor is not None: - context_layer_scaling_factor = attention_probs_scaling_factor * value_layer_scaling_factor - else: - context_layer_scaling_factor = None - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(*new_context_layer_shape) - - # requantization: 32-bit -> 8-bit - context_layer, context_layer_scaling_factor = self.output_activation( - context_layer, context_layer_scaling_factor - ) - - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - output_scaling_factor = ( - (context_layer_scaling_factor, attention_probs_scaling_factor) - if output_attentions - else (context_layer_scaling_factor,) - ) - - return outputs, output_scaling_factor - - -class IBertSelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.quant_mode = config.quant_mode - self.act_bit = 8 - self.weight_bit = 8 - self.bias_bit = 32 - self.ln_input_bit = 22 - self.ln_output_bit = 32 - - self.dense = QuantLinear( - config.hidden_size, - config.hidden_size, - bias=True, - weight_bit=self.weight_bit, - bias_bit=self.bias_bit, - quant_mode=self.quant_mode, - per_channel=True, - ) - self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode) - self.LayerNorm = IntLayerNorm( - config.hidden_size, - eps=config.layer_norm_eps, - output_bit=self.ln_output_bit, - quant_mode=self.quant_mode, - force_dequant=config.force_dequant, - ) - self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor): - hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor) - hidden_states = self.dropout(hidden_states) - hidden_states, hidden_states_scaling_factor = self.ln_input_act( - hidden_states, - hidden_states_scaling_factor, - identity=input_tensor, - identity_scaling_factor=input_tensor_scaling_factor, - ) - hidden_states, hidden_states_scaling_factor = self.LayerNorm(hidden_states, hidden_states_scaling_factor) - - hidden_states, hidden_states_scaling_factor = self.output_activation( - hidden_states, hidden_states_scaling_factor - ) - return hidden_states, hidden_states_scaling_factor - - -class IBertAttention(nn.Module): - def __init__(self, config): - super().__init__() - self.quant_mode = config.quant_mode - self.self = IBertSelfAttention(config) - self.output = IBertSelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads - ) - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = 
self.self.attention_head_size * self.self.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states, - hidden_states_scaling_factor, - attention_mask=None, - head_mask=None, - output_attentions=False, - ): - self_outputs, self_outputs_scaling_factor = self.self( - hidden_states, - hidden_states_scaling_factor, - attention_mask, - head_mask, - output_attentions, - ) - attention_output, attention_output_scaling_factor = self.output( - self_outputs[0], self_outputs_scaling_factor[0], hidden_states, hidden_states_scaling_factor - ) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - outputs_scaling_factor = (attention_output_scaling_factor,) + self_outputs_scaling_factor[1:] - return outputs, outputs_scaling_factor - - -class IBertIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.quant_mode = config.quant_mode - self.act_bit = 8 - self.weight_bit = 8 - self.bias_bit = 32 - self.dense = QuantLinear( - config.hidden_size, - config.intermediate_size, - bias=True, - weight_bit=self.weight_bit, - bias_bit=self.bias_bit, - quant_mode=self.quant_mode, - per_channel=True, - ) - if config.hidden_act != "gelu": - raise ValueError("I-BERT only supports 'gelu' for `config.hidden_act`") - self.intermediate_act_fn = IntGELU(quant_mode=self.quant_mode, force_dequant=config.force_dequant) - self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) - - def forward(self, hidden_states, hidden_states_scaling_factor): - hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor) - hidden_states, hidden_states_scaling_factor = self.intermediate_act_fn( - hidden_states, hidden_states_scaling_factor - ) - - # Requantization: 32bit -> 8-bit - hidden_states, hidden_states_scaling_factor = self.output_activation( - hidden_states, hidden_states_scaling_factor - ) - return hidden_states, hidden_states_scaling_factor - - -class IBertOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.quant_mode = config.quant_mode - self.act_bit = 8 - self.weight_bit = 8 - self.bias_bit = 32 - self.ln_input_bit = 22 - self.ln_output_bit = 32 - - self.dense = QuantLinear( - config.intermediate_size, - config.hidden_size, - bias=True, - weight_bit=self.weight_bit, - bias_bit=self.bias_bit, - quant_mode=self.quant_mode, - per_channel=True, - ) - self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode) - self.LayerNorm = IntLayerNorm( - config.hidden_size, - eps=config.layer_norm_eps, - output_bit=self.ln_output_bit, - quant_mode=self.quant_mode, - force_dequant=config.force_dequant, - ) - self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor): - hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor) - hidden_states = self.dropout(hidden_states) - hidden_states, hidden_states_scaling_factor = self.ln_input_act( - hidden_states, - hidden_states_scaling_factor, - identity=input_tensor, - identity_scaling_factor=input_tensor_scaling_factor, - ) - hidden_states, hidden_states_scaling_factor = self.LayerNorm(hidden_states, hidden_states_scaling_factor) - - hidden_states, hidden_states_scaling_factor = self.output_activation( - hidden_states, hidden_states_scaling_factor - ) - return 
hidden_states, hidden_states_scaling_factor - - -class IBertLayer(nn.Module): - def __init__(self, config): - super().__init__() - self.quant_mode = config.quant_mode - self.act_bit = 8 - - self.seq_len_dim = 1 - self.attention = IBertAttention(config) - self.intermediate = IBertIntermediate(config) - self.output = IBertOutput(config) - - self.pre_intermediate_act = QuantAct(self.act_bit, quant_mode=self.quant_mode) - self.pre_output_act = QuantAct(self.act_bit, quant_mode=self.quant_mode) - - def forward( - self, - hidden_states, - hidden_states_scaling_factor, - attention_mask=None, - head_mask=None, - output_attentions=False, - ): - self_attention_outputs, self_attention_outputs_scaling_factor = self.attention( - hidden_states, - hidden_states_scaling_factor, - attention_mask, - head_mask, - output_attentions=output_attentions, - ) - attention_output = self_attention_outputs[0] - attention_output_scaling_factor = self_attention_outputs_scaling_factor[0] - - outputs = self_attention_outputs[1:] # add self attentions if we output attention weights - - layer_output, layer_output_scaling_factor = self.feed_forward_chunk( - attention_output, attention_output_scaling_factor - ) - outputs = (layer_output,) + outputs - - return outputs - - def feed_forward_chunk(self, attention_output, attention_output_scaling_factor): - attention_output, attention_output_scaling_factor = self.pre_intermediate_act( - attention_output, attention_output_scaling_factor - ) - intermediate_output, intermediate_output_scaling_factor = self.intermediate( - attention_output, attention_output_scaling_factor - ) - - intermediate_output, intermediate_output_scaling_factor = self.pre_output_act( - intermediate_output, intermediate_output_scaling_factor - ) - layer_output, layer_output_scaling_factor = self.output( - intermediate_output, intermediate_output_scaling_factor, attention_output, attention_output_scaling_factor - ) - return layer_output, layer_output_scaling_factor - - -class IBertEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.quant_mode = config.quant_mode - self.layer = nn.ModuleList([IBertLayer(config) for _ in range(config.num_hidden_layers)]) - - def forward( - self, - hidden_states, - hidden_states_scaling_factor, - attention_mask=None, - head_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, - ): - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = None # `config.add_cross_attention` is not supported - next_decoder_cache = None # `config.use_cache` is not supported - - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_head_mask = head_mask[i] if head_mask is not None else None - - layer_outputs = layer_module( - hidden_states, - hidden_states_scaling_factor, - attention_mask, - layer_head_mask, - output_attentions, - ) - - hidden_states = layer_outputs[0] - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - next_decoder_cache, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) - return BaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - 
past_key_values=next_decoder_cache, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, - ) - - -class IBertPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.quant_mode = config.quant_mode - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.activation = nn.Tanh() - - def forward(self, hidden_states): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -class IBertPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = IBertConfig - base_model_prefix = "ibert" - - def _init_weights(self, module): - """Initialize the weights""" - if isinstance(module, (QuantLinear, nn.Linear)): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, (QuantEmbedding, nn.Embedding)): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - elif isinstance(module, (IntLayerNorm, nn.LayerNorm)): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def resize_token_embeddings(self, new_num_tokens=None): - raise NotImplementedError("`resize_token_embeddings` is not supported for I-BERT.") - - -IBERT_START_DOCSTRING = r""" - - This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. - Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage - and behavior. - - Parameters: - config ([`IBertConfig`]): Model configuration class with all the parameters of the - model. Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -IBERT_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, - 1]`: - - - 0 corresponds to a *sentence A* token, - - 1 corresponds to a *sentence B* token.
- - [What are token type IDs?](../glossary#token-type-ids) - position_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert `input_ids` indices into associated vectors than the - model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -@add_start_docstrings( - "The bare I-BERT Model transformer outputting raw hidden-states without any specific head on top.", - IBERT_START_DOCSTRING, -) -class IBertModel(IBertPreTrainedModel): - """ - - The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of - cross-attention is added between the self-attention layers, following the architecture described in [Attention is - all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, - Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. - - """ - - def __init__(self, config, add_pooling_layer=True): - super().__init__(config) - self.config = config - self.quant_mode = config.quant_mode - - self.embeddings = IBertEmbeddings(config) - self.encoder = IBertEncoder(config) - - self.pooler = IBertPooler(config) if add_pooling_layer else None - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=BaseModelOutputWithPoolingAndCrossAttentions, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, Tuple[torch.FloatTensor]]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) - input_shape = input_ids.size() - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - batch_size, seq_length = input_shape - device = input_ids.device if input_ids is not None else inputs_embeds.device - - if attention_mask is None: - attention_mask = torch.ones(((batch_size, seq_length)), device=device) - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) - - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. 
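# An illustrative trace of get_extended_attention_mask (values made up): a padding mask [[1, 1, 0]]
# is reshaped to [batch_size, 1, 1, seq_length] so it broadcasts over heads and query positions, then
# remapped so kept positions add 0.0 to the raw attention scores and masked positions add a very large
# negative number, which the softmax turns into a weight of ~0.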
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) - - # Prepare head mask if needed - # 1.0 in head_mask indicates we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - embedding_output, embedding_output_scaling_factor = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - token_type_ids=token_type_ids, - inputs_embeds=inputs_embeds, - ) - encoder_outputs = self.encoder( - embedding_output, - embedding_output_scaling_factor, - attention_mask=extended_attention_mask, - head_mask=head_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = encoder_outputs[0] - pooled_output = self.pooler(sequence_output) if self.pooler is not None else None - - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - - return BaseModelOutputWithPoolingAndCrossAttentions( - last_hidden_state=sequence_output, - pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, - ) - - -@add_start_docstrings("""I-BERT Model with a `language modeling` head on top.""", IBERT_START_DOCSTRING) -class IBertForMaskedLM(IBertPreTrainedModel): - _tied_weights_keys = ["lm_head.decoder.bias", "lm_head.decoder.weight"] - - def __init__(self, config): - super().__init__(config) - - self.ibert = IBertModel(config, add_pooling_layer=False) - self.lm_head = IBertLMHead(config) - - # Initialize weights and apply final processing - self.post_init() - - def get_output_embeddings(self): - return self.lm_head.decoder - - def set_output_embeddings(self, new_embeddings): - self.lm_head.decoder = new_embeddings - - @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=MaskedLMOutput, - config_class=_CONFIG_FOR_DOC, - mask="<mask>", - ) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - labels: Optional[torch.LongTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., - config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the - loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` - kwargs (`Dict[str, any]`, optional, defaults to *{}*): - Used to hide legacy arguments that have been deprecated.
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.ibert( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = outputs[0] - prediction_scores = self.lm_head(sequence_output) - - masked_lm_loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) - - if not return_dict: - output = (prediction_scores,) + outputs[2:] - return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output - - return MaskedLMOutput( - loss=masked_lm_loss, - logits=prediction_scores, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -class IBertLMHead(nn.Module): - """I-BERT Head for masked language modeling.""" - - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - self.decoder = nn.Linear(config.hidden_size, config.vocab_size) - self.bias = nn.Parameter(torch.zeros(config.vocab_size)) - self.decoder.bias = self.bias - - def forward(self, features, **kwargs): - x = self.dense(features) - x = gelu(x) - x = self.layer_norm(x) - - # project back to size of vocabulary with bias - x = self.decoder(x) - - return x - - def _tie_weights(self): - # To tie those two weights if they get disconnected (on TPU or when the bias is resized) - self.bias = self.decoder.bias - - -@add_start_docstrings( - """ - I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled - output) e.g. for GLUE tasks. - """, - IBERT_START_DOCSTRING, -) -class IBertForSequenceClassification(IBertPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.ibert = IBertModel(config, add_pooling_layer=False) - self.classifier = IBertClassificationHead(config) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=SequenceClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - labels: Optional[torch.LongTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: - r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., - config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If - `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.ibert( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = outputs[0] - logits = self.classifier(sequence_output) - - loss = None - if labels is not None: - if self.config.problem_type is None: - if self.num_labels == 1: - self.config.problem_type = "regression" - elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): - self.config.problem_type = "single_label_classification" - else: - self.config.problem_type = "multi_label_classification" - - if self.config.problem_type == "regression": - loss_fct = MSELoss() - if self.num_labels == 1: - loss = loss_fct(logits.squeeze(), labels.squeeze()) - else: - loss = loss_fct(logits, labels) - elif self.config.problem_type == "single_label_classification": - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - elif self.config.problem_type == "multi_label_classification": - loss_fct = BCEWithLogitsLoss() - loss = loss_fct(logits, labels) - if not return_dict: - output = (logits,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return SequenceClassifierOutput( - loss=loss, - logits=logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -@add_start_docstrings( - """ - I-BERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a - softmax) e.g. for RocStories/SWAG tasks. - """, - IBERT_START_DOCSTRING, -) -class IBertForMultipleChoice(IBertPreTrainedModel): - def __init__(self, config): - super().__init__(config) - - self.ibert = IBertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, 1) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=MultipleChoiceModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - labels: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[MultipleChoiceModelOutput, Tuple[torch.FloatTensor]]: - r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., - num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See - `input_ids` above) - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] - - flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None - flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None - flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None - flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None - flat_inputs_embeds = ( - inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) - if inputs_embeds is not None - else None - ) - - outputs = self.ibert( - flat_input_ids, - position_ids=flat_position_ids, - token_type_ids=flat_token_type_ids, - attention_mask=flat_attention_mask, - head_mask=head_mask, - inputs_embeds=flat_inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - pooled_output = outputs[1] - - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - reshaped_logits = logits.view(-1, num_choices) - - loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - loss = loss_fct(reshaped_logits, labels) - - if not return_dict: - output = (reshaped_logits,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return MultipleChoiceModelOutput( - loss=loss, - logits=reshaped_logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -@add_start_docstrings( - """ - I-BERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for - Named-Entity-Recognition (NER) tasks. - """, - IBERT_START_DOCSTRING, -) -class IBertForTokenClassification(IBertPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.ibert = IBertModel(config, add_pooling_layer=False) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, config.num_labels) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TokenClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - labels: Optional[torch.LongTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.ibert( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - - sequence_output = self.dropout(sequence_output) - logits = self.classifier(sequence_output) - - loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - - if not return_dict: - output = (logits,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return TokenClassifierOutput( - loss=loss, - logits=logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -class IBertClassificationHead(nn.Module): - """Head for sentence-level classification tasks.""" - - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.out_proj = nn.Linear(config.hidden_size, config.num_labels) - - def forward(self, features, **kwargs): - hidden_states = features[:, 0, :] # take <s> token (equiv. to [CLS]) - hidden_states = self.dropout(hidden_states) - hidden_states = self.dense(hidden_states) - hidden_states = torch.tanh(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.out_proj(hidden_states) - return hidden_states - - -@add_start_docstrings( - """ - I-BERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear - layers on top of the hidden-states output to compute `span start logits` and `span end logits`). - """, - IBERT_START_DOCSTRING, -) -class IBertForQuestionAnswering(IBertPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.ibert = IBertModel(config, add_pooling_layer=False) - self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=QuestionAnsweringModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - start_positions: Optional[torch.LongTensor] = None, - end_positions: Optional[torch.LongTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.FloatTensor]]: - r""" - start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the start of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence - are not taken into account for computing the loss.
- end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the end of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence - are not taken into account for computing the loss. - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.ibert( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - - logits = self.qa_outputs(sequence_output) - start_logits, end_logits = logits.split(1, dim=-1) - start_logits = start_logits.squeeze(-1).contiguous() - end_logits = end_logits.squeeze(-1).contiguous() - - total_loss = None - if start_positions is not None and end_positions is not None: - # If we are on multi-GPU, the split can add an extra dimension; squeeze it - if len(start_positions.size()) > 1: - start_positions = start_positions.squeeze(-1) - if len(end_positions.size()) > 1: - end_positions = end_positions.squeeze(-1) - # sometimes the start/end positions are outside our model inputs; we ignore these terms - ignored_index = start_logits.size(1) - start_positions = start_positions.clamp(0, ignored_index) - end_positions = end_positions.clamp(0, ignored_index) - - loss_fct = CrossEntropyLoss(ignore_index=ignored_index) - start_loss = loss_fct(start_logits, start_positions) - end_loss = loss_fct(end_logits, end_positions) - total_loss = (start_loss + end_loss) / 2 - - if not return_dict: - output = (start_logits, end_logits) + outputs[2:] - return ((total_loss,) + output) if total_loss is not None else output - - return QuestionAnsweringModelOutput( - loss=total_loss, - start_logits=start_logits, - end_logits=end_logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): - """ - Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols - are ignored. This is modified from fairseq's *utils.make_positions*. - - Args: - input_ids (`torch.LongTensor`): - Indices of input sequence tokens in the vocabulary. - - Returns: torch.Tensor - """ - # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. - mask = input_ids.ne(padding_idx).int() - incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask - return incremental_indices.long() + padding_idx diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/idefics/modeling_idefics.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/idefics/modeling_idefics.py deleted file mode 100644 index 316f36561308f046cef2bca9ec9af0fe7bba4d6f..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/idefics/modeling_idefics.py +++ /dev/null @@ -1,1594 +0,0 @@ -# coding=utf-8 -# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. -# -# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX -# and OPT implementations in this library.
It has been modified from its -# original forms to accommodate minor architectural differences compared -# to GPT-NeoX and OPT used by the Meta AI team that trained the model. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" PyTorch Idefics model.""" -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union - -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -from torch import nn -from torch.nn import CrossEntropyLoss - -from ... import PreTrainedModel -from ...activations import ACT2FN -from ...modeling_outputs import ModelOutput -from ...modeling_utils import PretrainedConfig -from ...pytorch_utils import ALL_LAYERNORM_LAYERS -from ...utils import ( - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, - replace_return_docstrings, -) -from .configuration_idefics import IdeficsConfig -from .perceiver import IdeficsPerceiverResampler -from .vision import IdeficsVisionTransformer - - -logger = logging.get_logger(__name__) - -_CONFIG_FOR_DOC = "IdeficsConfig" - -IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "HuggingFaceM4/idefics-9b", - "HuggingFaceM4/idefics-80b", - # See all Idefics models at https://huggingface.co/models?filter=idefics -] - - -@dataclass -class IdeficsBaseModelOutputWithPast(ModelOutput): - """ - Base class for Idefics model's outputs that may also contain a past key/values (to speed up sequential decoding). - - Args: - last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Sequence of hidden-states at the output of the last layer of the model. - - If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, - hidden_size)` is output. - past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape - `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if - `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, - encoder_sequence_length, embed_size_per_head)`. - - Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if - `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` - input) to speed up sequential decoding. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + - one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
- attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - image_hidden_states (`tuple(torch.FloatTensor)`, *optional*): - Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images, - sequence_length, hidden_size)`. - - image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver - """ - - last_hidden_state: torch.FloatTensor = None - past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class IdeficsCausalLMOutputWithPast(ModelOutput): - """ - Base class for Idefics causal language model (or autoregressive) outputs. - - Args: - loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): - Language modeling loss (for next-token prediction). - logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): - Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). - past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape - `(batch_size, num_heads, sequence_length, embed_size_per_head)`) - - Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see - `past_key_values` input) to speed up sequential decoding. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + - one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - image_hidden_states (`tuple(torch.FloatTensor)`, *optional*): - Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images, - sequence_length, hidden_size)`. 
- - image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver - """ - - loss: Optional[torch.FloatTensor] = None - logits: torch.FloatTensor = None - past_key_values: Optional[List[torch.FloatTensor]] = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None - - -def expand_inputs_for_generation( - input_ids, - expand_size=1, - is_encoder_decoder=False, - attention_mask=None, - encoder_outputs=None, - **model_kwargs, -): - expanded_return_idx = ( - torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) - ) - input_ids = input_ids.index_select(0, expanded_return_idx) - model_kwargs["pixel_values"] = model_kwargs.get("pixel_values", None) - model_kwargs["image_encoder_embeddings"] = model_kwargs.get("image_encoder_embeddings", None) - model_kwargs["perceiver_embeddings"] = model_kwargs.get("perceiver_embeddings", None) - model_kwargs["image_attention_mask"] = model_kwargs.get("image_attention_mask", None) - - if "token_type_ids" in model_kwargs: - token_type_ids = model_kwargs["token_type_ids"] - model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx) - - if attention_mask is not None: - model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx) - - if model_kwargs["image_attention_mask"] is not None: - model_kwargs["image_attention_mask"] = model_kwargs["image_attention_mask"].index_select( - 0, expanded_return_idx - ) - - if model_kwargs["pixel_values"] is not None: - model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(0, expanded_return_idx) - - elif model_kwargs["image_encoder_embeddings"] is not None: - model_kwargs["image_encoder_embeddings"] = model_kwargs["image_encoder_embeddings"].index_select( - 0, expanded_return_idx - ) - - elif model_kwargs["perceiver_embeddings"] is not None: - model_kwargs["perceiver_embeddings"] = model_kwargs["perceiver_embeddings"].index_select( - 0, expanded_return_idx - ) - - return input_ids, model_kwargs - - -def update_model_kwargs_for_generation(outputs, model_kwargs): - # must have this key set to at least None - if "past_key_values" in outputs: - model_kwargs["past_key_values"] = outputs.past_key_values - else: - model_kwargs["past_key_values"] = None - - # update token_type_ids with last value - if "token_type_ids" in model_kwargs: - token_type_ids = model_kwargs["token_type_ids"] - model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) - - # update attention masks - if "attention_mask" in model_kwargs: - attention_mask = model_kwargs["attention_mask"] - model_kwargs["attention_mask"] = torch.cat( - [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 - ) - if "image_attention_mask" in model_kwargs: - image_attention_mask = model_kwargs["image_attention_mask"] - last_mask = image_attention_mask[:, -1, :].unsqueeze(1) - model_kwargs["image_attention_mask"] = last_mask - - # Get the precomputed image_hidden_states - model_kwargs["image_hidden_states"] = outputs.image_hidden_states - - return model_kwargs - - -def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs): - token_type_ids = kwargs.get("token_type_ids", None) - # only last token for inputs_ids if past is defined in kwargs - if past_key_values: - input_ids = input_ids[:, -1].unsqueeze(-1) - if token_type_ids is 
not None: - token_type_ids = token_type_ids[:, -1].unsqueeze(-1) - - attention_mask = kwargs.get("attention_mask", None) - position_ids = kwargs.get("position_ids", None) - - if attention_mask is not None and position_ids is None: - # create position_ids on the fly for batch generation - position_ids = attention_mask.long().cumsum(-1) - 1 - position_ids.masked_fill_(attention_mask == 0, 1) - if past_key_values: - position_ids = position_ids[:, -1].unsqueeze(-1) - - pixel_values = kwargs.get("pixel_values", None) - image_encoder_embeddings = kwargs.get("image_encoder_embeddings", None) - perceiver_embeddings = kwargs.get("perceiver_embeddings", None) - image_attention_mask = kwargs.get("image_attention_mask", None) - interpolate_pos_encoding = kwargs.get("interpolate_pos_encoding", False) - - return { - "input_ids": input_ids, - "past_key_values": past_key_values, - "use_cache": kwargs.get("use_cache"), - "position_ids": position_ids, - "attention_mask": attention_mask, - "token_type_ids": token_type_ids, - "pixel_values": pixel_values, - "image_encoder_embeddings": image_encoder_embeddings, - "perceiver_embeddings": perceiver_embeddings, - "image_attention_mask": image_attention_mask, - "interpolate_pos_encoding": interpolate_pos_encoding, - } - - -def freeze_model(model, module_exceptions=[]): - mapping = { - "LayerNorm": nn.LayerNorm, - "Linear": nn.Linear, - "Embedding": nn.Embedding, - } - module_exceptions_mapped = [mapping[m] for m in module_exceptions] - for module in model.modules(): - if module_exceptions and any(isinstance(module, t) for t in module_exceptions_mapped): - module.requires_grad_(True) # Explicitly setting it to true to avoid any mistakes - else: - module.requires_grad_(False) - return model - - -class IdeficsDecoupledEmbedding(nn.Embedding): - # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/sparse.html#Embedding - """ - Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings. In practice, the - regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `num_additional_embeddings` > 0, - then it will create `num_additional_embeddings` additional parameters that are always trained. If - `num_additional_embeddings=0`, then the module defaults back to the regular behavior of `nn.Embedding`. - """ - - def __init__( - self, - num_embeddings, - num_additional_embeddings, - embedding_dim, - partially_freeze: Optional[bool] = False, - device=None, - dtype=None, - padding_idx=None, - **kwargs, - ) -> None: - """ - Args: - num_embeddings (`int`): - Size of the dictionary of embeddings - num_additional_embeddings (`int`): - Number of additional embeddings. Only useful when `partially_freeze=True`. - embedding_dim (`int`): - The size of each embedding vector - partially_freeze: (`bool`, *optional*, defaults to `False`): - If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen. - padding_idx (`int`, *optional*): - The padding index (needs to be less than num_embeddings) - - Note: there are a lot of other parameters to initialize a standard `nn.Embedding` such as `padding_idx`, - `max_norm` or `norm_type`. We are not supporting these. - """ - if padding_idx is not None and padding_idx >= num_embeddings: - raise ValueError(f"padding_idx must be within num_embeddings. 
Got {padding_idx} and {num_embeddings}") - super().__init__( - num_embeddings=num_embeddings, - embedding_dim=embedding_dim, - device=device, - dtype=dtype, - padding_idx=padding_idx, - **kwargs, - ) - self.num_embeddings = num_embeddings - self.padding_idx = padding_idx - self.num_additional_embeddings = num_additional_embeddings - self.partially_freeze = partially_freeze - - if partially_freeze: - self.weight.requires_grad_(False) - - if self.num_additional_embeddings > 0: - self.additional_embedding = nn.Embedding( - num_embeddings=self.num_additional_embeddings, - embedding_dim=embedding_dim, - device=device, - dtype=dtype, - ) - - def forward(self, input_ids): - """ - we have 2 embeddings, with different indices - one pretrained self.weight and another - self.additional_embedding.weight that is being trained. - - in order to make a lookup of the input ids, we: - 1. find out the indices of the entries belonging to the 2nd embedding - 2. extract those values while subtracting the size of the first embedding (num_embeddings), since the 2nd - embedding starts from 0 and not num_embeddings - 3. perform the 2nd embedding lookup - 4. now we handle the 1st embedding, we overwrite indices belonging to the 2nd embedding with a padding index - 5. perform the 1st embedding lookup - 6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup - - note: for the 1st embedding lookup we could have looked up only the low indices and not do the padding, but - then we have to create a new tensor and populate it with 2 tensors that are spread out across various indices - - i.e. not a simple concat - I haven't benchmarked the complex case if it's any faster, given that seqlens are - usually relatively short it's probably not faster or if faster not by much - but might be a good idea to - measure. - - """ - if self.num_additional_embeddings == 0: - return F.embedding(input_ids, self.weight) - - # Clone so that we don't modify the original input_ids later on - input_ids = input_ids.clone() - additional_vocab_indices = torch.where(input_ids >= self.num_embeddings) - input_ids_additional_vocab = input_ids[additional_vocab_indices] - additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings) - - # for successful lookup replace input_ids with 0, the results of these will be discarded anyway - input_ids[additional_vocab_indices] = 0 - full_vector = F.embedding(input_ids, self.weight) - - # overwrite the records with high indices - full_vector[additional_vocab_indices] = additional_embeddings - - return full_vector - - def extra_repr(self) -> str: - return "num_embeddings={}, num_additional_embeddings={}, embedding_dim={}, partially_freeze={}".format( - self.num_embeddings, - self.num_additional_embeddings, - self.embedding_dim, - self.partially_freeze, - ) - - -class IdeficsDecoupledLinear(nn.Linear): - # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear - """ - Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practice, the - regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0, - then it will create `out_additional_features * in_features` additional parameters that are always trained. If - `out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`. 
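# Usage sketch for IdeficsDecoupledEmbedding above (toy sizes, assuming `torch`
# is in scope): with partially_freeze=True the base vocabulary rows are frozen
# while the additional rows stay trainable, and any id >= num_embeddings is
# routed to the additional table.
emb = IdeficsDecoupledEmbedding(
    num_embeddings=10, num_additional_embeddings=2, embedding_dim=4, partially_freeze=True
)
assert emb.weight.requires_grad is False
assert emb.additional_embedding.weight.requires_grad is True
ids = torch.tensor([[1, 9, 10, 11]])  # 10 and 11 fall in the additional vocabulary
assert emb(ids).shape == (1, 4, 4)    # (batch, seq_len, embedding_dim)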
- """ - - def __init__( - self, - in_features: int, - out_features: int, - out_additional_features: int = 0, - bias: bool = True, - partially_freeze: bool = True, - device=None, - dtype=None, - ) -> None: - """ - out_additional_features: int. Number of additional trainable dimensions. Only makes sense when - `partially_freeze=True`. partially_freeze: bool. If True, the regular `weight` will be frozen and extra - parameters (if any) will be trainable. If False, default to the regular behavior of nn.Linear. - """ - super().__init__(in_features, out_features, bias, device, dtype) - self.out_additional_features = out_additional_features - self.partially_freeze = partially_freeze - - self.in_features = in_features - self.out_features = out_features - - if partially_freeze: - self.weight.requires_grad_(False) - if bias: - self.bias.requires_grad_(False) - - if out_additional_features > 0: - self.additional_fc = nn.Linear( - in_features=in_features, - out_features=out_additional_features, - bias=bias, - device=device, - dtype=dtype, - ) - - def forward(self, input: torch.Tensor) -> torch.Tensor: - output = F.linear(input, self.weight, self.bias) - - if self.out_additional_features > 0: - additional_features = self.additional_fc(input) - output = torch.cat((output, additional_features), -1) - - return output - - def extra_repr(self) -> str: - """Overwriting `nn.Linear.extra_repr` to include new parameters.""" - return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format( - self.in_features, - self.out_features, - self.out_additional_features, - self.bias is not None, - self.partially_freeze, - ) - - -# Copied from transformers.models.bart.modeling_bart._make_causal_mask -def _make_causal_mask( - input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 -): - """ - Make causal mask used for bi-directional self-attention. - """ - bsz, tgt_len = input_ids_shape - mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) - mask_cond = torch.arange(mask.size(-1), device=device) - mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) - mask = mask.to(dtype) - - if past_key_values_length > 0: - mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) - return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) - - -def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): - """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
- """ - bsz, src_len = mask.size() - tgt_len = tgt_len if tgt_len is not None else src_len - - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) - - inverted_mask = 1.0 - expanded_mask - - return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) - - -# this was adapted from LlamaRMSNorm -class IdeficsRMSNorm(nn.Module): - def __init__(self, hidden_size, eps=1e-6): - """ - IdeficsRMSNorm is equivalent to T5LayerNorm - """ - super().__init__() - self.weight = nn.Parameter(torch.ones(hidden_size)) - self.variance_epsilon = eps - - def forward(self, hidden_states): - variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) - hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) - - # convert into half-precision if necessary - if self.weight.dtype in [torch.float16, torch.bfloat16]: - hidden_states = hidden_states.to(self.weight.dtype) - - return self.weight * hidden_states - - -ALL_LAYERNORM_LAYERS.append(IdeficsRMSNorm) - - -# this was adapted from LlamaRotaryEmbedding -class IdeficsEmbedding(torch.nn.Module): - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): - super().__init__() - - self.dim = dim - self.max_position_embeddings = max_position_embeddings - self.base = base - inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) - self.register_buffer("inv_freq", inv_freq, persistent=False) - - # Build here to make `torch.jit.trace` work. - self._set_cos_sin_cache( - seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() - ) - - def _set_cos_sin_cache(self, seq_len, device, dtype): - self.max_seq_len_cached = seq_len - t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) - - freqs = torch.einsum("i,j->ij", t, self.inv_freq) - # Different from paper, but it uses a different permutation in order to obtain the same calculation - emb = torch.cat((freqs, freqs), dim=-1) - self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) - self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) - - def forward(self, x, seq_len=None): - # x: [bs, num_attention_heads, seq_len, head_size] - if seq_len > self.max_seq_len_cached: - self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) - - return ( - self.cos_cached[:seq_len].to(dtype=x.dtype), - self.sin_cached[:seq_len].to(dtype=x.dtype), - ) - - -def rotate_half(x): - """Rotates half the hidden dims of the input.""" - x1 = x[..., : x.shape[-1] // 2] - x2 = x[..., x.shape[-1] // 2 :] - return torch.cat((-x2, x1), dim=-1) - - -# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb -def apply_rotary_pos_emb(q, k, cos, sin, position_ids): - cos = cos[position_ids].unsqueeze(1) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim] - sin = sin[position_ids].unsqueeze(1) - q_embed = (q * cos) + (rotate_half(q) * sin) - k_embed = (k * cos) + (rotate_half(k) * sin) - return q_embed, k_embed - - -# this was adapted from LlamaMLP -class IdeficsMLP(nn.Module): - def __init__( - self, - hidden_size: int, - intermediate_size: int, - hidden_act: str, - ): - super().__init__() - self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) - self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) - self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) - self.act_fn = ACT2FN[hidden_act] - - def forward(self, x): - return 
self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) - - -# this was adapted from LlamaAttention -class IdeficsAttention(nn.Module): - """Multi-headed attention from 'Attention Is All You Need' paper""" - - def __init__( - self, - hidden_size: int, - num_heads: int, - dropout: float = 0.0, - is_cross_attention: bool = False, - config: PretrainedConfig = None, - qk_layer_norms: bool = False, - ): - super().__init__() - self.hidden_size = hidden_size - self.num_heads = num_heads - self.head_dim = hidden_size // num_heads - self.dropout = dropout - - if (self.head_dim * num_heads) != self.hidden_size: - raise ValueError( - f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" - f" and `num_heads`: {num_heads})." - ) - - self.is_cross_attention = is_cross_attention - - if not hasattr(nn.functional, "scaled_dot_product_attention"): - raise ValueError("this model requires pytorch 2.0 or higher") - - if self.is_cross_attention: - kv_input_dim = ( - self.hidden_size if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim - ) - self.q_proj = nn.Linear( - self.hidden_size, - num_heads * self.head_dim, - bias=False, - ) - self.k_proj = nn.Linear(kv_input_dim, num_heads * self.head_dim, bias=False) - self.v_proj = nn.Linear( - kv_input_dim, - num_heads * self.head_dim, - bias=False, - ) - else: - self.q_proj = nn.Linear( - self.hidden_size, - num_heads * self.head_dim, - bias=False, - ) - self.k_proj = nn.Linear( - self.hidden_size, - num_heads * self.head_dim, - bias=False, - ) - self.v_proj = nn.Linear( - self.hidden_size, - num_heads * self.head_dim, - bias=False, - ) - self.o_proj = nn.Linear( - num_heads * self.head_dim, - hidden_size, - bias=False, - ) - self.rotary_emb = IdeficsEmbedding(self.head_dim) - - self.qk_layer_norms = qk_layer_norms - if self.qk_layer_norms: - self.q_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps) - self.k_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps) - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def forward( - self, - hidden_states: torch.Tensor, - key_value_states: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: bool = False, - use_cache: bool = False, - ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - # if key_value_states are provided this layer is used as a cross-attention layer - is_cross_attention = self.is_cross_attention or key_value_states is not None - - bsz, q_len, _ = hidden_states.size() - - query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) - if not is_cross_attention: - key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) - value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) - else: - _, kv_len, _ = key_value_states.size() # Note that, in this case, `kv_len` == `kv_seq_len` - key_states = self.k_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2) - value_states = ( - self.v_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2) - ) - - kv_seq_len = key_states.shape[-2] - if past_key_value is not None: - 
kv_seq_len += past_key_value[0].shape[-2] - if not is_cross_attention: - cos, sin = self.rotary_emb(value_states, seq_len=max(kv_seq_len, q_len)) - query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) - # [bsz, nh, t, hd] - - if past_key_value is not None: - # reuse k, v, self_attention - key_states = torch.cat([past_key_value[0], key_states], dim=2) - value_states = torch.cat([past_key_value[1], value_states], dim=2) - - past_key_value = (key_states, value_states) if use_cache else None - - if self.qk_layer_norms: - query_states = self.q_layer_norm(query_states) - key_states = self.k_layer_norm(key_states) - - if attention_mask is not None: - if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): - raise ValueError( - f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" - ) - - attn_output = nn.functional.scaled_dot_product_attention( - query_states, - key_states, - value_states, - attn_mask=attention_mask, - dropout_p=self.dropout, - ) - - if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): - raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" - f" {attn_output.size()}" - ) - - attn_output = attn_output.transpose(1, 2) - attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) - - attn_output = self.o_proj(attn_output) - - attn_weights = None - if output_attentions: - logger.warning_once( - "attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead" - ) - - return attn_output, attn_weights, past_key_value - - -# this was adapted from LlamaDecoderLayer -class IdeficsDecoderLayer(nn.Module): - def __init__(self, config: IdeficsConfig): - super().__init__() - self.hidden_size = config.hidden_size - self.self_attn = IdeficsAttention( - hidden_size=self.hidden_size, - num_heads=config.num_attention_heads, - dropout=config.dropout, - config=config, - ) - self.mlp = IdeficsMLP( - hidden_size=self.hidden_size, - intermediate_size=config.intermediate_size, - hidden_act=config.hidden_act, - ) - self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - self.dropout = config.dropout - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: Optional[bool] = False, - use_cache: Optional[bool] = False, - ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding - (see `past_key_values`). 
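# Illustrative round trip through the rotary helpers defined above (toy
# shapes, assuming `torch` is in scope): cos/sin caches come from
# IdeficsEmbedding and are gathered per position before being applied to the
# query/key heads, as in IdeficsAttention.forward.
head_dim, seq = 8, 4
rope = IdeficsEmbedding(head_dim)
x = torch.randn(1, 2, seq, head_dim)  # (bsz, num_heads, seq_len, head_dim)
cos, sin = rope(x, seq_len=seq)
position_ids = torch.arange(seq).unsqueeze(0)
q_rot, k_rot = apply_rotary_pos_emb(x, x, cos, sin, position_ids)
assert q_rot.shape == x.shape
# Position 0 is left unchanged, since the rotation angle there is zero.
assert torch.allclose(q_rot[:, :, 0], x[:, :, 0], atol=1e-6)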
- past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states - """ - - residual = hidden_states - - hidden_states = self.input_layernorm(hidden_states) - - # Self Attention - hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - ) - hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) - hidden_states = residual + hidden_states - - # Fully Connected - residual = hidden_states - hidden_states = self.post_attention_layernorm(hidden_states) - hidden_states = self.mlp(hidden_states) - hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) - hidden_states = residual + hidden_states - - outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - if use_cache: - outputs += (present_key_value,) - - return outputs - - -class IdeficsGatedCrossAttentionLayer(nn.Module): - def __init__(self, config: IdeficsConfig): - super().__init__() - self.hidden_size = config.hidden_size - self.cross_attn = IdeficsAttention( - hidden_size=self.hidden_size, - num_heads=config.num_attention_heads, - is_cross_attention=True, - dropout=config.dropout, - config=config, - qk_layer_norms=config.qk_layer_norms, - ) - self.mlp = IdeficsMLP( - hidden_size=self.hidden_size, - intermediate_size=config.intermediate_size, - hidden_act=config.hidden_act, - ) - self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - self.dropout = config.dropout - - self.act_cross_attn = nn.Tanh() - self.act_dense = nn.Tanh() - - if config.alpha_initializer == "zeros": - if config.alpha_type == "vector": - self.alpha_cross_attn = nn.Parameter(torch.zeros(1, 1, self.hidden_size)) - self.alpha_dense = nn.Parameter(torch.zeros(1, 1, self.hidden_size)) - elif config.alpha_type == "float": - self.alpha_cross_attn = nn.Parameter(torch.zeros(1)) - self.alpha_dense = nn.Parameter(torch.zeros(1)) - else: - raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})") - - elif config.alpha_initializer == "ones": - if config.alpha_type == "vector": - self.alpha_cross_attn = nn.Parameter(torch.ones(1, 1, self.hidden_size)) - self.alpha_dense = nn.Parameter(torch.ones(1, 1, self.hidden_size)) - elif config.alpha_type == "float": - self.alpha_cross_attn = nn.Parameter(torch.ones(1)) - self.alpha_dense = nn.Parameter(torch.ones(1)) - else: - raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})") - - elif config.alpha_initializer in {"normal", "gaussian", "random"}: - if config.alpha_type == "vector": - self.alpha_cross_attn = nn.Parameter( - torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size)) - ) - self.alpha_dense = nn.Parameter( - torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size)) - ) - elif config.alpha_type == "float": - self.alpha_cross_attn = nn.Parameter( - torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,)) - ) - self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,))) - else: - raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})") - - else: - raise NotImplementedError(f"Alpha initialization 
scheme {config.alpha_initializer} not yet implemented!") - - if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")): - raise ValueError("Alpha parameters not initialized correctly!") - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - image_hidden_states: Optional[torch.Tensor] = None, - image_attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, - use_cache: Optional[bool] = False, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - no_images: Optional[bool] = False, - ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding - (see `past_key_values`). - past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states - no_images (`bool`, *optional*, defaults to `False`): If `True` the vision part is ignored - """ - if image_hidden_states is None: - raise ValueError( - "`image_hidden_states` is required for Idefics cross attention module which are visual features to be" - " conditioned on." - ) - - if past_key_value is not None: - raise NotImplementedError("Past key value states are not implemented for Idefics cross attention module.") - - residual = hidden_states - - hidden_states = self.input_layernorm(hidden_states) - - # Cross Attention - hidden_states, self_attn_weights, present_key_value = self.cross_attn( - hidden_states=hidden_states, - key_value_states=image_hidden_states, - attention_mask=image_attention_mask, - output_attentions=output_attentions, - ) - hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) - # when there are no images the model is used in pure language mode - gate = 0 if no_images else 1 - hidden_states = residual + gate * self.act_cross_attn(self.alpha_cross_attn) * hidden_states - - # Fully Connected - residual = hidden_states - hidden_states = self.post_attention_layernorm(hidden_states) - hidden_states = self.mlp(hidden_states) - hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) - hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states - - outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - if use_cache: - outputs += (present_key_value,) - - return outputs - - -LLAMA_START_DOCSTRING = r""" - This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. - Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage - and behavior. 
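# The update rule of the gated cross-attention layer above, in isolation
# (assuming `torch` is in scope): with config.alpha_initializer="zeros" and
# alpha_type="float", tanh(alpha) starts at 0, so the freshly added layer acts
# as an identity and training can open the gate gradually.
alpha = torch.zeros(1)                 # matches self.alpha_cross_attn at init
residual = torch.randn(1, 4, 8)
cross_attn_out = torch.randn(1, 4, 8)  # stand-in for the attention output
gated = residual + torch.tanh(alpha) * cross_attn_out
assert torch.equal(gated, residual)    # nothing is injected at step 0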
- - Parameters: - config ([`IdeficsConfig`]): - Model configuration class with all the parameters of the model. Initializing with a config file does not - load the weights associated with the model, only the configuration. Check out the - [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - - -@add_start_docstrings( - "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", - LLAMA_START_DOCSTRING, -) -class IdeficsPreTrainedModel(PreTrainedModel): - config_class = IdeficsConfig - base_model_prefix = "model" - supports_gradient_checkpointing = True - _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"] - - def _init_weights(self, module): - # important: this ported version of Idefics isn't meant for training from scratch - only - # inference and fine-tuning - so the proper init weights code has been removed - the m4 code - # base should be used for training from scratch and it contains the correct code. - std = self.config.initializer_range - if isinstance(module, nn.Linear): - module.weight.data.normal_(mean=0.0, std=std) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=std) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, IdeficsModel): - module.gradient_checkpointing = value - - -LLAMA_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide - it. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see - `past_key_values`). - - If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] - and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more - information on the default strategy. - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) - past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape - `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape - `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
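# Incremental-decoding sketch matching the past_key_values contract described
# here: once a cache exists, only the most recent token id has to be fed, as
# prepare_inputs_for_generation above does with input_ids[:, -1].
input_ids = torch.tensor([[5, 7, 9]])      # full prompt on the first step
next_ids = input_ids[:, -1].unsqueeze(-1)  # later steps: shape (batch_size, 1)
assert next_ids.shape == (1, 1)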
- - Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention - blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. - - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that - don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all - `decoder_input_ids` of shape `(batch_size, sequence_length)`. - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert `input_ids` indices into associated vectors than the - model's internal embedding lookup matrix. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -@add_start_docstrings( - "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", - LLAMA_START_DOCSTRING, -) -class IdeficsModel(IdeficsPreTrainedModel): - """ - Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is a [`IdeficsDecoderLayer`] - - Args: - config: IdeficsConfig - """ - - def __init__(self, config: IdeficsConfig): - super().__init__(config) - self.config = config - self.padding_idx = config.pad_token_id - self.vocab_size = config.vocab_size - - self.embed_tokens = IdeficsDecoupledEmbedding( - num_embeddings=config.vocab_size, - num_additional_embeddings=config.additional_vocab_size, - embedding_dim=config.hidden_size, - partially_freeze=config.freeze_text_layers, - padding_idx=self.padding_idx, - ) - - self.image_size = config.vision_config.image_size - self.vision_config = config.vision_config - self.vision_model = IdeficsVisionTransformer(config.vision_config) - - # Perceiver Resampler - if config.use_resampler: - perceiver_config = config.perceiver_config - self.perceiver_resampler = IdeficsPerceiverResampler( - config, - config.vision_config.embed_dim, - perceiver_config.resampler_depth, - perceiver_config.resampler_n_heads, - perceiver_config.resampler_head_dim, - perceiver_config.resampler_n_latents, - ) - - self.layers = nn.ModuleList([IdeficsDecoderLayer(config) for _ in range(config.num_hidden_layers)]) - - self.cross_layer_interval = config.cross_layer_interval - num_cross_layers = config.num_hidden_layers // self.cross_layer_interval - self.gated_cross_attn_layers = nn.ModuleList( - [IdeficsGatedCrossAttentionLayer(config) for _ in range(num_cross_layers)] - ) - self.gradient_checkpointing = False - - self.norm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - - self.gradient_checkpointing = False - # Initialize weights and apply final processing - self.post_init() - - self.freeze_relevant_params(config) - - def freeze_relevant_params(self, config=None): - if config is None: - config = self.config - - if config.freeze_text_layers: - 
self.freeze_text_layers(config.freeze_text_module_exceptions) - - if config.freeze_vision_layers: - freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions) - - def freeze_text_layers(self, module_exceptions=[]): - for module in [self.layers, self.norm]: - freeze_model(module, module_exceptions=module_exceptions) - - def freeze_vision_layers(self, module_exceptions=[]): - freeze_model(self.vision_model, module_exceptions=module_exceptions) - - def get_input_embeddings(self): - return self.embed_tokens - - def set_input_embeddings(self, value): - self.embed_tokens = value - - # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask - def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - combined_attention_mask = None - if input_shape[-1] > 1: - combined_attention_mask = _make_causal_mask( - input_shape, - inputs_embeds.dtype, - device=inputs_embeds.device, - past_key_values_length=past_key_values_length, - ) - - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( - inputs_embeds.device - ) - combined_attention_mask = ( - expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask - ) - - return combined_attention_mask - - @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - pixel_values: Optional[torch.FloatTensor] = None, - image_encoder_embeddings: Optional[torch.FloatTensor] = None, - perceiver_embeddings: Optional[torch.FloatTensor] = None, - image_attention_mask: Optional[torch.Tensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - interpolate_pos_encoding: Optional[bool] = False, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, IdeficsBaseModelOutputWithPast]: - device = input_ids.device if input_ids is not None else inputs_embeds.device - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # retrieve input_ids and inputs_embeds - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") - elif input_ids is not None: - batch_size, seq_length = input_ids.shape - elif inputs_embeds is not None: - batch_size, seq_length, _ = inputs_embeds.shape - else: - raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") - - seq_length_with_past = seq_length - past_key_values_length = 0 - - if past_key_values is not None: - past_key_values_length = past_key_values[0][0].shape[2] - seq_length_with_past = seq_length_with_past + past_key_values_length - 
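# Toy check (assuming `torch` is in scope) of the bookkeeping just computed:
# the cached length comes off the key tensor's sequence axis and is added to
# the length of the freshly supplied chunk.
seq_length = 1                                                     # one new token
layer0_cache = (torch.zeros(1, 4, 6, 8), torch.zeros(1, 4, 6, 8))  # (k, v)
past_key_values_length = layer0_cache[0].shape[2]                  # 6 cached tokens
assert seq_length + past_key_values_length == 7                    # attention span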
- if attention_mask is not None and position_ids is None: - # create position_ids on the fly for batch generation - position_ids = attention_mask.long().cumsum(-1) - 1 - position_ids.masked_fill_(attention_mask == 0, 1) - elif position_ids is None: - device = input_ids.device if input_ids is not None else inputs_embeds.device - position_ids = torch.arange( - past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device - ) - position_ids = position_ids.unsqueeze(0) - - no_images = False - if (pixel_values, image_encoder_embeddings, perceiver_embeddings).count(None) != 2: - raise ValueError( - "Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None." - ) - - elif pixel_values is not None: - no_images = len(torch.nonzero(pixel_values)) == 0 - pixel_values = pixel_values.to(dtype=self.dtype, device=device) # fp16 compatibility - batch_size, num_images = pixel_values.shape[:2] - pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:]) - - # Get sequence from the vision encoder - image_hidden_states = self.vision_model( - pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding - ).last_hidden_state - - elif image_encoder_embeddings is not None: - batch_size, num_images, image_seq_len, image_hidden_size = image_encoder_embeddings.size() - image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=input_ids.device) - image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size) - - if self.config.use_resampler: - if perceiver_embeddings is None: - perceiver_embeddings = self.perceiver_resampler(image_hidden_states) - image_seq_len, image_hidden_size = perceiver_embeddings.size(1), perceiver_embeddings.size(2) - else: - batch_size, num_images, image_seq_len, image_hidden_size = perceiver_embeddings.size() - image_hidden_states = perceiver_embeddings - elif perceiver_embeddings is None: - image_seq_len, image_hidden_size = image_hidden_states.size(1), image_hidden_states.size(2) - else: - raise ValueError("If `perceiver_embeddings` are passed, use_resampler should be True") - - image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size) - # # Hack to use the model in full language modeling mode - # image_attention_mask = torch.zeros(batch_size, seq_length, 1, dtype=torch.long, device=image_hidden_states.device) - # Make image_attention_mask compatible with hidden states - text_seq_len = image_attention_mask.size(1) - image_attention_mask = image_attention_mask.unsqueeze(-1) - image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len) - image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len) - - if image_hidden_states is not None: - image_batch_size, image_sequence_length, _ = image_hidden_states.size() - image_hidden_shape = (image_batch_size, image_sequence_length) - if image_attention_mask is None: - image_attention_mask = torch.ones(image_hidden_shape, device=device) - image_attention_mask = self.invert_attention_mask(image_attention_mask) - else: - image_attention_mask = None - - if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) - # embed positions - if attention_mask is None: - attention_mask = torch.ones( - (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device - ) - attention_mask = self._prepare_decoder_attention_mask( - attention_mask, 
(batch_size, seq_length), inputs_embeds, past_key_values_length - ) - - hidden_states = inputs_embeds - - if self.gradient_checkpointing and self.training: - if use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." - ) - use_cache = False - - # decoder layers - all_hidden_states = () if output_hidden_states else None - all_self_attns = () if output_attentions else None - next_decoder_cache = () if use_cache else None - - for idx, decoder_layer in enumerate(self.layers): - if output_hidden_states: - all_hidden_states += (hidden_states,) - - past_key_value = past_key_values[idx] if past_key_values is not None else None - - def vblock( - main_block, - hidden_states, - attention_mask, - position_ids, - past_key_value, - image_hidden_states, - image_attention_mask, - output_attentions, - use_cache, - no_images, - layer_idx, - cross_layer_interval, - gated_cross_attn_layers, - ): - # TODO(ls): Add cross attention values to respective lists - if layer_idx % cross_layer_interval == 0: - xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval] - outputs = xblock( - hidden_states, - attention_mask=attention_mask, - image_hidden_states=image_hidden_states, - image_attention_mask=image_attention_mask, - output_attentions=output_attentions, - use_cache=use_cache, - past_key_value=None, # not implemented - no_images=no_images, - ) - hidden_states = outputs[0] - - layer_outputs = main_block( - hidden_states, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - ) - - return layer_outputs - - if self.gradient_checkpointing and self.training: - past_key_value = None - if use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
- ) - use_cache = False - - layer_outputs = torch.utils.checkpoint.checkpoint( - vblock, - decoder_layer, - hidden_states, - attention_mask, - position_ids, - past_key_value, - image_hidden_states, - image_attention_mask, - output_attentions, - use_cache, - no_images, - idx, - self.cross_layer_interval, - self.gated_cross_attn_layers, - ) - else: - layer_outputs = vblock( - decoder_layer, - hidden_states, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - image_hidden_states=image_hidden_states, - image_attention_mask=image_attention_mask, - output_attentions=output_attentions, - use_cache=use_cache, - no_images=no_images, - layer_idx=idx, - cross_layer_interval=self.cross_layer_interval, - gated_cross_attn_layers=self.gated_cross_attn_layers, - ) - - hidden_states = layer_outputs[0] - - if use_cache: - next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) - - if output_attentions: - all_self_attns += (layer_outputs[1],) - - hidden_states = self.norm(hidden_states) - - # add hidden states from the last decoder layer - if output_hidden_states: - all_hidden_states += (hidden_states,) - - next_cache = next_decoder_cache if use_cache else None - image_hidden_states = image_hidden_states.view(batch_size, num_images, image_seq_len, image_hidden_size) - if not return_dict: - return tuple( - v - for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states] - if v is not None - ) - return IdeficsBaseModelOutputWithPast( - last_hidden_state=hidden_states, - past_key_values=next_cache, - hidden_states=all_hidden_states, - attentions=all_self_attns, - image_hidden_states=image_hidden_states, - ) - - -class IdeficsForVisionText2Text(IdeficsPreTrainedModel): - _keys_to_ignore_on_load_missing = [r"lm_head.weight"] - _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"] - - def __init__(self, config, vision_model=None): - super().__init__(config) - self.model = IdeficsModel(config) - - self.lm_head = IdeficsDecoupledLinear( - in_features=config.hidden_size, - out_features=config.vocab_size, - out_additional_features=config.additional_vocab_size, - bias=False, - partially_freeze=config.freeze_lm_head, - ) - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.model.embed_tokens - - def set_input_embeddings(self, value): - self.model.embed_tokens = value - - def get_output_embeddings(self): - return self.lm_head - - def set_output_embeddings(self, new_embeddings): - self.lm_head = new_embeddings - - def set_decoder(self, decoder): - self.model = decoder - - def get_decoder(self): - return self.model - - def tie_weights(self): - """ - Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of - IdeficsDecoupledLinear and IdeficsDecoupledEmbedding. 
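# Weight-tying sketch with toy sizes, mirroring tie_weights below: the LM head
# shares its matrix with the input embedding, and the additional head features
# are tied to the additional embedding rows.
vocab, extra, hidden = 10, 2, 8
tok_emb = IdeficsDecoupledEmbedding(vocab, extra, hidden)
head = IdeficsDecoupledLinear(hidden, vocab, out_additional_features=extra, bias=False)
head.weight = tok_emb.weight                                     # (vocab, hidden)
head.additional_fc.weight = tok_emb.additional_embedding.weight  # (extra, hidden)
assert head.weight.data_ptr() == tok_emb.weight.data_ptr()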
- """ - output_embeddings = self.get_output_embeddings() - input_embeddings = self.get_input_embeddings() - - if getattr(self.config, "tie_word_embeddings", True): - output_embeddings.weight = input_embeddings.weight - if input_embeddings.num_additional_embeddings > 0: - assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings - output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight - - if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): - output_embeddings.out_features = input_embeddings.num_embeddings - if hasattr(output_embeddings, "out_additional_features") and hasattr( - input_embeddings, "num_additional_embeddings" - ): - output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings - - @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=IdeficsCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - pixel_values: Optional[torch.FloatTensor] = None, - image_encoder_embeddings: Optional[torch.FloatTensor] = None, - perceiver_embeddings: Optional[torch.FloatTensor] = None, - image_attention_mask: Optional[torch.Tensor] = None, - labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - interpolate_pos_encoding: Optional[bool] = False, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, IdeficsCausalLMOutputWithPast]: - r""" - Args: - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., - config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored - (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. - - Returns: - - Example: - - ```python - >>> from transformers import AutoTokenizer, IdeficsForVisionText2Text - - >>> model = IdeficsForVisionText2Text.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) - >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) - - >>> prompt = "Hey, are you consciours? Can you talk to me?" - >>> inputs = tokenizer(prompt, return_tensors="pt") - - >>> # Generate - >>> generate_ids = model.generate(inputs.input_ids, max_length=30) - >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] - "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." 
- ```""" - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) - outputs = self.model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - pixel_values=pixel_values, - image_encoder_embeddings=image_encoder_embeddings, - perceiver_embeddings=perceiver_embeddings, - image_attention_mask=image_attention_mask, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - interpolate_pos_encoding=interpolate_pos_encoding, - return_dict=return_dict, - ) - - hidden_states = outputs[0] - logits = self.lm_head(hidden_states) - - loss = None - if labels is not None: - # Shift so that tokens < n predict n - if attention_mask is not None: - shift_attention_mask = attention_mask[..., 1:] - shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous() - shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous() - else: - shift_logits = logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous() - # Flatten the tokens - loss_fct = CrossEntropyLoss() - loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) - - if not return_dict: - output = (logits,) + outputs[1:] - return (loss,) + output if loss is not None else output - - return IdeficsCausalLMOutputWithPast( - loss=loss, - logits=logits, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - image_hidden_states=outputs.image_hidden_states, - ) - - def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): - image_hidden_states = kwargs.pop("image_hidden_states", None) - if image_hidden_states is not None: - if self.config.use_resampler: - kwargs["perceiver_embeddings"] = image_hidden_states - else: - kwargs["image_encoder_embeddings"] = image_hidden_states - kwargs["pixel_values"] = None - inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs) - unwanted_kwargs = ["token_type_ids"] - for kwarg in unwanted_kwargs: - inputs.pop(kwarg, None) - return inputs - - @staticmethod - def _expand_inputs_for_generation( - *args, - **model_kwargs, - ): - return expand_inputs_for_generation(*args, **model_kwargs) - - @staticmethod - def _update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder): - return update_model_kwargs_for_generation(outputs, model_kwargs) - - @staticmethod - def _reorder_cache(past, beam_idx): - reordered_past = () - for layer_past in past: - reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) - return reordered_past diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/layoutlmv2/image_processing_layoutlmv2.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/layoutlmv2/image_processing_layoutlmv2.py deleted file mode 100644 index b1e6c0731d2954e399bb1873e2a9cd2662f370b1..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/layoutlmv2/image_processing_layoutlmv2.py +++ /dev/null @@ -1,288 +0,0 @@ -# 
coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Image processor class for LayoutLMv2.""" - -from typing import Dict, Optional, Union - -import numpy as np - -from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict -from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image -from ...image_utils import ( - ChannelDimension, - ImageInput, - PILImageResampling, - infer_channel_dimension_format, - make_list_of_images, - to_numpy_array, - valid_images, -) -from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends - - -if is_vision_available(): - import PIL - -# soft dependency -if is_pytesseract_available(): - import pytesseract - -logger = logging.get_logger(__name__) - - -def normalize_box(box, width, height): - return [ - int(1000 * (box[0] / width)), - int(1000 * (box[1] / height)), - int(1000 * (box[2] / width)), - int(1000 * (box[3] / height)), - ] - - -def apply_tesseract( - image: np.ndarray, - lang: Optional[str], - tesseract_config: Optional[str] = None, - input_data_format: Optional[Union[str, ChannelDimension]] = None, -): - """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" - tesseract_config = tesseract_config if tesseract_config is not None else "" - - # apply OCR - pil_image = to_pil_image(image, input_data_format=input_data_format) - image_width, image_height = pil_image.size - data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config) - words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] - - # filter empty words and corresponding coordinates - irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] - words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] - left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] - top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] - width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] - height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] - - # turn coordinates into (left, top, left+width, top+height) format - actual_boxes = [] - for x, y, w, h in zip(left, top, width, height): - actual_box = [x, y, x + w, y + h] - actual_boxes.append(actual_box) - - # finally, normalize the bounding boxes - normalized_boxes = [] - for box in actual_boxes: - normalized_boxes.append(normalize_box(box, image_width, image_height)) - - assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes" - - return words, normalized_boxes - - -class LayoutLMv2ImageProcessor(BaseImageProcessor): - r""" - Constructs a LayoutLMv2 image processor. 
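# Quick check (illustrative) of normalize_box above: pixel boxes are mapped
# onto the 0-1000 grid that LayoutLMv2 expects, independent of image size.
box = [50, 100, 150, 200]  # (left, top, right, bottom) in pixels
width, height = 500, 400
assert normalize_box(box, width, height) == [100, 250, 300, 500]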
- - Args: - do_resize (`bool`, *optional*, defaults to `True`): - Whether to resize the image's (height, width) dimensions to `(size["height"], size["width"])`. Can be - overridden by `do_resize` in `preprocess`. - size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`): - Size of the image after resizing. Can be overridden by `size` in `preprocess`. - resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): - Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the - `preprocess` method. - apply_ocr (`bool`, *optional*, defaults to `True`): - Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by - `apply_ocr` in `preprocess`. - ocr_lang (`str`, *optional*): - The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is - used. Can be overridden by `ocr_lang` in `preprocess`. - tesseract_config (`str`, *optional*, defaults to `""`): - Any additional custom configuration flags that are forwarded to the `config` parameter when calling - Tesseract. For example: '--psm 6'. Can be overridden by `tesseract_config` in `preprocess`. - """ - - model_input_names = ["pixel_values"] - - def __init__( - self, - do_resize: bool = True, - size: Dict[str, int] = None, - resample: PILImageResampling = PILImageResampling.BILINEAR, - apply_ocr: bool = True, - ocr_lang: Optional[str] = None, - tesseract_config: Optional[str] = "", - **kwargs, - ) -> None: - super().__init__(**kwargs) - size = size if size is not None else {"height": 224, "width": 224} - size = get_size_dict(size) - - self.do_resize = do_resize - self.size = size - self.resample = resample - self.apply_ocr = apply_ocr - self.ocr_lang = ocr_lang - self.tesseract_config = tesseract_config - - # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize - def resize( - self, - image: np.ndarray, - size: Dict[str, int], - resample: PILImageResampling = PILImageResampling.BILINEAR, - data_format: Optional[Union[str, ChannelDimension]] = None, - input_data_format: Optional[Union[str, ChannelDimension]] = None, - **kwargs, - ) -> np.ndarray: - """ - Resize an image to `(size["height"], size["width"])`. - - Args: - image (`np.ndarray`): - Image to resize. - size (`Dict[str, int]`): - Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. - resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): - `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. - data_format (`ChannelDimension` or `str`, *optional*): - The channel dimension format for the output image. If unset, the channel dimension format of the input - image is used. Can be one of: - - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - input_data_format (`ChannelDimension` or `str`, *optional*): - The channel dimension format for the input image. If unset, the channel dimension format is inferred - from the input image. Can be one of: - - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
- - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - - Returns: - `np.ndarray`: The resized image. - """ - size = get_size_dict(size) - if "height" not in size or "width" not in size: - raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") - output_size = (size["height"], size["width"]) - return resize( - image, - size=output_size, - resample=resample, - data_format=data_format, - input_data_format=input_data_format, - **kwargs, - ) - - def preprocess( - self, - images: ImageInput, - do_resize: bool = None, - size: Dict[str, int] = None, - resample: PILImageResampling = None, - apply_ocr: bool = None, - ocr_lang: Optional[str] = None, - tesseract_config: Optional[str] = None, - return_tensors: Optional[Union[str, TensorType]] = None, - data_format: ChannelDimension = ChannelDimension.FIRST, - input_data_format: Optional[Union[str, ChannelDimension]] = None, - **kwargs, - ) -> PIL.Image.Image: - """ - Preprocess an image or batch of images. - - Args: - images (`ImageInput`): - Image to preprocess. - do_resize (`bool`, *optional*, defaults to `self.do_resize`): - Whether to resize the image. - size (`Dict[str, int]`, *optional*, defaults to `self.size`): - Desired size of the output image after resizing. - resample (`PILImageResampling`, *optional*, defaults to `self.resample`): - Resampling filter to use if resizing the image. This can be one of the enum `PIL.Image` resampling - filter. Only has an effect if `do_resize` is set to `True`. - apply_ocr (`bool`, *optional*, defaults to `self.apply_ocr`): - Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. - ocr_lang (`str`, *optional*, defaults to `self.ocr_lang`): - The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is - used. - tesseract_config (`str`, *optional*, defaults to `self.tesseract_config`): - Any additional custom configuration flags that are forwarded to the `config` parameter when calling - Tesseract. - return_tensors (`str` or `TensorType`, *optional*): - The type of tensors to return. Can be one of: - - Unset: Return a list of `np.ndarray`. - - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. - data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): - The channel dimension format for the output image. Can be one of: - - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - """ - do_resize = do_resize if do_resize is not None else self.do_resize - size = size if size is not None else self.size - size = get_size_dict(size) - resample = resample if resample is not None else self.resample - apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr - ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang - tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config - - images = make_list_of_images(images) - - if not valid_images(images): - raise ValueError( - "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " - "torch.Tensor, tf.Tensor or jax.ndarray." 
- ) - - if do_resize and size is None: - raise ValueError("Size must be specified if do_resize is True.") - - # All transformations expect numpy arrays. - images = [to_numpy_array(image) for image in images] - - if input_data_format is None: - # We assume that all images have the same channel dimension format. - input_data_format = infer_channel_dimension_format(images[0]) - - if apply_ocr: - requires_backends(self, "pytesseract") - words_batch = [] - boxes_batch = [] - for image in images: - words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format) - words_batch.append(words) - boxes_batch.append(boxes) - - if do_resize: - images = [ - self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) - for image in images - ] - - # flip color channels from RGB to BGR (as Detectron2 requires this) - images = [flip_channel_order(image, input_data_format=input_data_format) for image in images] - images = [ - to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images - ] - - data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) - - if apply_ocr: - data["words"] = words_batch - data["boxes"] = boxes_batch - return data diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/diffusion/logger/utils.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/diffusion/logger/utils.py deleted file mode 100644 index 485681ced897980dc0bf5b149308245bbd708de9..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/diffusion/logger/utils.py +++ /dev/null @@ -1,126 +0,0 @@ -import os -import yaml -import json -import pickle -import torch - -def traverse_dir( - root_dir, - extensions, - amount=None, - str_include=None, - str_exclude=None, - is_pure=False, - is_sort=False, - is_ext=True): - - file_list = [] - cnt = 0 - for root, _, files in os.walk(root_dir): - for file in files: - if any([file.endswith(f".{ext}") for ext in extensions]): - # path - mix_path = os.path.join(root, file) - pure_path = mix_path[len(root_dir)+1:] if is_pure else mix_path - - # amount - if (amount is not None) and (cnt == amount): - if is_sort: - file_list.sort() - return file_list - - # check string - if (str_include is not None) and (str_include not in pure_path): - continue - if (str_exclude is not None) and (str_exclude in pure_path): - continue - - if not is_ext: - ext = pure_path.split('.')[-1] - pure_path = pure_path[:-(len(ext)+1)] - file_list.append(pure_path) - cnt += 1 - if is_sort: - file_list.sort() - return file_list - - - -class DotDict(dict): - def __getattr__(*args): - val = dict.get(*args) - return DotDict(val) if type(val) is dict else val - - __setattr__ = dict.__setitem__ - __delattr__ = dict.__delitem__ - - -def get_network_paras_amount(model_dict): - info = dict() - for model_name, model in model_dict.items(): - # all_params = sum(p.numel() for p in model.parameters()) - trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) - - info[model_name] = trainable_params - return info - - -def load_config(path_config): - with open(path_config, "r") as config: - args = yaml.safe_load(config) - args = DotDict(args) - # print(args) - return args - -def save_config(path_config,config): - config = dict(config) - with open(path_config, "w") as f: - yaml.dump(config, f) - -def to_json(path_params, path_json): - params = torch.load(path_params, map_location=torch.device('cpu')) - raw_state_dict = {} - for k, v in params.items(): - val = 
v.flatten().numpy().tolist() - raw_state_dict[k] = val - - with open(path_json, 'w') as outfile: - json.dump(raw_state_dict, outfile,indent= "\t") - - -def convert_tensor_to_numpy(tensor, is_squeeze=True): - if is_squeeze: - tensor = tensor.squeeze() - if tensor.requires_grad: - tensor = tensor.detach() - if tensor.is_cuda: - tensor = tensor.cpu() - return tensor.numpy() - - -def load_model( - expdir, - model, - optimizer, - name='model', - postfix='', - device='cpu'): - if postfix == '': - postfix = '_' + postfix - path = os.path.join(expdir, name+postfix) - path_pt = traverse_dir(expdir, ['pt'], is_ext=False) - global_step = 0 - if len(path_pt) > 0: - steps = [s[len(path):] for s in path_pt] - maxstep = max([int(s) if s.isdigit() else 0 for s in steps]) - if maxstep >= 0: - path_pt = path+str(maxstep)+'.pt' - else: - path_pt = path+'best.pt' - print(' [*] restoring model from', path_pt) - ckpt = torch.load(path_pt, map_location=torch.device(device)) - global_step = ckpt['global_step'] - model.load_state_dict(ckpt['model'], strict=False) - if ckpt.get('optimizer') != None: - optimizer.load_state_dict(ckpt['optimizer']) - return global_step, model, optimizer diff --git a/spaces/yueranseo/mygpt/modules/models/minimax.py b/spaces/yueranseo/mygpt/modules/models/minimax.py deleted file mode 100644 index 2e1b50280fd2fbc43a69caaf660a0d64beaa405b..0000000000000000000000000000000000000000 --- a/spaces/yueranseo/mygpt/modules/models/minimax.py +++ /dev/null @@ -1,161 +0,0 @@ -import json -import os - -import colorama -import requests -import logging - -from modules.models.base_model import BaseLLMModel -from modules.presets import STANDARD_ERROR_MSG, GENERAL_ERROR_MSG, TIMEOUT_STREAMING, TIMEOUT_ALL, i18n - -group_id = os.environ.get("MINIMAX_GROUP_ID", "") - - -class MiniMax_Client(BaseLLMModel): - """ - MiniMax Client - 接口文档见 https://api.minimax.chat/document/guides/chat - """ - - def __init__(self, model_name, api_key, user_name="", system_prompt=None): - super().__init__(model_name=model_name, user=user_name) - self.url = f'https://api.minimax.chat/v1/text/chatcompletion?GroupId={group_id}' - self.history = [] - self.api_key = api_key - self.system_prompt = system_prompt - self.headers = { - "Authorization": f"Bearer {api_key}", - "Content-Type": "application/json" - } - - def get_answer_at_once(self): - # minimax temperature is (0,1] and base model temperature is [0,2], and yuan 0.9 == base 1 so need to convert - temperature = self.temperature * 0.9 if self.temperature <= 1 else 0.9 + (self.temperature - 1) / 10 - - request_body = { - "model": self.model_name.replace('minimax-', ''), - "temperature": temperature, - "skip_info_mask": True, - 'messages': [{"sender_type": "USER", "text": self.history[-1]['content']}] - } - if self.n_choices: - request_body['beam_width'] = self.n_choices - if self.system_prompt: - request_body['prompt'] = self.system_prompt - if self.max_generation_token: - request_body['tokens_to_generate'] = self.max_generation_token - if self.top_p: - request_body['top_p'] = self.top_p - - response = requests.post(self.url, headers=self.headers, json=request_body) - - res = response.json() - answer = res['reply'] - total_token_count = res["usage"]["total_tokens"] - return answer, total_token_count - - def get_answer_stream_iter(self): - response = self._get_response(stream=True) - if response is not None: - iter = self._decode_chat_response(response) - partial_text = "" - for i in iter: - partial_text += i - yield partial_text - else: - yield STANDARD_ERROR_MSG + 
GENERAL_ERROR_MSG - - def _get_response(self, stream=False): - minimax_api_key = self.api_key - history = self.history - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {minimax_api_key}", - } - - temperature = self.temperature * 0.9 if self.temperature <= 1 else 0.9 + (self.temperature - 1) / 10 - - messages = [] - for msg in self.history: - if msg['role'] == 'user': - messages.append({"sender_type": "USER", "text": msg['content']}) - else: - messages.append({"sender_type": "BOT", "text": msg['content']}) - - request_body = { - "model": self.model_name.replace('minimax-', ''), - "temperature": temperature, - "skip_info_mask": True, - 'messages': messages - } - if self.n_choices: - request_body['beam_width'] = self.n_choices - if self.system_prompt: - lines = self.system_prompt.splitlines() - if lines[0].find(":") != -1 and len(lines[0]) < 20: - request_body["role_meta"] = { - "user_name": lines[0].split(":")[0], - "bot_name": lines[0].split(":")[1] - } - lines.pop() - request_body["prompt"] = "\n".join(lines) - if self.max_generation_token: - request_body['tokens_to_generate'] = self.max_generation_token - else: - request_body['tokens_to_generate'] = 512 - if self.top_p: - request_body['top_p'] = self.top_p - - if stream: - timeout = TIMEOUT_STREAMING - request_body['stream'] = True - request_body['use_standard_sse'] = True - else: - timeout = TIMEOUT_ALL - try: - response = requests.post( - self.url, - headers=headers, - json=request_body, - stream=stream, - timeout=timeout, - ) - except: - return None - - return response - - def _decode_chat_response(self, response): - error_msg = "" - for chunk in response.iter_lines(): - if chunk: - chunk = chunk.decode() - chunk_length = len(chunk) - print(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") - error_msg += chunk - continue - if chunk_length > 6 and "delta" in chunk["choices"][0]: - if "finish_reason" in chunk["choices"][0] and chunk["choices"][0]["finish_reason"] == "stop": - self.all_token_counts.append(chunk["usage"]["total_tokens"] - sum(self.all_token_counts)) - break - try: - yield chunk["choices"][0]["delta"] - except Exception as e: - logging.error(f"Error: {e}") - continue - if error_msg: - try: - error_msg = json.loads(error_msg) - if 'base_resp' in error_msg: - status_code = error_msg['base_resp']['status_code'] - status_msg = error_msg['base_resp']['status_msg'] - raise Exception(f"{status_code} - {status_msg}") - except json.JSONDecodeError: - pass - raise Exception(error_msg) diff --git a/spaces/yujieq/RxnScribe/app.py b/spaces/yujieq/RxnScribe/app.py deleted file mode 100644 index 11d91a8ec317aac78de9ccc0d3acfa0530a91663..0000000000000000000000000000000000000000 --- a/spaces/yujieq/RxnScribe/app.py +++ /dev/null @@ -1,86 +0,0 @@ -import gradio as gr - -import os -import glob -import cv2 -import numpy as np -import torch -from rxnscribe import RxnScribe - -from huggingface_hub import hf_hub_download - -REPO_ID = "yujieq/RxnScribe" -FILENAME = "pix2seq_reaction_full.ckpt" -ckpt_path = hf_hub_download(REPO_ID, FILENAME) - -device = torch.device('cpu') -model = RxnScribe(ckpt_path, device) - - -def get_markdown(reaction): - output = [] - for x in ['reactants', 'conditions', 'products']: - s = '' - for ent in reaction[x]: - if 'smiles' in ent: - s += "\n```\n" + ent['smiles'] + "\n```\n" - elif 'text' in ent: - s += ' '.join(ent['text']) + '
          ' - else: - s += ent['category'] - output.append(s) - return output - - -def predict(image, molscribe, ocr): - predictions = model.predict_image(image, molscribe=molscribe, ocr=ocr) - pred_image = model.draw_predictions_combined(predictions, image=image) - markdown = [[i] + get_markdown(reaction) for i, reaction in enumerate(predictions)] - return pred_image, markdown - - -with gr.Blocks() as demo: - gr.Markdown(""" -
RxnScribe
          - - Extract chemical reactions from a diagram. Please upload a reaction diagram, RxnScribe will predict the reaction structures in the diagram. - - The predicted reactions are visualized in separate images. - Red boxes are reactants. - Green boxes are reaction conditions. - Blue boxes are products. - - It usually takes 5-10 seconds to process a diagram with this demo. - Check the options to run [MolScribe](https://huggingface.co/spaces/yujieq/MolScribe) and [OCR](https://huggingface.co/spaces/tomofi/EasyOCR) (it will take a longer time, of course). - - Paper: [RxnScribe: A Sequence Generation Model for Reaction Diagram Parsing](https://pubs.acs.org/doi/10.1021/acs.jcim.3c00439) - - Code: [https://github.com/thomas0809/RxnScribe](https://github.com/thomas0809/RxnScribe) - - Authors: [Yujie Qian](mailto:yujieq@csail.mit.edu), Jiang Guo, Zhengkai Tu, Connor W. Coley, Regina Barzilay. _MIT CSAIL_. - """) - with gr.Column(): - with gr.Row(): - image = gr.Image(label="Upload reaction diagram", show_label=False, type='pil').style(height=256) - with gr.Row(): - molscribe = gr.Checkbox(label="Run MolScribe to recognize molecule structures") - ocr = gr.Checkbox(label="Run OCR to recognize text") - btn = gr.Button("Submit").style(full_width=False) - with gr.Row(): - gallery = gr.Image(label='Predicted reactions', show_label=True).style(height="auto") - markdown = gr.Dataframe( - headers=['#', 'reactant', 'condition', 'product'], - datatype=['number'] + ['markdown'] * 3, - wrap=False - ) - - btn.click(predict, inputs=[image, molscribe, ocr], outputs=[gallery, markdown]) - - gr.Examples( - examples=sorted(glob.glob('examples/*.png')), - inputs=[image], - outputs=[gallery, markdown], - fn=predict, - ) - -demo.launch() diff --git a/spaces/yuntian-deng/latex2im/README.md b/spaces/yuntian-deng/latex2im/README.md deleted file mode 100644 index 008fefce64525c6fcb8c381ddc1c4f9f8a0804c9..0000000000000000000000000000000000000000 --- a/spaces/yuntian-deng/latex2im/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Latex2im -emoji: 🔥 -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/zeykz/rvc-mlbb-v2zey/lib/infer_pack/models.py b/spaces/zeykz/rvc-mlbb-v2zey/lib/infer_pack/models.py deleted file mode 100644 index 44c08d361bcb13b84b38dc29beff5cdaddad4ea2..0000000000000000000000000000000000000000 --- a/spaces/zeykz/rvc-mlbb-v2zey/lib/infer_pack/models.py +++ /dev/null @@ -1,1124 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = 
kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = 
out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, 
self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - 
if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - 
self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - 
self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - 
f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - 
self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - 
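
For orientation, a minimal usage sketch of the combined discriminator built from `DiscriminatorS` (above) and the `DiscriminatorP` heads (defined next); this is not part of the original file, and the batch shape is illustrative:

```python
# Minimal sketch: driving the multi-period discriminator during GAN training.
# Assumes the classes in this file (and lib.infer_pack.modules) are in scope.
import torch

disc = MultiPeriodDiscriminatorV2()       # DiscriminatorS + one DiscriminatorP per period
y = torch.randn(2, 1, 8192)               # real waveforms: (batch, 1, samples)
y_hat = torch.randn(2, 1, 8192)           # waveforms produced by GeneratorNSF
y_d_rs, y_d_gs, fmap_rs, fmap_gs = disc(y, y_hat)
assert len(y_d_rs) == 9                   # 1 spectral head + 8 period heads (2..37)
# fmap_rs / fmap_gs are per-layer feature maps, typically fed to a
# feature-matching loss alongside the adversarial loss on y_d_*.
```
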
-class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/zhang-wei-jian/docker/node_modules/koa/lib/context.js b/spaces/zhang-wei-jian/docker/node_modules/koa/lib/context.js deleted file mode 100644 index f6c0f111ba6996c0a20e8436ef2400a1e4b418e9..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/koa/lib/context.js +++ /dev/null @@ -1,251 +0,0 @@ - -'use strict'; - -/** - * Module dependencies. - */ - -const util = require('util'); -const createError = require('http-errors'); -const httpAssert = require('http-assert'); -const delegate = require('delegates'); -const statuses = require('statuses'); -const Cookies = require('cookies'); - -const COOKIES = Symbol('context#cookies'); - -/** - * Context prototype. - */ - -const proto = module.exports = { - - /** - * util.inspect() implementation, which - * just returns the JSON output. - * - * @return {Object} - * @api public - */ - - inspect() { - if (this === proto) return this; - return this.toJSON(); - }, - - /** - * Return JSON representation. - * - * Here we explicitly invoke .toJSON() on each - * object, as iteration will otherwise fail due - * to the getters and cause utilities such as - * clone() to fail. - * - * @return {Object} - * @api public - */ - - toJSON() { - return { - request: this.request.toJSON(), - response: this.response.toJSON(), - app: this.app.toJSON(), - originalUrl: this.originalUrl, - req: '', - res: '', - socket: '' - }; - }, - - /** - * Similar to .throw(), adds assertion. - * - * this.assert(this.user, 401, 'Please login!'); - * - * See: https://github.com/jshttp/http-assert - * - * @param {Mixed} test - * @param {Number} status - * @param {String} message - * @api public - */ - - assert: httpAssert, - - /** - * Throw an error with `status` (default 500) and - * `msg`. Note that these are user-level - * errors, and the message may be exposed to the client. 
- * - * this.throw(403) - * this.throw(400, 'name required') - * this.throw('something exploded') - * this.throw(new Error('invalid')) - * this.throw(400, new Error('invalid')) - * - * See: https://github.com/jshttp/http-errors - * - * Note: `status` should only be passed as the first parameter. - * - * @param {String|Number|Error} err, msg or status - * @param {String|Number|Error} [err, msg or status] - * @param {Object} [props] - * @api public - */ - - throw(...args) { - throw createError(...args); - }, - - /** - * Default error handling. - * - * @param {Error} err - * @api private - */ - - onerror(err) { - // don't do anything if there is no error. - // this allows you to pass `this.onerror` - // to node-style callbacks. - if (null == err) return; - - // When dealing with cross-globals a normal `instanceof` check doesn't work properly. - // See https://github.com/koajs/koa/issues/1466 - // We can probably remove it once jest fixes https://github.com/facebook/jest/issues/2549. - const isNativeError = - Object.prototype.toString.call(err) === '[object Error]' || - err instanceof Error; - if (!isNativeError) err = new Error(util.format('non-error thrown: %j', err)); - - let headerSent = false; - if (this.headerSent || !this.writable) { - headerSent = err.headerSent = true; - } - - // delegate - this.app.emit('error', err, this); - - // nothing we can do here other - // than delegate to the app-level - // handler and log. - if (headerSent) { - return; - } - - const { res } = this; - - // first unset all headers - /* istanbul ignore else */ - if (typeof res.getHeaderNames === 'function') { - res.getHeaderNames().forEach(name => res.removeHeader(name)); - } else { - res._headers = {}; // Node < 7.7 - } - - // then set those specified - this.set(err.headers); - - // force text/plain - this.type = 'text'; - - let statusCode = err.status || err.statusCode; - - // ENOENT support - if ('ENOENT' === err.code) statusCode = 404; - - // default to 500 - if ('number' !== typeof statusCode || !statuses[statusCode]) statusCode = 500; - - // respond - const code = statuses[statusCode]; - const msg = err.expose ? err.message : code; - this.status = err.status = statusCode; - this.length = Buffer.byteLength(msg); - res.end(msg); - }, - - get cookies() { - if (!this[COOKIES]) { - this[COOKIES] = new Cookies(this.req, this.res, { - keys: this.app.keys, - secure: this.request.secure - }); - } - return this[COOKIES]; - }, - - set cookies(_cookies) { - this[COOKIES] = _cookies; - } -}; - -/** - * Custom inspection implementation for newer Node.js versions. - * - * @return {Object} - * @api public - */ - -/* istanbul ignore else */ -if (util.inspect.custom) { - module.exports[util.inspect.custom] = module.exports.inspect; -} - -/** - * Response delegation. - */ - -delegate(proto, 'response') - .method('attachment') - .method('redirect') - .method('remove') - .method('vary') - .method('has') - .method('set') - .method('append') - .method('flushHeaders') - .access('status') - .access('message') - .access('body') - .access('length') - .access('type') - .access('lastModified') - .access('etag') - .getter('headerSent') - .getter('writable'); - -/** - * Request delegation. 
- */ - -delegate(proto, 'request') - .method('acceptsLanguages') - .method('acceptsEncodings') - .method('acceptsCharsets') - .method('accepts') - .method('get') - .method('is') - .access('querystring') - .access('idempotent') - .access('socket') - .access('search') - .access('method') - .access('query') - .access('path') - .access('url') - .access('accept') - .getter('origin') - .getter('href') - .getter('subdomains') - .getter('protocol') - .getter('host') - .getter('hostname') - .getter('URL') - .getter('header') - .getter('headers') - .getter('secure') - .getter('stale') - .getter('fresh') - .getter('ips') - .getter('ip'); diff --git a/spaces/zhanghaohui/szu-gpt-academic/docs/README.md.German.md b/spaces/zhanghaohui/szu-gpt-academic/docs/README.md.German.md deleted file mode 100644 index 0fe200cf690b6c9ff699e2e19bb53fd3cd60c201..0000000000000000000000000000000000000000 --- a/spaces/zhanghaohui/szu-gpt-academic/docs/README.md.German.md +++ /dev/null @@ -1,307 +0,0 @@ -> **Hinweis** -> -> Bei der Installation von Abhängigkeiten sollten nur die in **requirements.txt** **angegebenen Versionen** streng ausgewählt werden. -> -> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/` - -# GPT Akademisch optimiert (GPT Academic) - -**Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Stern; wenn Sie bessere Tastenkombinationen oder Funktions-Plugins entwickelt haben, können Sie gerne einen Pull Request eröffnen.** - -Wenn Sie dieses Projekt mögen, geben Sie ihm bitte einen Stern. Wenn Sie weitere nützliche wissenschaftliche Abkürzungen oder funktionale Plugins entwickelt haben, können Sie gerne ein Problem oder eine Pull-Anforderung öffnen. Wir haben auch ein README in [Englisch|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md), das von diesem Projekt selbst übersetzt wurde. -Um dieses Projekt in eine beliebige Sprache mit GPT zu übersetzen, lesen Sie `multi_language.py` (experimentell). - -> **Hinweis** -> -> 1. Beachten Sie bitte, dass nur Funktionserweiterungen (Schaltflächen) mit **roter Farbe** Dateien lesen können und einige Erweiterungen im **Dropdown-Menü** des Erweiterungsbereichs zu finden sind. Außerdem begrüßen wir jede neue Funktionserweiterung mit **höchster Priorität** und bearbeiten sie. -> -> 2. Die Funktionalität jeder Datei in diesem Projekt wird in der Selbstanalyse [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) detailliert beschrieben. Mit der Weiterentwicklung der Versionen können Sie jederzeit die zugehörigen Funktions-Erweiterungen aufrufen, um durch Aufruf von GPT einen Selbstanalysebericht des Projekts zu erstellen. Häufig gestellte Fragen finden Sie in der [`Wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installationsanweisungen](#Installation). -> -> 3. Dieses Projekt ist kompatibel und fördert die Verwendung von inländischen Sprachmodellen wie ChatGLM und RWKV, Pangu, etc. Es unterstützt das Vorhandensein mehrerer api-keys, die in der Konfigurationsdatei wie folgt angegeben werden können: `API_KEY="openai-key1,openai-key2,api2d-key3"`. 
-> If an `API_KEY` needs to be changed temporarily, enter the temporary `API_KEY` in the input area and press Enter to apply it.
-
-Feature | Description
---- | ---
-One-click polishing | Supports one-click polishing and one-click search for grammatical errors in academic papers
-One-click Chinese-English translation | One-click Chinese-English translation
-One-click code explanation | Shows, explains, and generates code, and adds comments to code
-[Custom keyboard shortcuts](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom keyboard shortcuts
-Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/chatgpt_academic/tree/master/crazy_functions); plugins support [hot reloading](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Program self-analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of the source code of this project
-[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of the project tree of other Python/C/C++/Java/Lua/... projects
-Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One-click explanation of a complete LaTeX/PDF paper and generation of a summary
-LaTeX full-text translation and [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plugin] One-click translation or polishing of a LaTeX paper
-Batch comment generation | [Function plugin] One-click batch generation of function comments
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you seen the [README](https://github.com/binary-husky/chatgpt_academic/blob/master/docs/README_EN.md) in the 5 languages mentioned above?
-Chat analysis report generation | [Function plugin] Automatically generates a summary report after execution
-[PDF paper full-text translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper and translates the full text (multi-threaded)
-[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an arxiv paper URL to translate the abstract and download the PDF with one click
-[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let gpt help you write [related works](https://www.bilibili.com/video/BV1GP411U7Az/)
-Internet information aggregation + GPT | [Function plugin] Let GPT [fetch information from the internet](https://www.bilibili.com/video/BV1om4y127ck/) before answering a question, so its information never becomes outdated
-Formula/image/table display | Shows formulas in both [TeX form and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png); supports formula and code highlighting
-Multi-threaded plugin support | Supports calling ChatGPT from multiple threads to [batch-process](https://www.bilibili.com/video/BV1FT411H7c5/) text or programs
-Dark Gradio [theme](https://github.com/binary-husky/chatgpt_academic/issues/173) | Append ```/?__theme=dark``` to the browser URL to switch to the dark theme
-[Multi-LLM support](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) interface support | Being served simultaneously by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) must feel great, right?
-Access to more LLM models, support for [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Added the Newbing interface (new Bing) and introduced support for Tsinghua's [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [Pangu alpha](https://openi.org.cn/pangu/)
-More new features (such as image generation) …… | See the end of this document ……
-
-- New interface (modify the LAYOUT option in `config.py` to switch between "side-by-side layout" and "top-bottom layout")
-
-- All buttons are dynamically generated by reading `functional.py`, and custom functions can be easily added, freeing up the clipboard.
-
-- Proofreading/Correcting
-
-- If the output contains formulas, they will be displayed in both tex format and rendered format for easy copying and reading.
-
-- Don't feel like reading the project code? Show off the entire project to chatgpt.
-
-- Multiple large language models are mixed and called together (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4).
-
----
-# Installation
-## Installation-Method 1: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-```sh
-git clone https://github.com/binary-husky/chatgpt_academic.git
-cd chatgpt_academic
-```
-
-2. Configure API_KEY
-
-Configure the API KEY and other settings in `config.py`. [Special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
-
-(P.S. When the program runs, it first checks whether a private configuration file named "config_private.py" exists and uses the configuration defined there to override the configuration in "config.py". If you understand this configuration-reading logic, we strongly recommend creating a new configuration file named "config_private.py" next to "config.py" and moving (copying) the configurations from "config.py" into "config_private.py". "config_private.py" is not tracked by git, which keeps your private information safer. P.S. The project also supports configuring most options through `environment variables`; the format of the environment variables follows the `docker-compose` file. Reading priority: `environment variable` > `config_private.py` > `config.py`)
-
-
-3. Install dependencies
-```sh
-# (Option I: If familiar with Python) (Python version 3.9 or above, the newer the better). Note: use the official pip source or the Ali pip source; temporary switching method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Option II: If not familiar with Python) Use anaconda with similar steps (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11    # Create an anaconda environment
-conda activate gptac_venv                 # Activate the anaconda environment
-python -m pip install -r requirements.txt # Same step as pip installation
-```
-
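To make the reading priority described in step 2 concrete, here is a minimal sketch of how such an override chain can be implemented. The helper `read_config` is illustrative only and is not the project's actual code; only the file names `config.py`/`config_private.py` and the priority order come from the text above.

```python
import os

def read_config(key, default=None):
    """Sketch of the priority: environment variable > config_private.py > config.py."""
    import config as cfg                   # shipped defaults (assumed importable)
    value = getattr(cfg, key, default)
    try:
        import config_private as cfg_priv  # optional private overrides, not tracked by git
        value = getattr(cfg_priv, key, value)
    except ImportError:
        pass
    # An environment variable with the same name wins over both files.
    return os.environ.get(key, value)

# Example: API_KEY set in the environment overrides both config files.
api_key = read_config("API_KEY", default="")
```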
-<details><summary>Click to expand if supporting Tsinghua ChatGLM/Fudan MOSS as backend</summary>
-
-[Optional Step] To support Tsinghua ChatGLM/Fudan MOSS as backend, additional dependencies need to be installed (prerequisites: familiar with Python, have used Pytorch, and a sufficiently powerful machine):
-```sh
-# [Optional Step I] Support Tsinghua ChatGLM. Remark: if you encounter the "Call ChatGLM fail Cannot load ChatGLM parameters" error, refer to the following: 1. The default installation above is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda. 2. If the model cannot be loaded due to insufficient machine configuration, you can lower the model precision in request_llm/bridge_chatglm.py: change every AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llm/requirements_chatglm.txt
-
-# [Optional Step II] Support Fudan MOSS
-python -m pip install -r request_llm/requirements_moss.txt
-git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # When executing this line of code, you must be in the project root path
-
-# [Optional Step III] Make sure AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently supported models (the jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
-
-</details>
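The precision downgrade described in Optional Step I amounts to swapping the model id. A minimal sketch is shown below; the exact surrounding code in `request_llm/bridge_chatglm.py` differs, and running it requires the dependencies installed above.

```python
# Sketch only: load the quantized int4 variant instead of the full chatglm-6b.
# The real bridge code in request_llm/bridge_chatglm.py wraps this differently.
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).float()  # .half().cuda() on GPU
```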
-
-4. Run
-```sh
-python main.py
-```
-
-5. Testing Function Plugin
-```
-- Test function plugin template function (requires gpt to answer what happened today in history); you can use this function as a template to implement more complex functions.
-    Click "[Function Plugin Template Demo] Today in History"
-```
-
-## Installation-Method 2: Using Docker
-
-1. Only ChatGPT (recommended for most people)
-
-``` sh
-git clone https://github.com/binary-husky/chatgpt_academic.git  # Download the project
-cd chatgpt_academic                                             # Enter the path
-nano config.py                                                  # Edit config.py with any text editor: configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923), etc.
-docker build -t gpt-academic .                                  # Install
-
-# (Last step, option 1) In a Linux environment, using `--net=host` is more convenient and quicker
-docker run --rm -it --net=host gpt-academic
-# (Last step, option 2) In a macOS/Windows environment, you can only use the -p option to expose the container's port (e.g. 50923) to a port on the host
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)
-
-``` sh
-# Modify docker-compose.yml: delete solutions 1 and 3, and keep solution 2. Modify the configuration of solution 2 in docker-compose.yml, following the comments in it.
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker)
-``` sh
-# Modify docker-compose.yml: delete solutions 1 and 2, and keep solution 3. Modify the configuration of solution 3 in docker-compose.yml, following the comments in it.
-docker-compose up
-```
-
-
-## Installation-Method 3: Other Deployment Options
-
-1. How to use a reverse proxy URL / Microsoft Azure API
-Configure API_URL_REDIRECT according to the instructions in `config.py`.
-
-2. Remote cloud server deployment (requires cloud server knowledge and experience)
-Please visit [Deployment wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-3. Using WSL 2 (Windows Subsystem for Linux)
-Please visit [Deployment wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-4. How to run under a secondary URL (such as `http://localhost/subpath`)
-Please visit [FastAPI operating instructions](docs/WithFastapi.md)
-
-5. Running with docker-compose
-Please read docker-compose.yml and follow the prompts in it.
-
----
-# Advanced Usage
-## Customize new convenience buttons / custom function plugins
-
-1. Customize new convenience buttons (academic shortcut keys)
-Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has been added successfully and is visible, both the prefix and the suffix can be hot-modified and take effect without restarting the program.)
-For example:
-```
-"Super English to Chinese": {
-    # Prefix: will be added before your input. For example, used to describe your request, such as translating, explaining code, polishing, etc.
-    "Prefix": "Please translate the following content into Chinese, and then use a markdown table to explain the proper nouns that appear in the text one by one:\n\n",
-
-    # Suffix: will be added after your input. For example, combined with the prefix, you can enclose your input in quotation marks.
-    "Suffix": "",
-},
-```
-
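To illustrate what such an entry does at runtime, here is a minimal sketch. The dict mirrors the example above, but `apply_core_function` and the lookup shape are assumptions for demonstration, not the project's actual code.

```python
# Minimal sketch: how a core_functional.py entry could wrap user input.
CORE_FUNCTIONS = {
    "Super English to Chinese": {
        "Prefix": "Please translate the following content into Chinese, and then use a markdown table to explain the proper nouns that appear in the text one by one:\n\n",
        "Suffix": "",
    },
}

def apply_core_function(name: str, user_input: str) -> str:
    entry = CORE_FUNCTIONS[name]
    # The final prompt sent to the model is simply prefix + input + suffix.
    return entry["Prefix"] + user_input + entry["Suffix"]

print(apply_core_function("Super English to Chinese", "Attention is all you need."))
```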
-2. Custom function plugins
-
-Write powerful function plugins to perform any task you can think of, and even tasks you haven't thought of yet.
-The difficulty of writing and debugging plugins is low in this project: as long as you have some knowledge of Python, you can implement your own plugin features by imitating the template we provide.
-For more information, please refer to the [Function Plugin Guide](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). (A minimal plugin sketch is included after the update list below.)
-
----
-# Latest Update
-## New feature dynamics
-
-1. Dialogue saving. Call "Save current dialogue" in the function plugin area to save the current dialogue as a readable and restorable HTML file. In addition, call "Load dialogue history archive" in the function plugin area (dropdown menu) to restore a previous session. Tip: clicking "Load dialogue history archive" directly without specifying a file shows the cached HTML archives, and clicking "Delete all local dialogue history records" deletes all HTML archive caches.
-
-2. Report generation. Most plugins generate a work report after finishing execution.
-
-3. Modular function design: simple interfaces that support powerful functions.
-
-4. This is an open-source project that can "translate itself".
-
-5. Translating other open-source projects is not a problem either.
-
-6. Decorations for [`live2d`](https://github.com/fghrsh/live2d_demo) (disabled by default; requires changes to `config.py`).
-
-7. New MOSS language model support.
-
-8. OpenAI image generation.
-
-9. OpenAI audio analysis and summarization.
-
-10. LaTeX proofreading of the entire text.
-
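As promised in the "Custom function plugins" section above, here is a minimal plugin sketch. The function name, signature, and the `chatbot`/`history` conventions are assumptions for illustration; the project's real plugin interface lives in `crazy_functions/` and differs in its details.

```python
# Minimal sketch of a function plugin, for illustration only.
def todays_history_demo(txt, chatbot, history):
    """Template-style plugin: build a canned prompt and surface it in the chat."""
    prompt = f"What happened in history on {txt}? List one event."
    chatbot.append((txt, prompt))   # (user message, assistant text) shown in the UI
    history.extend([txt, prompt])   # keep the raw exchange for later turns
    yield chatbot, history, "done"  # the UI consumes incremental updates

# Usage sketch: the UI would iterate the generator to refresh the chat window.
for cb, hist, status in todays_history_demo("May 15", chatbot=[], history=[]):
    print(status, cb[-1])
```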
-
-## Version:
-- Version 3.5 (Todo): Call all function plugins of this project with natural language (high priority).
-- Version 3.4 (Todo): Improved multi-threading support for local large language models (LLMs).
-- Version 3.3: + Internet information synthesis function
-- Version 3.2: Function plugins support more parameter interfaces (dialogue saving, interpreting code in any language + querying any LLM combination at the same time)
-- Version 3.1: Support for querying multiple GPT models at the same time! Support for API2D, support for load balancing across multiple api keys.
-- Version 3.0: Support for ChatGLM and other small LLMs
-- Version 2.6: Restructured the plugin architecture for better interactivity, introduced more plugins
-- Version 2.5: Auto-update; fixes the problem of text being too long and tokens overflowing when summarizing the source code of large projects.
-- Version 2.4: (1) New PDF full-text translation function; (2) new function to switch the position of the input area; (3) new vertical layout option; (4) optimized multi-threaded function plugins.
-- Version 2.3: Improved multi-threaded interactivity
-- Version 2.2: Function plugins support hot reloading
-- Version 2.1: Collapsible layout
-- Version 2.0: Introduced modular function plugins
-- Version 1.0: Basic functions
-
-gpt_academic developer QQ group 2: 610599535
-
-- Known issues
-    - Some browser translation plugins interfere with the front-end rendering of this software.
-    - A Gradio version that is either too new or too old causes various exceptions.
-
-## Reference and Learning
-
-```
-The code draws on the designs of many other excellent projects, in particular:
-
-# Project 1: Tsinghua University's ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: Tsinghua University's JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
diff --git a/spaces/zhenwusw/JoJoGAN/op/conv2d_gradfix.py b/spaces/zhenwusw/JoJoGAN/op/conv2d_gradfix.py
deleted file mode 100644
index bb2f94bbcb8132299fd4d538972d32bd7ff6e7d6..0000000000000000000000000000000000000000
--- a/spaces/zhenwusw/JoJoGAN/op/conv2d_gradfix.py
+++ /dev/null
@@ -1,227 +0,0 @@
-import contextlib
-import warnings
-
-import torch
-from torch import autograd
-from torch.nn import functional as F
-
-enabled = True
-weight_gradients_disabled = False
-
-
-@contextlib.contextmanager
-def no_weight_gradients():
-    global weight_gradients_disabled
-
-    old = weight_gradients_disabled
-    weight_gradients_disabled = True
-    yield
-    weight_gradients_disabled = old
-
-
-def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
-    if could_use_op(input):
-        return conv2d_gradfix(
-            transpose=False,
-            weight_shape=weight.shape,
-            stride=stride,
-            padding=padding,
-            output_padding=0,
-            dilation=dilation,
-            groups=groups,
-        ).apply(input, weight, bias)
-
-    return F.conv2d(
-        input=input,
-        weight=weight,
-        bias=bias,
-        stride=stride,
-        padding=padding,
-        dilation=dilation,
-        groups=groups,
-    )
-
-
-def conv_transpose2d(
input, - weight, - bias=None, - stride=1, - padding=0, - output_padding=0, - groups=1, - dilation=1, -): - if could_use_op(input): - return conv2d_gradfix( - transpose=True, - weight_shape=weight.shape, - stride=stride, - padding=padding, - output_padding=output_padding, - groups=groups, - dilation=dilation, - ).apply(input, weight, bias) - - return F.conv_transpose2d( - input=input, - weight=weight, - bias=bias, - stride=stride, - padding=padding, - output_padding=output_padding, - dilation=dilation, - groups=groups, - ) - - -def could_use_op(input): - if (not enabled) or (not torch.backends.cudnn.enabled): - return False - - if input.device.type != "cuda": - return False - - if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8."]): - return True - - warnings.warn( - f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()." - ) - - return False - - -def ensure_tuple(xs, ndim): - xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim - - return xs - - -conv2d_gradfix_cache = dict() - - -def conv2d_gradfix( - transpose, weight_shape, stride, padding, output_padding, dilation, groups -): - ndim = 2 - weight_shape = tuple(weight_shape) - stride = ensure_tuple(stride, ndim) - padding = ensure_tuple(padding, ndim) - output_padding = ensure_tuple(output_padding, ndim) - dilation = ensure_tuple(dilation, ndim) - - key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) - if key in conv2d_gradfix_cache: - return conv2d_gradfix_cache[key] - - common_kwargs = dict( - stride=stride, padding=padding, dilation=dilation, groups=groups - ) - - def calc_output_padding(input_shape, output_shape): - if transpose: - return [0, 0] - - return [ - input_shape[i + 2] - - (output_shape[i + 2] - 1) * stride[i] - - (1 - 2 * padding[i]) - - dilation[i] * (weight_shape[i + 2] - 1) - for i in range(ndim) - ] - - class Conv2d(autograd.Function): - @staticmethod - def forward(ctx, input, weight, bias): - if not transpose: - out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) - - else: - out = F.conv_transpose2d( - input=input, - weight=weight, - bias=bias, - output_padding=output_padding, - **common_kwargs, - ) - - ctx.save_for_backward(input, weight) - - return out - - @staticmethod - def backward(ctx, grad_output): - input, weight = ctx.saved_tensors - grad_input, grad_weight, grad_bias = None, None, None - - if ctx.needs_input_grad[0]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape - ) - grad_input = conv2d_gradfix( - transpose=(not transpose), - weight_shape=weight_shape, - output_padding=p, - **common_kwargs, - ).apply(grad_output, weight, None) - - if ctx.needs_input_grad[1] and not weight_gradients_disabled: - grad_weight = Conv2dGradWeight.apply(grad_output, input) - - if ctx.needs_input_grad[2]: - grad_bias = grad_output.sum((0, 2, 3)) - - return grad_input, grad_weight, grad_bias - - class Conv2dGradWeight(autograd.Function): - @staticmethod - def forward(ctx, grad_output, input): - op = torch._C._jit_get_operation( - "aten::cudnn_convolution_backward_weight" - if not transpose - else "aten::cudnn_convolution_transpose_backward_weight" - ) - flags = [ - torch.backends.cudnn.benchmark, - torch.backends.cudnn.deterministic, - torch.backends.cudnn.allow_tf32, - ] - grad_weight = op( - weight_shape, - grad_output, - input, - padding, - stride, - dilation, - groups, - *flags, - ) - ctx.save_for_backward(grad_output, input) - - return grad_weight - - 
@staticmethod - def backward(ctx, grad_grad_weight): - grad_output, input = ctx.saved_tensors - grad_grad_output, grad_grad_input = None, None - - if ctx.needs_input_grad[0]: - grad_grad_output = Conv2d.apply(input, grad_grad_weight, None) - - if ctx.needs_input_grad[1]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape - ) - grad_grad_input = conv2d_gradfix( - transpose=(not transpose), - weight_shape=weight_shape, - output_padding=p, - **common_kwargs, - ).apply(grad_output, grad_grad_weight, None) - - return grad_grad_output, grad_grad_input - - conv2d_gradfix_cache[key] = Conv2d - - return Conv2d diff --git a/spaces/zhoucr/ai-koni/text/cleaners.py b/spaces/zhoucr/ai-koni/text/cleaners.py deleted file mode 100644 index da305b71bb905d19daa7fe7acb8f8eb955092a7d..0000000000000000000000000000000000000000 --- a/spaces/zhoucr/ai-koni/text/cleaners.py +++ /dev/null @@ -1,333 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import jieba -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin,BOPOMOFO - - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def latin_to_hangul(text): - for regex, replacement in _latin_to_hangul: - text = re.sub(regex, replacement, text) - return text - - -def divide_hangul(text): - for regex, replacement in _hangul_divided: - text = re.sub(regex, replacement, text) - return text - - -def hangul_number(num, sino=True): - '''Reference https://github.com/Kyubyong/g2pK''' - num = re.sub(',', '', num) - - if num == '0': - return '영' - if not sino and num == '20': - return '스무' - - digits = '123456789' - names = '일이삼사오육칠팔구' - digit2name = {d: n for d, n in zip(digits, names)} - - modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉' - decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔' - digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())} - digit2dec = {d: dec for d, dec in zip(digits, decimals.split())} - - spelledout = [] - for i, digit in enumerate(num): - i = len(num) - i - 1 - if sino: - if i == 0: - name = digit2name.get(digit, '') - elif i == 1: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - else: - if i == 0: - name = digit2mod.get(digit, '') - elif i == 1: - name = digit2dec.get(digit, '') - if digit == '0': - if i % 4 == 0: - last_three = spelledout[-min(3, len(spelledout)):] - if ''.join(last_three) == '': - spelledout.append('') - continue - else: - spelledout.append('') - continue - if i == 2: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 3: - name = digit2name.get(digit, '') + '천' - name = name.replace('일천', '천') - elif i == 4: - name = digit2name.get(digit, '') + '만' - name = name.replace('일만', '만') - elif i == 5: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - elif i == 6: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 7: - name = digit2name.get(digit, '') + '천' - name = 
name.replace('일천', '천') - elif i == 8: - name = digit2name.get(digit, '') + '억' - elif i == 9: - name = digit2name.get(digit, '') + '십' - elif i == 10: - name = digit2name.get(digit, '') + '백' - elif i == 11: - name = digit2name.get(digit, '') + '천' - elif i == 12: - name = digit2name.get(digit, '') + '조' - elif i == 13: - name = digit2name.get(digit, '') + '십' - elif i == 14: - name = digit2name.get(digit, '') + '백' - elif i == 15: - name = digit2name.get(digit, '') + '천' - spelledout.append(name) - return ''.join(elem for elem in spelledout) - - -def number_to_hangul(text): - '''Reference https://github.com/Kyubyong/g2pK''' - tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text)) - for token in tokens: - num, classifier = token - if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers: - spelledout = hangul_number(num, sino=False) - else: - spelledout = hangul_number(num, sino=True) - text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}') - # digit by digit for remaining digits - digits = '0123456789' - names = '영일이삼사오육칠팔구' - for d, n in zip(digits, names): - text = text.replace(d, n) - return text - - -def basic_cleaners(text): - '''Basic pipeline that lowercases and collapses whitespace without transliteration.''' - text = lowercase(text) - text = collapse_whitespace(text) - return text - - -def transliteration_cleaners(text): - '''Pipeline for non-English text that transliterates to ASCII.''' - text = convert_to_ascii(text) - text = lowercase(text) - text = collapse_whitespace(text) - return text - - -def japanese_cleaners(text): - '''Pipeline for notating accent in Japanese text. - Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i 6 and "delta" in chunk["choices"][0]: - finish_reason = chunk["choices"][0]["finish_reason"] - status_text = construct_token_message( - sum(all_token_counts), stream=True - ) - if finish_reason == "stop": - yield get_return_value() - break - try: - partial_words = ( - partial_words + chunk["choices"][0]["delta"]["content"] - ) - except KeyError: - status_text = ( - standard_error_msg - + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: " - + str(sum(all_token_counts)) - ) - yield get_return_value() - break - history[-1] = construct_assistant(partial_words) - chatbot[-1] = (chatbot[-1][0], partial_words+display_append) - all_token_counts[-1] += 1 - yield get_return_value() - - -def predict_all( - 
openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - logging.info("一次性回答模式") - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - all_token_counts.append(count_token(construct_user(inputs))) - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - False, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - return chatbot, history, status_text, all_token_counts - except requests.exceptions.ProxyError: - status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - except requests.exceptions.SSLError: - status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - response = json.loads(response.text) - content = response["choices"][0]["message"]["content"] - history[-1] = construct_assistant(content) - chatbot[-1] = (chatbot[-1][0], content+display_append) - total_token_count = response["usage"]["total_tokens"] - all_token_counts[-1] = total_token_count - sum(all_token_counts) - status_text = construct_token_message(total_token_count) - return chatbot, history, status_text, all_token_counts - - -def predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], - use_websearch=False, - files = None, - reply_language="中文", - should_check_token_count=True, -): # repetition_penalty, top_k - logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL) - yield chatbot+[(inputs, "")], history, "开始生成回答……", all_token_counts - if reply_language == "跟随问题语言(不稳定)": - reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch." - if files: - msg = "构建索引中……(这可能需要比较久的时间)" - logging.info(msg) - yield chatbot+[(inputs, "")], history, msg, all_token_counts - index = construct_index(openai_api_key, file_src=files) - msg = "索引构建完成,获取回答中……" - yield chatbot+[(inputs, "")], history, msg, all_token_counts - history, chatbot, status_text = chat_ai(openai_api_key, index, inputs, history, chatbot, reply_language) - yield chatbot, history, status_text, all_token_counts - return - - old_inputs = "" - link_references = [] - if use_websearch: - search_results = ddg(inputs, max_results=5) - old_inputs = inputs - web_results = [] - for idx, result in enumerate(search_results): - logging.info(f"搜索结果{idx + 1}:{result}") - domain_name = urllib3.util.parse_url(result["href"]).host - web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}') - link_references.append(f"{idx+1}. 
[{domain_name}]({result['href']})\n") - link_references = "\n\n" + "".join(link_references) - inputs = ( - replace_today(WEBSEARCH_PTOMPT_TEMPLATE) - .replace("{query}", inputs) - .replace("{web_results}", "\n\n".join(web_results)) - .replace("{reply_language}", reply_language ) - ) - else: - link_references = "" - - if len(openai_api_key) != 51: - status_text = standard_error_msg + no_apikey_msg - logging.info(status_text) - chatbot.append((inputs, "")) - if len(history) == 0: - history.append(construct_user(inputs)) - history.append("") - all_token_counts.append(0) - else: - history[-2] = construct_user(inputs) - yield chatbot+[(inputs, "")], history, status_text, all_token_counts - return - elif len(inputs.strip()) == 0: - status_text = standard_error_msg + no_input_msg - logging.info(status_text) - yield chatbot+[(inputs, "")], history, status_text, all_token_counts - return - - if stream: - logging.info("使用流式传输") - iter = stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=link_references - ) - for chatbot, history, status_text, all_token_counts in iter: - if shared.state.interrupted: - shared.state.recover() - return - yield chatbot, history, status_text, all_token_counts - else: - logging.info("不使用流式传输") - chatbot, history, status_text, all_token_counts = predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=link_references - ) - yield chatbot, history, status_text, all_token_counts - - logging.info(f"传输完毕。当前token计数为{all_token_counts}") - if len(history) > 1 and history[-1]["content"] != inputs: - logging.info( - "回答为:" - + colorama.Fore.BLUE - + f"{history[-1]['content']}" - + colorama.Style.RESET_ALL - ) - - if stream: - max_token = max_token_streaming - else: - max_token = max_token_all - - if sum(all_token_counts) > max_token and should_check_token_count: - status_text = f"精简token中{all_token_counts}/{max_token}" - logging.info(status_text) - yield chatbot, history, status_text, all_token_counts - iter = reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - all_token_counts, - top_p, - temperature, - max_token//2, - selected_model=selected_model, - ) - for chatbot, history, status_text, all_token_counts in iter: - status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}" - yield chatbot, history, status_text, all_token_counts - - -def retry( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], - reply_language="中文", -): - logging.info("重试中……") - if len(history) == 0: - yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count - return - history.pop() - inputs = history.pop()["content"] - token_count.pop() - iter = predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - token_count, - top_p, - temperature, - stream=stream, - selected_model=selected_model, - reply_language=reply_language, - ) - logging.info("重试中……") - for x in iter: - yield x - logging.info("重试完毕") - - -def reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - max_token_count, - selected_model=MODELS[0], - reply_language="中文", -): - logging.info("开始减少token数量……") - iter = predict( - openai_api_key, - system_prompt, - history, - summarize_prompt, - chatbot, 
- token_count, - top_p, - temperature, - selected_model=selected_model, - should_check_token_count=False, - reply_language=reply_language, - ) - logging.info(f"chatbot: {chatbot}") - flag = False - for chatbot, history, status_text, previous_token_count in iter: - num_chat = find_n(previous_token_count, max_token_count) - if flag: - chatbot = chatbot[:-1] - flag = True - history = history[-2*num_chat:] if num_chat > 0 else [] - token_count = previous_token_count[-num_chat:] if num_chat > 0 else [] - msg = f"保留了最近{num_chat}轮对话" - yield chatbot, history, msg + "," + construct_token_message( - sum(token_count) if len(token_count) > 0 else 0, - ), token_count - logging.info(msg) - logging.info("减少token数量完毕")
<table>
  <tr><th>Feature</th><th>Description</th></tr>
  <tr><td>Money</td><td>This feature allows you to add unlimited money to your account, either by dropping cash bags, robbing banks, or changing your bank balance.</td></tr>
  <tr><td>Weapons</td><td>This feature allows you to spawn any weapon that you want, either by giving yourself all weapons, selecting a specific weapon, or customizing your weapon options.</td></tr>
  <tr><td>Vehicles</td><td>This feature allows you to spawn any vehicle that you want, either by selecting from a list of vehicles, entering a vehicle name, or customizing your vehicle options.</td></tr>
  <tr><td>Player</td><td>This feature allows you to change your character's appearance and skills, such as changing your model, outfit, hair, tattoos, stats, abilities, and more.</td></tr>
  <tr><td>Teleport</td><td>This feature allows you to teleport to any location on the map, either by selecting from a list of locations, entering coordinates, or setting a waypoint.</td></tr>
  <tr><td>Cheats</td><td>This feature allows you to enable various cheats in the game, such as invincibility, super speed, super jump, explosive bullets, flaming fists, and more.</td></tr>
  <tr><td>Missions</td><td>This feature allows you to complete missions and unlock achievements in the game, either by selecting from a list of missions, entering a mission name, or customizing your mission options.</td></tr>
  <tr><td>World</td><td>This feature allows you to modify the game world and environment, such as changing the weather, time, gravity, traffic, police, animals, and more.</td></tr>
</table>