id | author | last_modified | downloads | likes | paperswithcode_id | tags | lastModified | createdAt | key | created | card | embedding | library_name | pipeline_tag | mask_token | card_data | widget_data | model_index | config | transformers_info | spaces | safetensors | transformersInfo | modelId | embeddings |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
realmongx2/m0delz | realmongx2 | 2023-11-29T14:50:42Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T14:50:42Z | 2023-09-14T15:54:17.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | realmongx2/m0delz | [
-0.32276463508605957,
-0.22568437457084656,
0.8622260093688965,
0.43461504578590393,
-0.5282986760139465,
0.7012966275215149,
0.7915719747543335,
0.07618647813796997,
0.7746024131774902,
0.2563219368457794,
-0.7852815389633179,
-0.22573824226856232,
-0.910447895526886,
0.5715669393539429,
... |
vorstcavry/v | vorstcavry | 2023-11-29T14:52:54Z | 0 | 0 | null | [
"transformers",
"endpoints_compatible",
"region:us"
] | 2023-11-29T14:52:54Z | 2023-09-15T06:54:58.000Z | null | null | Entry not found | null | transformers | null | null | null | null | null | null | null | null | null | null | vorstcavry/v | [
-0.32276463508605957,
-0.22568437457084656,
0.8622260093688965,
0.43461504578590393,
-0.5282986760139465,
0.7012966275215149,
0.7915719747543335,
0.07618647813796997,
0.7746024131774902,
0.2563219368457794,
-0.7852815389633179,
-0.22573824226856232,
-0.910447895526886,
0.5715669393539429,
... |
dragoncrack/https___www_donationalerts_com_r_crack_dragon | dragoncrack | 2023-11-29T12:22:56Z | 0 | 0 | null | [
"license:openrail",
"region:us"
] | 2023-11-29T12:22:56Z | 2023-09-20T12:21:38.000Z | null | null | ---
license: openrail
---
| null | null | null | null | null | null | null | null | null | null | null | null | dragoncrack/https___www_donationalerts_com_r_crack_dragon | [
-0.12853394448757172,
-0.18616753816604614,
0.6529128551483154,
0.4943627119064331,
-0.19319315254688263,
0.2360745221376419,
0.3607199192047119,
0.050563275814056396,
0.5793652534484863,
0.740013837814331,
-0.6508102416992188,
-0.23783978819847107,
-0.7102248668670654,
-0.0478257760405540... |
Rootreck/so-vits-svc-4.0-Fallout_4 | Rootreck | 2023-11-29T16:16:49Z | 0 | 0 | null | [
"Fallout",
"en",
"ru",
"region:us"
] | 2023-11-29T16:16:49Z | 2023-09-20T19:01:20.000Z | null | null | ---
language:
- en
- ru
tags:
- Fallout
---
Eng = These are character voice models from Fallout 4, trained for so-vits-svc-4.1.21
Rus = Это модели голосов персонажей из Fallout 4, обученные для so-vits-svc-4.1.21 | null | null | null | null | null | null | null | null | null | null | null | null | Rootreck/so-vits-svc-4.0-Fallout_4 | [
-0.0655485987663269,
-0.23461958765983582,
0.3930397629737854,
0.17403168976306915,
-0.0823599323630333,
0.5871867537498474,
0.327926367521286,
-0.46565765142440796,
0.36212536692619324,
0.4993411600589752,
-1.5690900087356567,
-0.6185620427131653,
-0.11867588758468628,
0.6303946375846863,... |
BadDotNetDev/AI-Terapeut | BadDotNetDev | 2023-11-29T11:51:17Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T11:51:17Z | 2023-09-21T10:05:42.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | BadDotNetDev/AI-Terapeut | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
GilbertClaus/Latihan | GilbertClaus | 2023-11-29T20:24:52Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T20:24:52Z | 2023-09-22T12:06:05.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | GilbertClaus/Latihan | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
juancvcv/realcartoonXL_v3 | juancvcv | 2023-11-29T13:31:26Z | 0 | 0 | null | [
"license:other",
"region:us"
] | 2023-11-29T13:31:26Z | 2023-09-25T08:05:56.000Z | null | null | ---
license: other
---
| null | null | null | null | null | null | null | null | null | null | null | null | juancvcv/realcartoonXL_v3 | [
-0.1285337656736374,
-0.18616777658462524,
0.6529129147529602,
0.4943626821041107,
-0.19319315254688263,
0.23607446253299713,
0.3607197403907776,
0.05056322365999222,
0.5793652534484863,
0.740013837814331,
-0.6508102416992188,
-0.23783965408802032,
-0.7102248668670654,
-0.04782604798674583... |
Blocktoast64/Blocktoasts-RVC-Models | Blocktoast64 | 2023-11-30T01:28:29Z | 0 | 5 | null | [
"license:openrail",
"region:us"
] | 2023-11-30T01:28:29Z | 2023-09-25T22:09:31.000Z | null | null | ---
license: openrail
---
| null | null | null | null | null | null | null | null | null | null | null | null | Blocktoast64/Blocktoasts-RVC-Models | [
-0.1285337656736374,
-0.18616777658462524,
0.6529129147529602,
0.4943626821041107,
-0.19319315254688263,
0.23607446253299713,
0.3607197403907776,
0.05056322365999222,
0.5793652534484863,
0.740013837814331,
-0.6508102416992188,
-0.23783965408802032,
-0.7102248668670654,
-0.04782604798674583... |
Cloudxego/Chaeryeong | Cloudxego | 2023-11-29T05:14:56Z | 0 | 0 | null | [
"license:openrail",
"region:us"
] | 2023-11-29T05:14:56Z | 2023-09-25T22:35:21.000Z | null | null | ---
license: openrail
---
| null | null | null | null | null | null | null | null | null | null | null | null | Cloudxego/Chaeryeong | [
-0.12853401899337769,
-0.1861673891544342,
0.6529126763343811,
0.4943625330924988,
-0.19319301843643188,
0.23607464134693146,
0.3607196807861328,
0.05056333541870117,
0.5793654322624207,
0.740013837814331,
-0.6508100628852844,
-0.23783957958221436,
-0.7102248668670654,
-0.04782595857977867... |
xixiyyds/xxxxxx | xixiyyds | 2023-11-29T23:26:15Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T23:26:15Z | 2023-09-27T04:02:03.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | xixiyyds/xxxxxx | [
-0.3227650225162506,
-0.22568444907665253,
0.8622258901596069,
0.43461504578590393,
-0.5282988548278809,
0.7012965679168701,
0.7915717959403992,
0.0761863961815834,
0.7746025919914246,
0.2563222050666809,
-0.7852813005447388,
-0.22573848068714142,
-0.910447895526886,
0.5715667009353638,
... |
EarthnDusk/Loras-Dump-EarthNDusk-2023 | EarthnDusk | 2023-11-29T06:25:50Z | 0 | 0 | null | [
"lora",
"stable diffusion",
"en",
"dataset:EarthnDusk/Creative-Embeddings",
"license:creativeml-openrail-m",
"region:us"
] | 2023-11-29T06:25:50Z | 2023-09-28T02:57:55.000Z | null | null | ---
license: creativeml-openrail-m
datasets:
- EarthnDusk/Creative-Embeddings
language:
- en
tags:
- lora
- stable diffusion
---
Our OTHER LORA Dumpster is getting QUITE FULL - This DOES NOT include SDXL Loras.
We rarely train Locon and/or Loha - so those will still go in the other repo!
Warning: there may be DOUBLES in this.
Want to see more? We're starting to release EXCLUSIVE content via our Patreon: https://patreon.com/earthndusk
"WE"? - We have Dissociative Identity Disorder, ADHD, Autism and CPTSD - "WE" as in we're a system of over 200 alters, and we're not ashamed of it. We believe that AI can break down barriers in some aspects of mental health, but we also believe that AI can hinder aspects of it.
We're gunning for our huge exhibition project - watch this space: https://www.end-media.org
Our photography (WHEN WE DID IT) is available for FREE via Unsplash, and feel free to use it in a Lora or a Model:
https://unsplash.com/@duskfallcrew
WE ARE PROUDLY SPONSORED BY: https://www.piratediffusion.com/
JOIN THE DISCORD AND DEMAND THINGS OF US: https://discord.gg/5t2kYxt7An
JOIN OUR DA GROUP: https://www.deviantart.com/diffusionai
JOIN OUR SUBREDDIT: https://www.reddit.com/r/earthndusk/ | null | null | null | null | null | null | null | null | null | null | null | null | EarthnDusk/Loras-Dump-EarthNDusk-2023 | [
-0.7339159250259399,
-0.5032265186309814,
0.45507293939590454,
-0.03729633986949921,
-0.5929321050643921,
0.15971355140209198,
0.31156817078590393,
-0.7382181882858276,
0.8219791054725647,
0.5732947587966919,
-0.9173764586448669,
-0.4584747850894928,
-0.7468770146369934,
-0.012745420448482... |
Elpepe57/Lusimi | Elpepe57 | 2023-11-29T22:16:48Z | 0 | 0 | null | [
"license:cc",
"region:us"
] | 2023-11-29T22:16:48Z | 2023-10-03T18:51:29.000Z | null | null | ---
license: cc
---
| null | null | null | null | null | null | null | null | null | null | null | null | Elpepe57/Lusimi | [
-0.12853401899337769,
-0.1861673891544342,
0.6529126763343811,
0.4943625330924988,
-0.19319301843643188,
0.23607464134693146,
0.3607196807861328,
0.05056333541870117,
0.5793654322624207,
0.740013837814331,
-0.6508100628852844,
-0.23783957958221436,
-0.7102248668670654,
-0.04782595857977867... |
marcosprun/videos | marcosprun | 2023-11-30T01:19:52Z | 0 | 0 | null | [
"license:openrail",
"region:us"
] | 2023-11-30T01:19:52Z | 2023-10-04T00:51:22.000Z | null | null | ---
license: openrail
---
| null | null | null | null | null | null | null | null | null | null | null | null | marcosprun/videos | [
-0.12853394448757172,
-0.1861671805381775,
0.6529130339622498,
0.49436283111572266,
-0.1931932270526886,
0.23607474565505981,
0.3607197403907776,
0.05056331306695938,
0.5793652534484863,
0.7400139570236206,
-0.6508102416992188,
-0.23783963918685913,
-0.7102248668670654,
-0.0478258728981018... |
xavierbarbier/distilbert-base-uncased-kaggle-llm-science-exam | xavierbarbier | 2023-11-29T09:38:56Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T09:38:56Z | 2023-10-04T10:35:32.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | xavierbarbier/distilbert-base-uncased-kaggle-llm-science-exam | [
-0.3227651119232178,
-0.22568456828594208,
0.8622261881828308,
0.43461447954177856,
-0.5282989740371704,
0.7012965083122253,
0.7915719747543335,
0.0761861652135849,
0.7746025323867798,
0.25632235407829285,
-0.7852817177772522,
-0.22573819756507874,
-0.9104477763175964,
0.5715669393539429,
... |
E31RAI/gishikiv | E31RAI | 2023-11-29T02:41:29Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T02:41:29Z | 2023-10-04T19:17:39.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | E31RAI/gishikiv | [
-0.3227651119232178,
-0.22568456828594208,
0.8622261881828308,
0.43461447954177856,
-0.5282989740371704,
0.7012965083122253,
0.7915719747543335,
0.0761861652135849,
0.7746025323867798,
0.25632235407829285,
-0.7852817177772522,
-0.22573819756507874,
-0.9104477763175964,
0.5715669393539429,
... |
Nan-Do/LeetCodeWizard_7B_V1.1 | Nan-Do | 2023-11-29T03:11:16Z | 0 | 0 | null | [
"transformers",
"pytorch",
"llama",
"text-generation",
"Python",
"Leetcode",
"Problem Solving",
"CP",
"en",
"license:llama2",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | 2023-11-29T03:11:16Z | 2023-10-05T00:33:48.000Z | null | null | ---
license: llama2
language:
- en
tags:
- Python
- Leetcode
- Problem Solving
- CP
---
# LeetCodeWizard: An LLM for mastering programming interviews and solving programming problems.
<p align="center">
<img src="https://raw.githubusercontent.com/Nan-Do/LeetCodeContestsDataset/main/LeetCodeWizardLogo.png" width="350"/>
</p>
**What is LeetCodeWizard?**
- LeetCodeWizard is a coding large language model specifically trained to solve and explain Leetcode (or any) programming problems.
**How was the model developed?**
- This model is a fine-tuned version of the [WizardCoder-Python-7B](https://huggingface.co/WizardLM/WizardCoder-Python-7B-V1.0) with a dataset of [Leetcode problems](https://github.com/Nan-Do/LeetCodeContestsDataset)
**Model capabilities:**
- It should be able to solve most of the problems found at [Leetcode](https://leetcode.com/) and even pass the sample interviews they offer on the site.
- It can write both the code and the explanations for the solutions.
**Prompt template:**
- This model uses the Alpaca instruction/response prompt style (the input field is not necessary); see the sketch below.
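A minimal prompt-construction sketch follows. This is not an official snippet from the authors: the exact wording of the Alpaca template and the generation settings are assumptions, since the card only states that the Alpaca instruction/response style is used.
```python
# Hedged sketch: builds an Alpaca-style instruction prompt and generates with the
# standard transformers API. The exact template wording is an assumption.
from transformers import AutoModelForCausalLM, AutoTokenizer

ALPACA_TEMPLATE = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Response:\n"
)

tokenizer = AutoTokenizer.from_pretrained("Nan-Do/LeetCodeWizard_7B_V1.1")
model = AutoModelForCausalLM.from_pretrained("Nan-Do/LeetCodeWizard_7B_V1.1", device_map="auto")

prompt = ALPACA_TEMPLATE.format(
    instruction="Solve the LeetCode problem 'Two Sum' in Python and explain your solution."
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=512)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```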
| Model | Checkpoint | Base Model |
|--------------------|---------------------------------------------------------------------------------------------|------------|
| LeetCodeWizard-13B-V1.1 | 🤗 <a href="https://huggingface.co/Nan-Do/LeetCodeWizard_13B_v1.1a/tree/main" target="_blank">HF Link</a> | WizardCoder-Python-13B-V1.1 |
| LeetCodeWizard-7B-V1.1 | 🤗 <a href="https://huggingface.co/Nan-Do/LeetCodeWizard_7B_V1.1/tree/main" target="_blank">HF Link</a> | WizardCoder-Python-7B-V1.1 |
[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
| null | transformers | text-generation | null | null | null | null | null | null | null | null | null | Nan-Do/LeetCodeWizard_7B_V1.1 | [
-0.5415382981300354,
-0.8386220932006836,
0.2061167061328888,
0.3816488981246948,
0.27935680747032166,
0.15862420201301575,
0.04527677595615387,
-0.22174374759197235,
0.3035755157470703,
0.3749023377895355,
-0.6648810505867004,
-0.5154604911804199,
-0.41884833574295044,
-0.0271847061812877... |
AayushA/ddpm-butterflies-128 | AayushA | 2023-11-29T13:57:01Z | 0 | 0 | null | [
"tensorboard",
"license:mit",
"region:us"
] | 2023-11-29T13:57:01Z | 2023-10-05T08:11:57.000Z | null | null | ---
license: mit
---
| null | null | null | null | null | null | null | null | null | null | null | null | AayushA/ddpm-butterflies-128 | [
-0.12853386998176575,
-0.18616794049739838,
0.6529127359390259,
0.4943622946739197,
-0.19319306313991547,
0.2360745519399643,
0.36072012782096863,
0.05056336894631386,
0.579365611076355,
0.740013837814331,
-0.6508102416992188,
-0.23784014582633972,
-0.7102251052856445,
-0.04782590642571449... |
sokobanni/3DGS_viewerTest | sokobanni | 2023-11-29T14:31:31Z | 0 | 0 | null | [
"license:unknown",
"region:us"
] | 2023-11-29T14:31:31Z | 2023-10-05T13:55:48.000Z | null | null | ---
license: unknown
---
| null | null | null | null | null | null | null | null | null | null | null | null | sokobanni/3DGS_viewerTest | [
-0.12853386998176575,
-0.18616794049739838,
0.6529127359390259,
0.4943622946739197,
-0.19319306313991547,
0.2360745519399643,
0.36072012782096863,
0.05056336894631386,
0.579365611076355,
0.740013837814331,
-0.6508102416992188,
-0.23784014582633972,
-0.7102251052856445,
-0.04782590642571449... |
Nan-Do/LeetCodeWizard_13B_v1.1a | Nan-Do | 2023-11-29T03:14:34Z | 0 | 0 | null | [
"transformers",
"pytorch",
"llama",
"text-generation",
"Python",
"Leetcode",
"Problem Solving",
"CP",
"en",
"license:llama2",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | 2023-11-29T03:14:34Z | 2023-10-06T03:34:42.000Z | null | null | ---
license: llama2
language:
- en
tags:
- Python
- Leetcode
- Problem Solving
- CP
---
# LeetCodeWizard: An LLM for mastering programming interviews and solving programming problems.
<p align="center">
<img src="https://raw.githubusercontent.com/Nan-Do/LeetCodeContestsDataset/main/LeetCodeWizardLogo.png" width="350"/>
</p>
**What is LeetCodeWizard?**
- LeetCodeWizard is a coding large language model specifically trained to solve and explain Leetcode (or any) programming problems.
**How was the model developed?**
- This model is a fine-tuned version of the [WizardCoder-Python-13B](https://huggingface.co/WizardLM/WizardCoder-Python-13B-V1.0) with a dataset of [Leetcode problems](https://github.com/Nan-Do/LeetCodeContestsDataset)
**Model capabilities:**
- It should be able to solve most of the problems found at [Leetcode](https://leetcode.com/) and even pass the sample interviews they offer on the site.
- It can write both the code and the explanations for the solutions.
**Prompt template:**
- This model uses the Alpaca instruction/response prompt style (the input field is not necessary).
| Model | Checkpoint | Base Model |
|--------------------|---------------------------------------------------------------------------------------------|------------|
| LeetCodeWizard-13B-V1.1 | 🤗 <a href="https://huggingface.co/Nan-Do/LeetCodeWizard_13B_v1.1a/tree/main" target="_blank">HF Link</a> | WizardCoder-Python-13B-V1.1 |
| LeetCodeWizard-7B-V1.1 | 🤗 <a href="https://huggingface.co/Nan-Do/LeetCodeWizard_7B_V1.1/tree/main" target="_blank">HF Link</a> | WizardCoder-Python-7B-V1.1 |
[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) | null | transformers | text-generation | null | null | null | null | null | null | null | null | null | Nan-Do/LeetCodeWizard_13B_v1.1a | [
-0.5433085560798645,
-0.8432263135910034,
0.2058280110359192,
0.3898010551929474,
0.2829621732234955,
0.15646064281463623,
0.04319023713469505,
-0.2216043472290039,
0.30410611629486084,
0.3673090934753418,
-0.6768150329589844,
-0.5144538283348083,
-0.4197748005390167,
-0.026872189715504646... |
modpotato/public_models | modpotato | 2023-11-29T19:57:41Z | 0 | 0 | null | [
"rvc",
"audio-to-audio",
"en",
"region:us"
] | 2023-11-29T19:57:41Z | 2023-10-06T04:01:41.000Z | null | null | ---
language:
- en
pipeline_tag: audio-to-audio
tags:
- rvc
---
# mods rvc models
repo for RVC models I've made (DM me on Discord (modpotato) for commissions)
[Open an issue](https://huggingface.co/Gustavosta/SowlfieModelsRVC/discussions/new)!
## 🎤 New RVC Models:
(all of these are trained until no further improvement was noticed)
| Model | Epochs | Language | Preview |
|---|:---:|---:|---|
| [Androxus (Paladins)](https://huggingface.co/modpotato/public_models/blob/main/adnorox_fittest.zip) | 123 epochs | english | [Androxus from Paladins - Billion Dollar Baby](https://www.youtube.com/watch?v=BrOO9AQySPk) |
| [a literal fucking sine wave](https://huggingface.co/modpotato/public_models/blob/main/a%20literal%20sine%20wave_fittest.zip) | 197 epochs | ????? | [games but its sung by a literal sine wave](https://youtu.be/-omYMgHoyRA) |
| [square wave](https://huggingface.co/modpotato/public_models/blob/main/square%20wave.zip) | 42 epochs (may retrain) | ????? | [games but its sung by a literal square wave](https://www.youtube.com/watch?v=nqpvXi_Vpls) |
| [saw wave](https://huggingface.co/modpotato/public_models/blob/main/square%20wave.zip) | 774 epochs | ????? | [games but its sung by a literal saw wave](https://www.youtube.com/watch?v=-iQVvLWSUg0) |
| [Nightbringer Yasuo (LoL)](https://huggingface.co/modpotato/public_models/blob/main/nightbringer%20yasuo.zip) | 370 epochs | english | [i want it that way sung by Nightbringer Yasuo (LoL)](https://www.youtube.com/watch?v=I3qT4StTXI0) |
| [triangle wave](https://huggingface.co/modpotato/public_models/blob/main/triangle%20wave_fittest.zip) | around 350 | ????? | [games but its sung by a literal triangle wave](https://www.youtube.com/watch?v=Ry2OBYCcJO8) |
| [Corvus (Paladins)](https://huggingface.co/modpotato/public_models/blob/main/corvus_fittest.zip) | around 350 | english | [corvus sings ballin](https://youtu.be/RxiqERTi9LU) |
| [Otzdarva (Youtuber)](https://huggingface.co/modpotato/public_models/blob/main/otzdarva_fittest.zip) | no idea | english | [otz sings 3 big balls](https://youtu.be/5kQoVrTDFuA) |
| [DJ Smokey (fixed)](https://huggingface.co/modpotato/public_models/blob/main/dj%20smokey_v2.zip) | no idea | english | [DJ Smokey - ryte night](https://www.youtube.com/watch?v=VNfBj6P2-Fw) |
| [Mordekaiser (LoL)](https://huggingface.co/modpotato/public_models/blob/main/mordekaiser.zip) | no idea | english | none atm |
| [Sydney (Payday 2)](https://huggingface.co/modpotato/public_models/blob/main/sydney_(payday_2)_fittest.zip) | no idea | english | none atm |
| [Jiro (Payday 2)](https://huggingface.co/modpotato/public_models/blob/main/jiro_payday_2_fittest.zip) | no idea | japanese | none atm |
| [car names meme guy](https://huggingface.co/modpotato/public_models/blob/main/car%20names%20guy_fittest.zip) | no idea | english | none atm |
| [Nihilanth](https://huggingface.co/modpotato/public_models/blob/main/Nihilanth_fittest.zip) | no idea | ????? | none atm |
| [OOF sfx](https://huggingface.co/modpotato/public_models/blob/main/oof_sfx_fittest.zip) | no idea | oof | none atm |
| [jeff (half life 2)](https://huggingface.co/modpotato/public_models/blob/main/HL-jeff_fittest.zip) | no idea | ????? | none atm |
| [Slade (Teen Titans)](https://huggingface.co/modpotato/public_models/blob/main/slade_teen-titans.zip) | no idea | ~250 | none atm |
| [metal pipe sfx](https://huggingface.co/modpotato/public_models/blob/main/metal_pipe_fittest.zip) | no idea | ~250 | none atm |
| [NTTS](https://huggingface.co/modpotato/public_models/blob/main/NTTS_mini_fittest.zip) | no idea | ????? | none atm |
## 🤢 Old RVC Models:
| Model | Epochs | Language | Preview |
|---|:---:|---:|---|
| [DJ Smokey (legalize nuclear bombs)](https://huggingface.co/modpotato/public_models/blob/main/test-dj-smokey.zip) | 1k epochs | english | [DJ Smokey - ryte night](https://youtu.be/VNfBj6P2-Fw) |
| [ChaCha (Akazukin Chacha)](https://huggingface.co/modpotato/public_models/blob/main/chacha.zip) | 300 epochs | english dub | [ChaCha - ryte night](https://youtu.be/wRIIleSQX94) |
| [Link (CD-i)](https://huggingface.co/modpotato/public_models/blob/main/Link%20(CD-i).zip) | 300 epochs | english | [link miss me with that nonsense (actually sung by link)](https://youtu.be/uBaj0kpFKf8) |
yeah i ripped this from some other huggingface acc | null | null | audio-to-audio | null | null | null | null | null | null | null | null | null | modpotato/public_models | [
-0.7452715039253235,
-0.5789803862571716,
0.19354362785816193,
0.35474148392677307,
-0.24881993234157562,
0.09051492065191269,
0.07417584210634232,
-0.776994526386261,
0.5819816589355469,
0.6098169088363647,
-1.0012835264205933,
-0.5525773167610168,
-0.3058622181415558,
0.15027596056461334... |
yentinglin/Taiwan-LLM-7B-v2.0-base | yentinglin | 2023-11-29T05:55:57Z | 0 | 11 | null | [
"transformers",
"safetensors",
"llama",
"text-generation",
"zh",
"license:apache-2.0",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
] | 2023-11-29T05:55:57Z | 2023-10-06T10:06:12.000Z | null | null | ---
license: apache-2.0
language:
- zh
widget:
- text: "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: 你好,請問你可以幫我寫一封推薦信嗎? ASSISTANT:"
library_name: transformers
pipeline_tag: text-generation
extra_gated_heading: Acknowledge license to accept the repository.
extra_gated_prompt: Please contact the author for access.
extra_gated_button_content: Acknowledge license 同意以上內容
extra_gated_fields:
Name: text
Mail: text
Organization: text
Country: text
Any utilization of the Taiwan LLM repository mandates the explicit acknowledgment and attribution to the original author: checkbox
使用Taiwan LLM必須明確地承認和歸功於優必達株式會社 Ubitus 以及原始作者: checkbox
---
<img src="https://cdn-uploads.huggingface.co/production/uploads/5df9c78eda6d0311fd3d541f/CmusIT5OlSXvFrbTJ7l-C.png" alt="Taiwan LLM Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/>
# 🌟 Checkout [Taiwan-LLM Demo Chat-UI](http://www.twllm.com) 🌟
# Model Card for Taiwan LLM 7B v2.0 base
Taiwan LLM is an advanced language model tailored for Traditional Chinese, focusing on the linguistic and cultural contexts of Taiwan.
Developed from a large base model, it's enriched with diverse Taiwanese textual sources and refined through Supervised Fine-Tuning.
This model excels in language understanding and generation, aligning closely with Taiwan's cultural nuances.
It demonstrates improved performance on various benchmarks like TC-Eval, showcasing its contextual comprehension and cultural relevance.
For detailed insights into Taiwan LLM's development and features, refer to our [technical report](https://github.com/MiuLab/Taiwan-LLaMa/blob/main/twllm_paper.pdf).
## Model description
- **Model type:** A 7B parameter GPT-like model fine-tuned on a mix of publicly available, synthetic datasets.
- **Language(s) (NLP):** Primarily Traditional Chinese (zh-tw)
- **Finetuned from model:** [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf)
### Model Sources
<!-- Provide the basic links for the model. -->
- **Repository:** https://github.com/MiuLab/Taiwan-LLaMa
- **Demo:** https://twllm.com/
## Performance

## Intended uses
You should fine-tune this model for instruction-following / chat applications; a minimal loading sketch follows.
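As a rough, hedged illustration (not an official snippet from the authors; it simply loads the base model with the standard transformers API and runs a short completion):
```python
# Hedged sketch: loads the base model with the standard transformers API.
# As a base (not instruction-tuned) model, outputs will be raw continuations;
# fine-tune it first for chat use, as noted above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "yentinglin/Taiwan-LLM-7B-v2.0-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

prompt = "台灣最高的山是"  # "The highest mountain in Taiwan is ..."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```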
### Training hyperparameters



The following hyperparameters were used during training:
- learning_rate: 5e-05
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.03
- num_epochs: 5.0
## Citation
If you find Taiwan LLM useful in your work, please cite it with:
```
@inproceedings{lin-chen-2023-llm,
title = "{LLM}-Eval: Unified Multi-Dimensional Automatic Evaluation for Open-Domain Conversations with Large Language Models",
author = "Lin, Yen-Ting and Chen, Yun-Nung",
booktitle = "Proceedings of the 5th Workshop on NLP for Conversational AI (NLP4ConvAI 2023)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.nlp4convai-1.5",
pages = "47--58"
}
@misc{taiwanllama,
author={Lin, Yen-Ting and Chen, Yun-Nung},
title={Language Models for Taiwanese Culture},
year={2023},
url={https://github.com/MiuLab/Taiwan-LLaMa},
note={Code and models available at https://github.com/MiuLab/Taiwan-LLaMa},
}
```
# Acknowledgement
Taiwan LLM v2 is conducted in collaboration with [Ubitus K.K.](http://ubitus.net). Ubitus provides valuable compute resources for the project.
| null | transformers | text-generation | null | null | null | null | null | null | null | null | null | yentinglin/Taiwan-LLM-7B-v2.0-base | [
-0.3944850265979767,
-1.044699788093567,
0.3315112292766571,
0.45413637161254883,
-0.5318340063095093,
0.11665576696395874,
-0.5230771899223328,
-0.6743147969245911,
0.28886228799819946,
0.4799889326095581,
-0.35900112986564636,
-0.6853320002555847,
-0.46950098872184753,
0.0411919169127941... |
E31RAI/Lora | E31RAI | 2023-11-29T03:01:31Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T03:01:31Z | 2023-10-07T02:05:46.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | E31RAI/Lora | [
-0.3227650821208954,
-0.22568479180335999,
0.8622263669967651,
0.4346153140068054,
-0.5282987952232361,
0.7012966871261597,
0.7915722727775574,
0.07618651539087296,
0.7746027112007141,
0.2563222348690033,
-0.7852821350097656,
-0.225738525390625,
-0.910447895526886,
0.5715667009353638,
-0... |
sadakiti/model | sadakiti | 2023-11-29T14:24:09Z | 0 | 0 | null | [
"license:creativeml-openrail-m",
"region:us"
] | 2023-11-29T14:24:09Z | 2023-10-07T14:48:30.000Z | null | null | ---
license: creativeml-openrail-m
---
| null | null | null | null | null | null | null | null | null | null | null | null | sadakiti/model | [
-0.1285340040922165,
-0.1861676573753357,
0.6529127955436707,
0.49436259269714355,
-0.19319328665733337,
0.23607435822486877,
0.36072009801864624,
0.05056355893611908,
0.579365611076355,
0.7400140166282654,
-0.6508103609085083,
-0.23783960938453674,
-0.7102246284484863,
-0.0478256717324256... |
Nikhil090/Dataset | Nikhil090 | 2023-11-29T09:19:52Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T09:19:52Z | 2023-10-09T09:49:42.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | Nikhil090/Dataset | [
-0.3227650821208954,
-0.22568479180335999,
0.8622263669967651,
0.4346153140068054,
-0.5282987952232361,
0.7012966871261597,
0.7915722727775574,
0.07618651539087296,
0.7746027112007141,
0.2563222348690033,
-0.7852821350097656,
-0.225738525390625,
-0.910447895526886,
0.5715667009353638,
-0... |
vorstcavry/test-upload | vorstcavry | 2023-11-29T04:12:57Z | 0 | 0 | null | [
"transformers",
"endpoints_compatible",
"region:us"
] | 2023-11-29T04:12:57Z | 2023-10-09T13:33:01.000Z | null | null | Entry not found | null | transformers | null | null | null | null | null | null | null | null | null | null | vorstcavry/test-upload | [
-0.32276493310928345,
-0.22568459808826447,
0.8622260093688965,
0.4346148371696472,
-0.5282988548278809,
0.7012964487075806,
0.7915716767311096,
0.07618658244609833,
0.7746025323867798,
0.25632205605506897,
-0.7852813601493835,
-0.22573843598365784,
-0.910447895526886,
0.5715667009353638,
... |
hylee719/transcript-analysis-testing | hylee719 | 2023-11-29T03:16:31Z | 0 | 0 | null | [
"license:gpl",
"endpoints_compatible",
"region:us"
] | 2023-11-29T03:16:31Z | 2023-10-11T01:48:55.000Z | null | null | ---
license: gpl
---
| null | null | null | null | null | null | null | null | null | null | null | null | hylee719/transcript-analysis-testing | [
-0.1285340040922165,
-0.1861676573753357,
0.6529127955436707,
0.49436259269714355,
-0.19319328665733337,
0.23607435822486877,
0.36072009801864624,
0.05056355893611908,
0.579365611076355,
0.7400140166282654,
-0.6508103609085083,
-0.23783960938453674,
-0.7102246284484863,
-0.0478256717324256... |
castorini/rankllama-v1-7b-lora-passage | castorini | 2023-11-29T22:35:19Z | 0 | 2 | null | [
"arxiv:2310.08319",
"license:llama2",
"region:us"
] | 2023-11-29T22:35:19Z | 2023-10-11T01:57:57.000Z | null | null | ---
license: llama2
---
# RankLLaMA-7B-Passage
[Fine-Tuning LLaMA for Multi-Stage Text Retrieval](https://arxiv.org/abs/2310.08319).
Xueguang Ma, Liang Wang, Nan Yang, Furu Wei, Jimmy Lin, arXiv 2023
This model is fine-tuned from LLaMA-2-7B using LoRA for passage reranking.
## Training Data
The model is fine-tuned on the training split of the [MS MARCO Passage Ranking](https://microsoft.github.io/msmarco/Datasets) dataset for 1 epoch.
Please check our paper for details.
## Usage
Below is an example to compute the similarity score of a query-passage pair
```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel, PeftConfig
def get_model(peft_model_name):
config = PeftConfig.from_pretrained(peft_model_name)
base_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path, num_labels=1)
model = PeftModel.from_pretrained(base_model, peft_model_name)
model = model.merge_and_unload()
model.eval()
return model
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained('meta-llama/Llama-2-7b-hf')
model = get_model('castorini/rankllama-v1-7b-lora-passage')
# Define a query-passage pair
query = "What is llama?"
title = "Llama"
passage = "The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."
# Tokenize the query-passage pair
inputs = tokenizer(f'query: {query}', f'document: {title} {passage}', return_tensors='pt')
# Run the model forward
with torch.no_grad():
outputs = model(**inputs)
logits = outputs.logits
score = logits[0][0]
print(score)
```
## Batch inference and training
An unofficial replication of the inference and training code can be found [here](https://github.com/texttron/tevatron/tree/main/examples/rankllama)
## Citation
If you find our paper or models helpful, please consider citing them as follows:
```
@article{rankllama,
title={Fine-Tuning LLaMA for Multi-Stage Text Retrieval},
author={Xueguang Ma and Liang Wang and Nan Yang and Furu Wei and Jimmy Lin},
year={2023},
journal={arXiv:2310.08319},
}
``` | null | null | null | null | null | null | null | null | null | null | null | null | castorini/rankllama-v1-7b-lora-passage | [
-0.040633268654346466,
-0.7243825793266296,
0.6957443356513977,
0.2890571653842926,
-0.478077232837677,
0.05404260382056236,
0.02024189382791519,
-0.26506683230400085,
0.18810702860355377,
0.5071351528167725,
-0.39997923374176025,
-0.9462002515792847,
-0.6761670708656311,
0.176455587148666... |
castorini/repllama-v1-7b-lora-passage | castorini | 2023-11-29T22:36:06Z | 0 | 1 | null | [
"arxiv:2310.08319",
"license:llama2",
"region:us"
] | 2023-11-29T22:36:06Z | 2023-10-11T02:01:45.000Z | null | null | ---
license: llama2
---
# RepLLaMA-7B-Passage
[Fine-Tuning LLaMA for Multi-Stage Text Retrieval](https://arxiv.org/abs/2310.08319).
Xueguang Ma, Liang Wang, Nan Yang, Furu Wei, Jimmy Lin, arXiv 2023
This model is fine-tuned from LLaMA-2-7B using LoRA and the embedding size is 4096.
## Training Data
The model is fine-tuned on the training split of the [MS MARCO Passage Ranking](https://microsoft.github.io/msmarco/Datasets) dataset for 1 epoch.
Please check our paper for details.
## Usage
Below is an example to encode a query and a passage, and then compute their similarity using their embedding.
```python
import torch
from transformers import AutoModel, AutoTokenizer
from peft import PeftModel, PeftConfig
def get_model(peft_model_name):
config = PeftConfig.from_pretrained(peft_model_name)
base_model = AutoModel.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(base_model, peft_model_name)
model = model.merge_and_unload()
model.eval()
return model
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained('meta-llama/Llama-2-7b-hf')
model = get_model('castorini/repllama-v1-7b-lora-passage')
# Define query and passage inputs
query = "What is llama?"
title = "Llama"
passage = "The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."
query_input = tokenizer(f'query: {query}</s>', return_tensors='pt')
passage_input = tokenizer(f'passage: {title} {passage}</s>', return_tensors='pt')
# Run the model forward to compute embeddings and query-passage similarity score
with torch.no_grad():
# compute query embedding
query_outputs = model(**query_input)
query_embedding = query_outputs.last_hidden_state[0][-1]
query_embedding = torch.nn.functional.normalize(query_embedding, p=2, dim=0)
# compute passage embedding
passage_outputs = model(**passage_input)
passage_embeddings = passage_outputs.last_hidden_state[0][-1]
passage_embeddings = torch.nn.functional.normalize(passage_embeddings, p=2, dim=0)
# compute similarity score
score = torch.dot(query_embedding, passage_embeddings)
print(score)
```
## Batch inference and training
An unofficial replication of the inference and training code can be found [here](https://github.com/texttron/tevatron/tree/main/examples/repllama)
## Citation
If you find our paper or models helpful, please consider citing them as follows:
```
@article{rankllama,
title={Fine-Tuning LLaMA for Multi-Stage Text Retrieval},
author={Xueguang Ma and Liang Wang and Nan Yang and Furu Wei and Jimmy Lin},
year={2023},
journal={arXiv:2310.08319},
}
``` | null | null | null | null | null | null | null | null | null | null | null | null | castorini/repllama-v1-7b-lora-passage | [
-0.10242246836423874,
-0.8066720366477966,
0.6262598037719727,
0.20274195075035095,
-0.5742142200469971,
-0.012619150802493095,
-0.03898022696375847,
-0.28031983971595764,
0.16207173466682434,
0.5537421703338623,
-0.4461878836154938,
-0.9596207737922668,
-0.5528452396392822,
0.297615379095... |
Nihonto/test | Nihonto | 2023-11-29T20:02:09Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T20:02:09Z | 2023-10-12T17:26:12.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | Nihonto/test | [
-0.3227650821208954,
-0.22568479180335999,
0.8622263669967651,
0.4346153140068054,
-0.5282987952232361,
0.7012966871261597,
0.7915722727775574,
0.07618651539087296,
0.7746027112007141,
0.2563222348690033,
-0.7852821350097656,
-0.225738525390625,
-0.910447895526886,
0.5715667009353638,
-0... |
yentinglin/Taiwan-LLM-13B-v2.0-base | yentinglin | 2023-11-29T05:56:22Z | 0 | 0 | null | [
"transformers",
"safetensors",
"llama",
"text-generation",
"zh",
"license:apache-2.0",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | 2023-11-29T05:56:22Z | 2023-10-13T06:39:29.000Z | null | null |
---
# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1
# Doc / guide: https://huggingface.co/docs/hub/model-cards
license: apache-2.0
language:
- zh
widget:
- text: >-
A chat between a curious user and an artificial intelligence assistant.
The assistant gives helpful, detailed, and polite answers to the user's
questions. USER: 你好,請問你可以幫我寫一封推薦信嗎? ASSISTANT:
library_name: transformers
pipeline_tag: text-generation
extra_gated_heading: Acknowledge license to accept the repository.
extra_gated_prompt: Please contact the author for access.
extra_gated_button_content: Acknowledge license 同意以上內容
extra_gated_fields:
Name: text
Mail: text
Organization: text
Country: text
Any utilization of the Taiwan LLM repository mandates the explicit acknowledgment and attribution to the original author: checkbox
使用Taiwan LLM必須明確地承認和歸功於優必達株式會社 Ubitus 以及原始作者: checkbox
---
<img src="https://cdn-uploads.huggingface.co/production/uploads/5df9c78eda6d0311fd3d541f/CmusIT5OlSXvFrbTJ7l-C.png" alt="Taiwan LLM Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/>
# 🌟 Checkout [Taiwan-LLM Demo Chat-UI](http://www.twllm.com) 🌟
# Model Card for Taiwan LLM 13B v2.0 base
Taiwan LLM is an advanced language model tailored for Traditional Chinese, focusing on the linguistic and cultural contexts of Taiwan.
Developed from a large base model, it's enriched with diverse Taiwanese textual sources and refined through Supervised Fine-Tuning.
This model excels in language understanding and generation, aligning closely with Taiwan's cultural nuances.
It demonstrates improved performance on various benchmarks like TC-Eval, showcasing its contextual comprehension and cultural relevance.
For detailed insights into Taiwan LLM's development and features, refer to our [technical report](https://github.com/MiuLab/Taiwan-LLaMa/blob/main/twllm_paper.pdf).
## Model description
- **Model type:** A 13B parameter GPT-like model fine-tuned on a mix of publicly available, synthetic datasets.
- **Language(s) (NLP):** Primarily Traditional Chinese (zh-tw)
- **Finetuned from model:** [meta-llama/Llama-2-13b-hf](https://huggingface.co/meta-llama/Llama-2-13b-hf)
### Model Sources
<!-- Provide the basic links for the model. -->
- **Repository:** https://github.com/MiuLab/Taiwan-LLaMa
- **Demo:** https://twllm.com/
## Performance

## Intended uses
You should fine-tune this model for instruction-following / chat applications.
### Training hyperparameters



The following hyperparameters were used during training:
- learning_rate: 5e-05
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.03
- num_epochs: 5.0
## Citation
If you find Taiwan LLM useful in your work, please cite it with:
```
@inproceedings{lin-chen-2023-llm,
title = "{LLM}-Eval: Unified Multi-Dimensional Automatic Evaluation for Open-Domain Conversations with Large Language Models",
author = "Lin, Yen-Ting and Chen, Yun-Nung",
booktitle = "Proceedings of the 5th Workshop on NLP for Conversational AI (NLP4ConvAI 2023)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.nlp4convai-1.5",
pages = "47--58"
}
@misc{taiwanllama,
author={Lin, Yen-Ting and Chen, Yun-Nung},
title={Language Models for Taiwanese Culture},
year={2023},
url={https://github.com/MiuLab/Taiwan-LLaMa},
note={Code and models available at https://github.com/MiuLab/Taiwan-LLaMa},
}
```
# Acknowledgement
Taiwan LLM v2 is conducted in collaboration with [Ubitus K.K.](http://ubitus.net). Ubitus provides valuable compute resources for the project.
| null | transformers | text-generation | null | null | null | null | null | null | null | null | null | yentinglin/Taiwan-LLM-13B-v2.0-base | [
-0.39510801434516907,
-1.0530403852462769,
0.32909032702445984,
0.4727005362510681,
-0.5198456645011902,
0.11994653195142746,
-0.5290646553039551,
-0.6757215261459351,
0.2944716513156891,
0.46123194694519043,
-0.3860335052013397,
-0.6803910136222839,
-0.4689342677593231,
0.0379815623164176... |
chandra21/wav2vec2-large-xlsr-hindi | chandra21 | 2023-11-29T10:14:13Z | 0 | 0 | null | [
"transformers",
"tensorboard",
"safetensors",
"wav2vec2",
"automatic-speech-recognition",
"generated_from_trainer",
"dataset:common_voice_11_0",
"base_model:facebook/wav2vec2-xls-r-300m",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | 2023-11-29T10:14:13Z | 2023-10-13T08:23:17.000Z | null | null | ---
license: apache-2.0
base_model: facebook/wav2vec2-xls-r-300m
tags:
- generated_from_trainer
datasets:
- common_voice_11_0
model-index:
- name: wav2vec2-large-xlsr-hindi
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xlsr-hindi
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice_11_0 dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 10
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 3
- total_train_batch_size: 30
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 5
- mixed_precision_training: Native AMP
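As an illustration only, the hyperparameters listed above could be expressed roughly as the following `TrainingArguments` (a hedged sketch; the output directory and any options not stated in the card are assumptions):
```python
# Hedged sketch mapping the listed hyperparameters onto transformers TrainingArguments.
# Values not stated in the card (e.g. output_dir) are assumptions.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="wav2vec2-large-xlsr-hindi",  # assumed
    learning_rate=3e-4,
    per_device_train_batch_size=10,
    per_device_eval_batch_size=8,
    seed=42,
    gradient_accumulation_steps=3,            # effective train batch size: 30
    lr_scheduler_type="linear",
    warmup_steps=500,
    num_train_epochs=5,
    fp16=True,                                # "Native AMP" mixed precision
)
# Adam with betas=(0.9, 0.999) and epsilon=1e-08 is the Trainer's default optimizer.
```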
### Training results
### Framework versions
- Transformers 4.35.2
- Pytorch 2.1.0+cu118
- Datasets 2.15.0
- Tokenizers 0.15.0
| null | transformers | automatic-speech-recognition | null | null | null | null | null | null | null | null | null | chandra21/wav2vec2-large-xlsr-hindi | [
-0.37014880776405334,
-0.6730424165725708,
-0.048103801906108856,
0.28860369324684143,
-0.38615351915359497,
-0.3092785179615021,
-0.40462255477905273,
-0.4304628372192383,
0.15834125876426697,
0.3307023048400879,
-0.9258243441581726,
-0.4940301477909088,
-0.6703292727470398,
-0.2324319332... |
JapGuy/RichardMuller_v4_483Epochs_RVC_v2 | JapGuy | 2023-11-29T19:14:41Z | 0 | 0 | null | [
"music",
"rvc",
"RichardMuller",
"Richard",
"Muller",
"Müller",
"Miller",
"model",
"audio-to-audio",
"sk",
"cs",
"license:openrail",
"region:us"
] | 2023-11-29T19:14:41Z | 2023-10-15T09:51:59.000Z | null | null | ---
license: openrail
language:
- sk
- cs
pipeline_tag: audio-to-audio
tags:
- music
- rvc
- RichardMuller
- Richard
- Muller
- Müller
- Miller
- model
---

# Richard Müller [SK] (v4)
# 483 Epochs - RVC V2 - rmvpe
Trained on 38 minutes of isolated acapellas using UVR (Voc FT + Reverb HQ) + Audacity to remove parts with double vocals and vocals from others (+Noise Gate) | null | null | audio-to-audio | null | null | null | null | null | null | null | null | null | JapGuy/RichardMuller_v4_483Epochs_RVC_v2 | [
-0.6364258527755737,
-0.767653226852417,
0.12959788739681244,
0.7182629108428955,
-0.12978675961494446,
0.22741636633872986,
-0.23052535951137543,
-0.4494805634021759,
0.30410367250442505,
0.7524430751800537,
-0.9535945653915405,
-0.17736592888832092,
-0.4279744625091553,
0.188709780573844... |
E31RAI/Futa | E31RAI | 2023-11-29T03:52:23Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T03:52:23Z | 2023-10-15T21:54:00.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | E31RAI/Futa | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
RYUMIXTAPE/RVC-v2 | RYUMIXTAPE | 2023-11-29T11:41:10Z | 0 | 0 | null | [
"license:openrail",
"region:us"
] | 2023-11-29T11:41:10Z | 2023-10-17T11:05:18.000Z | null | null | ---
license: openrail
---
| null | null | null | null | null | null | null | null | null | null | null | null | RYUMIXTAPE/RVC-v2 | [
-0.1285337656736374,
-0.18616777658462524,
0.6529129147529602,
0.4943626821041107,
-0.19319315254688263,
0.23607446253299713,
0.3607197403907776,
0.05056322365999222,
0.5793652534484863,
0.740013837814331,
-0.6508102416992188,
-0.23783965408802032,
-0.7102248668670654,
-0.04782604798674583... |
wyzuku/RVCModelsPrivate | wyzuku | 2023-11-29T11:19:18Z | 0 | 0 | null | [
"license:openrail",
"region:us"
] | 2023-11-29T11:19:18Z | 2023-10-17T18:11:04.000Z | null | null | ---
license: openrail
---
| null | null | null | null | null | null | null | null | null | null | null | null | wyzuku/RVCModelsPrivate | [
-0.1285337656736374,
-0.18616777658462524,
0.6529129147529602,
0.4943626821041107,
-0.19319315254688263,
0.23607446253299713,
0.3607197403907776,
0.05056322365999222,
0.5793652534484863,
0.740013837814331,
-0.6508102416992188,
-0.23783965408802032,
-0.7102248668670654,
-0.04782604798674583... |
crestf411/crestfall-peft | crestf411 | 2023-11-29T06:14:06Z | 0 | 1 | null | [
"region:us"
] | 2023-11-29T06:14:06Z | 2023-10-18T01:38:22.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | crestf411/crestfall-peft | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
Xenos14/TestBed | Xenos14 | 2023-11-29T17:11:11Z | 0 | 0 | null | [
"has_space",
"region:us"
] | 2023-11-29T17:11:11Z | 2023-10-20T07:12:49.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | Xenos14/TestBed | [
-0.32276463508605957,
-0.2256849706172943,
0.8622266054153442,
0.4346153736114502,
-0.5282987952232361,
0.7012974619865417,
0.7915722131729126,
0.07618652284145355,
0.7746030688285828,
0.2563217282295227,
-0.7852814793586731,
-0.22573867440223694,
-0.9104479551315308,
0.571567177772522,
... |
pechaut/Mistral-7b-instruct-cairo-PEFT | pechaut | 2023-11-29T10:04:14Z | 0 | 0 | null | [
"generated_from_trainer",
"base_model:mistralai/Mistral-7B-Instruct-v0.1",
"license:apache-2.0",
"region:us"
] | 2023-11-29T10:04:14Z | 2023-10-20T12:52:05.000Z | null | null | ---
license: apache-2.0
base_model: mistralai/Mistral-7B-Instruct-v0.1
tags:
- generated_from_trainer
model-index:
- name: Mistral-7b-instruct-cairo-PEFT
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Mistral-7b-instruct-cairo-PEFT
This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4019
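The repository name ends in "-PEFT", so a hedged guess is that it stores a PEFT (LoRA) adapter for the base model above. A loading sketch under that assumption follows; if the weights are actually a merged full model, load them directly with `AutoModelForCausalLM` instead.
```python
# Hedged sketch: assumes this repo contains a PEFT adapter for
# mistralai/Mistral-7B-Instruct-v0.1; this is inferred from the repo name, not the card.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "mistralai/Mistral-7B-Instruct-v0.1"
adapter_id = "pechaut/Mistral-7b-instruct-cairo-PEFT"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(base_model, adapter_id)

prompt = "[INST] Write a minimal Cairo contract that stores a single felt. [/INST]"  # illustrative prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```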
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.03
- training_steps: 1000
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 4.9419 | 0.02 | 5 | 4.6483 |
| 3.9783 | 0.04 | 10 | 4.6272 |
| 4.435 | 0.06 | 15 | 4.5826 |
| 4.2584 | 0.08 | 20 | 4.4936 |
| 4.3762 | 0.1 | 25 | 4.3248 |
| 3.9439 | 0.12 | 30 | 4.1278 |
| 3.5457 | 0.14 | 35 | 3.8557 |
| 3.9832 | 0.16 | 40 | 3.6140 |
| 3.8853 | 0.18 | 45 | 3.4018 |
| 2.9133 | 0.2 | 50 | 3.2528 |
| 2.7053 | 0.22 | 55 | 3.1320 |
| 2.2822 | 0.24 | 60 | 2.9988 |
| 3.3321 | 0.27 | 65 | 2.8657 |
| 2.6849 | 0.29 | 70 | 2.7583 |
| 2.1692 | 0.31 | 75 | 2.7543 |
| 2.7414 | 0.33 | 80 | 2.7597 |
| 2.2613 | 0.35 | 85 | 2.6587 |
| 2.4811 | 0.37 | 90 | 2.5609 |
| 2.0377 | 0.39 | 95 | 2.5793 |
| 1.945 | 0.41 | 100 | 2.6101 |
| 2.7281 | 0.43 | 105 | 2.2495 |
| 2.2824 | 0.45 | 110 | 2.1938 |
| 2.1301 | 0.47 | 115 | 2.1810 |
| 2.1599 | 0.49 | 120 | 2.1664 |
| 2.4669 | 0.51 | 125 | 2.1565 |
| 1.9863 | 0.53 | 130 | 2.1339 |
| 2.133 | 0.55 | 135 | 2.0948 |
| 1.8624 | 0.57 | 140 | 2.0498 |
| 2.0716 | 0.59 | 145 | 1.9950 |
| 1.7386 | 0.61 | 150 | 1.9598 |
| 1.52 | 0.63 | 155 | 1.9358 |
| 1.3844 | 0.65 | 160 | 1.9232 |
| 2.0719 | 0.67 | 165 | 1.9008 |
| 1.6826 | 0.69 | 170 | 1.8628 |
| 1.9418 | 0.71 | 175 | 1.8324 |
| 1.5165 | 0.73 | 180 | 1.7976 |
| 1.5764 | 0.76 | 185 | 1.7771 |
| 1.7126 | 0.78 | 190 | 1.7570 |
| 1.5553 | 0.8 | 195 | 1.7477 |
| 1.4325 | 0.82 | 200 | 1.7389 |
| 1.7383 | 0.84 | 205 | 1.7284 |
| 1.4096 | 0.86 | 210 | 1.7192 |
| 1.3947 | 0.88 | 215 | 1.7159 |
| 1.4394 | 0.9 | 220 | 1.7090 |
| 1.5481 | 0.92 | 225 | 1.7075 |
| 1.4635 | 0.94 | 230 | 1.7117 |
| 1.3564 | 0.96 | 235 | 1.7041 |
| 1.5381 | 0.98 | 240 | 1.6902 |
| 1.2412 | 1.0 | 245 | 1.6838 |
| 1.7424 | 1.02 | 250 | 1.6803 |
| 1.2657 | 1.04 | 255 | 1.6869 |
| 1.2026 | 1.06 | 260 | 1.7028 |
| 0.9746 | 1.08 | 265 | 1.7092 |
| 1.3277 | 1.1 | 270 | 1.7065 |
| 1.741 | 1.12 | 275 | 1.7007 |
| 1.4553 | 1.14 | 280 | 1.6901 |
| 1.4277 | 1.16 | 285 | 1.6786 |
| 1.5373 | 1.18 | 290 | 1.6731 |
| 1.3754 | 1.2 | 295 | 1.6690 |
| 1.8448 | 1.22 | 300 | 1.6428 |
| 1.132 | 1.24 | 305 | 1.6277 |
| 1.1909 | 1.27 | 310 | 1.6236 |
| 1.2459 | 1.29 | 315 | 1.6253 |
| 1.1233 | 1.31 | 320 | 1.6310 |
| 1.1812 | 1.33 | 325 | 1.6327 |
| 1.2173 | 1.35 | 330 | 1.6318 |
| 1.1845 | 1.37 | 335 | 1.6331 |
| 1.4047 | 1.39 | 340 | 1.6198 |
| 1.3456 | 1.41 | 345 | 1.6102 |
| 1.0766 | 1.43 | 350 | 1.5972 |
| 1.434 | 1.45 | 355 | 1.5711 |
| 1.4121 | 1.47 | 360 | 1.5519 |
| 0.991 | 1.49 | 365 | 1.5307 |
| 1.1855 | 1.51 | 370 | 1.5250 |
| 0.9791 | 1.53 | 375 | 1.5176 |
| 1.1704 | 1.55 | 380 | 1.5166 |
| 0.8702 | 1.57 | 385 | 1.5181 |
| 1.1582 | 1.59 | 390 | 1.5084 |
| 1.0805 | 1.61 | 395 | 1.5046 |
| 1.3099 | 1.63 | 400 | 1.4955 |
| 1.2066 | 1.65 | 405 | 1.4818 |
| 1.0825 | 1.67 | 410 | 1.4846 |
| 1.0802 | 1.69 | 415 | 1.4849 |
| 1.7319 | 1.71 | 420 | 1.4855 |
| 1.5408 | 1.73 | 425 | 1.4909 |
| 0.5243 | 1.76 | 430 | 1.4993 |
| 1.0521 | 1.78 | 435 | 1.4943 |
| 1.0145 | 1.8 | 440 | 1.4867 |
| 1.0813 | 1.82 | 445 | 1.4760 |
| 1.1515 | 1.84 | 450 | 1.4462 |
| 0.9266 | 1.86 | 455 | 1.4358 |
| 0.6752 | 1.88 | 460 | 1.4328 |
| 1.1664 | 1.9 | 465 | 1.4342 |
| 1.1168 | 1.92 | 470 | 1.4390 |
| 1.3819 | 1.94 | 475 | 1.4468 |
| 0.9204 | 1.96 | 480 | 1.4451 |
| 0.8669 | 1.98 | 485 | 1.4357 |
| 1.0333 | 2.0 | 490 | 1.4236 |
| 1.0886 | 2.02 | 495 | 1.4128 |
| 1.1797 | 2.04 | 500 | 1.4085 |
| 1.0462 | 2.06 | 505 | 1.4091 |
| 1.009 | 2.08 | 510 | 1.4157 |
| 0.7713 | 2.1 | 515 | 1.4277 |
| 1.1869 | 2.12 | 520 | 1.4372 |
| 0.5705 | 2.14 | 525 | 1.4452 |
| 0.8965 | 2.16 | 530 | 1.4562 |
| 0.6888 | 2.18 | 535 | 1.4563 |
| 0.682 | 2.2 | 540 | 1.4599 |
| 0.8815 | 2.22 | 545 | 1.4600 |
| 0.9211 | 2.24 | 550 | 1.4659 |
| 0.8063 | 2.27 | 555 | 1.4663 |
| 0.6676 | 2.29 | 560 | 1.4635 |
| 1.0024 | 2.31 | 565 | 1.4577 |
| 0.9457 | 2.33 | 570 | 1.4536 |
| 1.0273 | 2.35 | 575 | 1.4480 |
| 0.5464 | 2.37 | 580 | 1.4496 |
| 0.7404 | 2.39 | 585 | 1.4582 |
| 0.7804 | 2.41 | 590 | 1.4659 |
| 0.9942 | 2.43 | 595 | 1.4701 |
| 0.9433 | 2.45 | 600 | 1.4730 |
| 0.8804 | 2.47 | 605 | 1.4688 |
| 0.7836 | 2.49 | 610 | 1.4657 |
| 0.7613 | 2.51 | 615 | 1.4588 |
| 0.8007 | 2.53 | 620 | 1.4565 |
| 0.7768 | 2.55 | 625 | 1.4501 |
| 0.9832 | 2.57 | 630 | 1.4430 |
| 0.7297 | 2.59 | 635 | 1.4410 |
| 0.8646 | 2.61 | 640 | 1.4440 |
| 1.1847 | 2.63 | 645 | 1.4449 |
| 0.7582 | 2.65 | 650 | 1.4397 |
| 1.024 | 2.67 | 655 | 1.4312 |
| 0.6909 | 2.69 | 660 | 1.4297 |
| 0.9462 | 2.71 | 665 | 1.4311 |
| 0.6868 | 2.73 | 670 | 1.4344 |
| 0.9798 | 2.76 | 675 | 1.4380 |
| 1.2549 | 2.78 | 680 | 1.4392 |
| 0.5431 | 2.8 | 685 | 1.4394 |
| 0.7168 | 2.82 | 690 | 1.4391 |
| 0.8719 | 2.84 | 695 | 1.4390 |
| 0.6935 | 2.86 | 700 | 1.4360 |
| 0.7472 | 2.88 | 705 | 1.4229 |
| 0.7485 | 2.9 | 710 | 1.4085 |
| 0.8291 | 2.92 | 715 | 1.3977 |
| 0.8684 | 2.94 | 720 | 1.3934 |
| 0.7158 | 2.96 | 725 | 1.3930 |
| 0.9039 | 2.98 | 730 | 1.3936 |
| 0.6393 | 3.0 | 735 | 1.3934 |
| 0.5457 | 3.02 | 740 | 1.3917 |
| 1.0716 | 3.04 | 745 | 1.3922 |
| 0.5797 | 3.06 | 750 | 1.3908 |
| 0.5073 | 3.08 | 755 | 1.3910 |
| 0.5619 | 3.1 | 760 | 1.3925 |
| 0.7002 | 3.12 | 765 | 1.3947 |
| 0.9512 | 3.14 | 770 | 1.3974 |
| 0.6535 | 3.16 | 775 | 1.3992 |
| 0.3872 | 3.18 | 780 | 1.4014 |
| 0.6217 | 3.2 | 785 | 1.4050 |
| 0.6864 | 3.22 | 790 | 1.4015 |
| 0.4067 | 3.24 | 795 | 1.3966 |
| 0.4893 | 3.27 | 800 | 1.3939 |
| 0.5004 | 3.29 | 805 | 1.3951 |
| 0.9775 | 3.31 | 810 | 1.3962 |
| 0.9014 | 3.33 | 815 | 1.3970 |
| 0.8747 | 3.35 | 820 | 1.3975 |
| 0.7479 | 3.37 | 825 | 1.3982 |
| 0.5784 | 3.39 | 830 | 1.3987 |
| 0.7599 | 3.41 | 835 | 1.4003 |
| 0.425 | 3.43 | 840 | 1.4019 |
| 0.5207 | 3.45 | 845 | 1.4032 |
| 0.8591 | 3.47 | 850 | 1.4040 |
| 0.5839 | 3.49 | 855 | 1.4042 |
| 0.7019 | 3.51 | 860 | 1.4045 |
| 0.4606 | 3.53 | 865 | 1.4044 |
| 0.8912 | 3.55 | 870 | 1.4044 |
| 0.6471 | 3.57 | 875 | 1.4047 |
| 0.5152 | 3.59 | 880 | 1.4050 |
| 0.4845 | 3.61 | 885 | 1.4039 |
| 0.6449 | 3.63 | 890 | 1.4031 |
| 0.7303 | 3.65 | 895 | 1.4025 |
| 0.4894 | 3.67 | 900 | 1.4022 |
| 0.6502 | 3.69 | 905 | 1.4021 |
| 0.8449 | 3.71 | 910 | 1.4020 |
| 0.7148 | 3.73 | 915 | 1.4019 |
| 0.7008 | 3.76 | 920 | 1.4019 |
| 0.5209 | 3.78 | 925 | 1.4018 |
| 1.022 | 3.8 | 930 | 1.4016 |
| 0.8529 | 3.82 | 935 | 1.4013 |
| 0.4514 | 3.84 | 940 | 1.4014 |
| 0.5137 | 3.86 | 945 | 1.4016 |
| 0.9131 | 3.88 | 950 | 1.4016 |
| 0.5213 | 3.9 | 955 | 1.4017 |
| 0.5542 | 3.92 | 960 | 1.4018 |
| 0.9475 | 3.94 | 965 | 1.4019 |
| 0.6425 | 3.96 | 970 | 1.4019 |
| 0.886 | 3.98 | 975 | 1.4019 |
| 0.7525 | 4.0 | 980 | 1.4019 |
| 0.4966 | 4.02 | 985 | 1.4019 |
| 0.6851 | 4.04 | 990 | 1.4019 |
| 0.7414 | 4.06 | 995 | 1.4019 |
| 0.4963 | 4.08 | 1000 | 1.4019 |
### Framework versions
- Transformers 4.34.0
- Pytorch 2.1.0
- Datasets 2.14.5
- Tokenizers 0.14.1
| null | null | null | null | null | null | null | null | null | null | null | null | pechaut/Mistral-7b-instruct-cairo-PEFT | [
-0.8000923991203308,
-0.4215887784957886,
0.38346830010414124,
0.21594274044036865,
0.0663018599152565,
0.018039468675851822,
0.17793813347816467,
-0.04057279974222183,
0.7705769538879395,
0.37607142329216003,
-0.6047604084014893,
-0.6901836395263672,
-0.7019065022468567,
-0.13730914890766... |
unionai/FlyteLlama-v0-7b-hf | unionai | 2023-11-29T20:32:28Z | 0 | 0 | null | [
"peft",
"region:us"
] | 2023-11-29T20:32:28Z | 2023-10-21T14:28:41.000Z | null | null | ---
library_name: peft
base_model: codellama/CodeLlama-7b-hf
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
## Training procedure
The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: False
- load_in_4bit: True
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: True
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: nf4
- bnb_4bit_use_double_quant: True
- bnb_4bit_compute_dtype: bfloat16
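For reference, the same settings can be reconstructed in code. The sketch below is illustrative only; aside from the base model id named in this card, everything else (device placement, surrounding training code) is an assumption.

```python
# Illustrative sketch: the quantization config listed above expressed with
# transformers' BitsAndBytesConfig. Device placement is an assumption.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    llm_int8_threshold=6.0,
    llm_int8_enable_fp32_cpu_offload=True,
    llm_int8_has_fp16_weight=False,
)

model = AutoModelForCausalLM.from_pretrained(
    "codellama/CodeLlama-7b-hf",  # base model named in this card
    quantization_config=bnb_config,
    device_map="auto",
)
```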
### Framework versions
- PEFT 0.6.2
| null | peft | null | null | null | null | null | null | null | null | null | null | unionai/FlyteLlama-v0-7b-hf | [
-0.5754634737968445,
-0.5512514114379883,
0.4031495153903961,
0.08076301217079163,
-0.25524622201919556,
-0.2778446674346924,
0.055489420890808105,
-0.5390483736991882,
0.04751100018620491,
0.6137179732322693,
-0.7278336882591248,
-0.6274861097335815,
-0.5582624673843384,
-0.08162551373243... |
R7x/007 | R7x | 2023-11-29T12:10:02Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T12:10:02Z | 2023-10-22T08:33:36.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | R7x/007 | [
-0.32276463508605957,
-0.2256849706172943,
0.8622266054153442,
0.4346153736114502,
-0.5282987952232361,
0.7012974619865417,
0.7915722131729126,
0.07618652284145355,
0.7746030688285828,
0.2563217282295227,
-0.7852814793586731,
-0.22573867440223694,
-0.9104479551315308,
0.571567177772522,
... |
XpucT/Deliberate | XpucT | 2023-11-29T07:46:11Z | 0 | 147 | null | [
"has_space",
"region:us"
] | 2023-11-29T07:46:11Z | 2023-10-24T07:56:43.000Z | null | null | # DELIBERATE

# The shorter the prompt – the better the result
- You can now forget about `extremely detailed`, `8k`, `hyperdetailed`, `masterpiece`, etc. You can still use them, but it's not necessary, because you'll get a masterpiece anyway. Just take a look at the demo images and their EXIF data.</br>
*Masterpiece tip*:</br>
If you still plan to use the `masterpiece` token in your Prompt, trust me: the real power of this token shows only when you keep your Negative prompt empty.
# Trigger Words
- You can boost the result by adding the `mj` token (at the start of the prompt), `cozy`, or `cinematic`. You can probably guess what `mj` means and how the magic works. So there you have it.
# Cinematic
- I've completely revamped the `cinematic` token. As you can see, Deliberate v3 always strives to find reflections and create stunning results (without crazy prompting), much like Midjourney does.
At the same time, sometimes we want to achieve the highest level that no photographer can reach. That's why I've redefined the interpretation of `cinematic`.
# NSFW
- Oh yeah, I've rebuilt many of the NSFW tokens to deliver perfect results with just one word. You can find all the details on my [Boosty page](https://boosty.to/xpuct "Author's Boosty page").
# Dataset
- The dataset is truly enormous. In fact, this is the first public model on the internet, where the selection of images was stricter than anywhere else, including Midjourney. Deliberate v3 can work without negatives and still produce masterpieces. This became possible precisely because of the huge dataset. It's unique, it's massive, and it includes only perfect images. Just try typing any word, exclude the negatives, and you'll see that Deliberate knows what to show you without randomness. Even when you're trying to get some ugly result, it still provides you with a spectacular one.
# License
- The license type is **<span style="color: #e72d7d;">CC BY-NC-ND 4.0</span>** [(?)](https://creativecommons.org/licenses/by-nc-nd/4.0 "About license"). If you want to use Deliberate on your service and/or for commercial purposes, message me on [Discord](https://boosty.to/xpuct "AI Diffusion (Discord server)"). If you are an individual, not a commercial organization, there are no restrictions for you. The philosophy is very simple – private individuals provide support for the model, not companies, so private individuals can do anything.
# Easter Egg
- Now you can stop arguing with people to prove what the best sampler in the world is. You can prompt my nickname at 512 x 512 and see how good the image looks, count how many fingers the girl has, and check whether the sign text is correct.
From now on, you can just run a batch with Euler a and UniPC, for example, and see which one does the job better.
# Have fun ❤
---
license: cc-by-nc-nd-4.0
---
| null | null | null | null | null | null | null | null | null | null | null | null | XpucT/Deliberate | [
-0.6030281186103821,
-1.0269509553909302,
0.5972225069999695,
0.34957924485206604,
-0.6014418601989746,
0.026054129004478455,
-0.18005500733852386,
-0.6319873929023743,
0.5627668499946594,
0.48452267050743103,
-0.8457461595535278,
-0.49490463733673096,
-0.5561385154724121,
0.24824313819408... |
SteffRhes/APIS_OEBL__NER_lg | SteffRhes | 2023-11-29T16:18:01Z | 0 | 0 | null | [
"spacy",
"token-classification",
"de",
"dataset:SteffRhes/APIS_OEBL__Named_Entity_Recognition",
"license:mit",
"region:us"
] | 2023-11-29T16:18:01Z | 2023-10-25T15:59:32.000Z | null | null | ---
license: mit
language:
- de
library_name: spacy
pipeline_tag: token-classification
datasets:
- SteffRhes/APIS_OEBL__Named_Entity_Recognition
---
# APIS ÖBL NER
A spaCy NER model, based on [de_core_news_lg-3.6.0](https://github.com/explosion/spacy-models/releases/tag/de_core_news_lg-3.6.0), trained on data originating from the [APIS project](https://www.oeaw.ac.at/acdh/projects/completed-projects/apis) and the [Austrian Biographical Lexicon (ÖBL)](https://www.oeaw.ac.at/acdh/oebl).
data split:
- train: 80%
- dev: 10%
- eval: 10%
Scores:
- P: 77.99
- R: 79.85
- F: 78.91
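
A minimal usage sketch with spaCy is shown below; the pipeline name `apis_oebl_ner_lg` and the example sentence are assumptions for illustration, not taken from this repository.

```python
# Minimal usage sketch. The pipeline name and example text are assumptions;
# replace them with the actual package or local path distributed in this repo.
import spacy

nlp = spacy.load("apis_oebl_ner_lg")  # or a local path to the trained pipeline

doc = nlp("Franz Grillparzer wurde 1791 in Wien geboren.")

# Print recognised entities with their labels.
for ent in doc.ents:
    print(ent.text, ent.label_)
```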
| null | spacy | token-classification | null | null | null | null | null | null | null | null | null | SteffRhes/APIS_OEBL__NER_lg | [
-0.7226303219795227,
-0.7852308750152588,
0.40543437004089355,
-0.05218321457505226,
0.010877999477088451,
-0.1448233276605606,
-0.13297387957572937,
-0.46024587750434875,
0.24589915573596954,
0.9678976535797119,
-0.6691955327987671,
-0.9158424139022827,
-0.5298560857772827,
0.266820758581... |
csukuangfj/vits-piper-en_US-lessac-medium | csukuangfj | 2023-11-29T07:58:07Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:58:07Z | 2023-10-26T05:19:13.000Z | null | null | # Introduction
Please refer to
https://colab.research.google.com/drive/1PScLJV3sbUUAOiptLO7Ixlzh9XnWWoYZ?usp=sharing
about how files in the repo are generated. | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-lessac-medium | [
-0.5210545659065247,
-0.45010411739349365,
0.27755796909332275,
0.150758296251297,
-0.06449918448925018,
0.28841692209243774,
0.21070365607738495,
-0.04216140881180763,
0.34625059366226196,
0.7494074106216431,
-0.7866718173027039,
-0.211751326918602,
-0.1970527470111847,
0.3795417845249176... |
csukuangfj/vits-piper-de_DE-thorsten-medium | csukuangfj | 2023-11-29T06:22:57Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T06:22:57Z | 2023-10-26T13:47:46.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-de_DE-thorsten-medium | [
-0.3227651119232178,
-0.22568456828594208,
0.8622261881828308,
0.43461447954177856,
-0.5282989740371704,
0.7012965083122253,
0.7915719747543335,
0.0761861652135849,
0.7746025323867798,
0.25632235407829285,
-0.7852817177772522,
-0.22573819756507874,
-0.9104477763175964,
0.5715669393539429,
... |
csukuangfj/vits-piper-de_DE-thorsten-low | csukuangfj | 2023-11-29T06:22:53Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T06:22:53Z | 2023-10-27T03:21:08.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-de_DE-thorsten-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-de_DE-thorsten-high | csukuangfj | 2023-11-29T06:23:01Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T06:23:01Z | 2023-10-27T03:58:29.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-de_DE-thorsten-high | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-de_DE-eva_k-x_low | csukuangfj | 2023-11-29T06:22:46Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T06:22:46Z | 2023-10-27T06:41:17.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-de_DE-eva_k-x_low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-de_DE-karlsson-low | csukuangfj | 2023-11-29T06:22:51Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T06:22:51Z | 2023-10-27T06:41:54.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-de_DE-karlsson-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-de_DE-kerstin-low | csukuangfj | 2023-11-29T06:22:52Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T06:22:52Z | 2023-10-27T06:42:27.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-de_DE-kerstin-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-de_DE-pavoque-low | csukuangfj | 2023-11-29T06:22:52Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T06:22:52Z | 2023-10-27T06:42:50.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-de_DE-pavoque-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-de_DE-ramona-low | csukuangfj | 2023-11-29T06:22:54Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T06:22:54Z | 2023-10-27T06:43:17.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-de_DE-ramona-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-de_DE-thorsten_emotional-medium | csukuangfj | 2023-11-29T06:22:56Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T06:22:56Z | 2023-10-27T06:43:47.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-de_DE-thorsten_emotional-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-amy-low | csukuangfj | 2023-11-29T07:57:15Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:15Z | 2023-10-27T07:09:38.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-amy-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-amy-medium | csukuangfj | 2023-11-29T07:57:19Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:19Z | 2023-10-27T07:10:03.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-amy-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-arctic-medium | csukuangfj | 2023-11-29T07:57:23Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:23Z | 2023-10-27T07:10:35.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-arctic-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-danny-low | csukuangfj | 2023-11-29T07:57:19Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:19Z | 2023-10-27T07:11:18.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-danny-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-hfc_male-medium | csukuangfj | 2023-11-29T07:57:14Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:14Z | 2023-10-27T07:11:42.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-hfc_male-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-joe-medium | csukuangfj | 2023-11-29T07:57:23Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:23Z | 2023-10-27T07:13:24.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-joe-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-kusal-medium | csukuangfj | 2023-11-29T07:57:20Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:20Z | 2023-10-27T07:14:26.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-kusal-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-l2arctic-medium | csukuangfj | 2023-11-29T07:57:23Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:23Z | 2023-10-27T07:15:21.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-l2arctic-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-lessac-low | csukuangfj | 2023-11-29T07:57:15Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:15Z | 2023-10-27T07:18:06.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-lessac-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-lessac-high | csukuangfj | 2023-11-29T07:57:23Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:23Z | 2023-10-27T07:18:37.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-lessac-high | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-libritts_r-medium | csukuangfj | 2023-11-29T07:58:11Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:58:11Z | 2023-10-27T07:29:14.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-libritts_r-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-ryan-low | csukuangfj | 2023-11-29T07:58:14Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:58:14Z | 2023-10-27T07:33:17.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-ryan-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-ryan-medium | csukuangfj | 2023-11-29T07:58:16Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:58:16Z | 2023-10-27T07:33:30.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-ryan-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-ryan-high | csukuangfj | 2023-11-29T07:58:23Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:58:23Z | 2023-10-27T07:33:40.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-ryan-high | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_US-kathleen-low | csukuangfj | 2023-11-29T07:57:29Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:29Z | 2023-10-27T08:54:03.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_US-kathleen-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_GB-alan-low | csukuangfj | 2023-11-29T07:57:09Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:09Z | 2023-10-27T09:35:37.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_GB-alan-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_GB-alan-medium | csukuangfj | 2023-11-29T07:57:04Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:04Z | 2023-10-27T09:35:52.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_GB-alan-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_GB-aru-medium | csukuangfj | 2023-11-29T07:57:05Z | 0 | 0 | null | [
"onnx",
"region:us"
] | 2023-11-29T07:57:05Z | 2023-10-27T09:37:06.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_GB-aru-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_GB-jenny_dioco-medium | csukuangfj | 2023-11-29T07:57:02Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:02Z | 2023-10-27T09:37:33.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_GB-jenny_dioco-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_GB-northern_english_male-medium | csukuangfj | 2023-11-29T07:57:05Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:05Z | 2023-10-27T09:38:46.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_GB-northern_english_male-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_GB-semaine-medium | csukuangfj | 2023-11-29T07:57:08Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:08Z | 2023-10-27T09:39:11.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_GB-semaine-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_GB-southern_english_female-low | csukuangfj | 2023-11-29T07:57:09Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:09Z | 2023-10-27T09:39:43.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_GB-southern_english_female-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_GB-vctk-medium | csukuangfj | 2023-11-29T07:57:05Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:05Z | 2023-10-27T09:39:58.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_GB-vctk-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-en_GB-alba-medium | csukuangfj | 2023-11-29T07:57:11Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T07:57:11Z | 2023-10-27T10:02:40.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-en_GB-alba-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-zh_CN-huayan-medium | csukuangfj | 2023-11-29T13:58:53Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T13:58:53Z | 2023-10-27T10:14:47.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-zh_CN-huayan-medium | [
-0.3227650821208954,
-0.22568479180335999,
0.8622263669967651,
0.4346153140068054,
-0.5282987952232361,
0.7012966871261597,
0.7915722727775574,
0.07618651539087296,
0.7746027112007141,
0.2563222348690033,
-0.7852821350097656,
-0.225738525390625,
-0.910447895526886,
0.5715667009353638,
-0... |
csukuangfj/vits-piper-es_ES-carlfm-x_low | csukuangfj | 2023-11-29T09:58:52Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T09:58:52Z | 2023-10-28T00:27:12.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-es_ES-carlfm-x_low | [
-0.3227650821208954,
-0.22568479180335999,
0.8622263669967651,
0.4346153140068054,
-0.5282987952232361,
0.7012966871261597,
0.7915722727775574,
0.07618651539087296,
0.7746027112007141,
0.2563222348690033,
-0.7852821350097656,
-0.225738525390625,
-0.910447895526886,
0.5715667009353638,
-0... |
sherryzha/falcon-rewrite-581-smallset | sherryzha | 2023-11-29T20:20:01Z | 0 | 0 | null | [
"generated_from_trainer",
"base_model:tiiuae/falcon-7b",
"license:apache-2.0",
"region:us"
] | 2023-11-29T20:20:01Z | 2023-10-28T00:53:21.000Z | null | null | ---
license: apache-2.0
base_model: tiiuae/falcon-7b
tags:
- generated_from_trainer
model-index:
- name: falcon-rewrite-581-smallset
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# falcon-rewrite-581-smallset
This model is a fine-tuned version of [tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.8029
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0005
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.03
- num_epochs: 4
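
Expressed with `transformers.TrainingArguments`, the values above would look roughly like the sketch below; only the listed hyperparameters come from this card, and everything else (output directory, logging, saving options) is an assumption.

```python
# Rough reconstruction of the hyperparameters listed above; unlisted options
# (output_dir, logging, saving) are assumptions for illustration only.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="falcon-rewrite-581-smallset",
    learning_rate=5e-4,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    seed=42,
    lr_scheduler_type="cosine",
    warmup_ratio=0.03,
    num_train_epochs=4,
)
```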
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.4551 | 0.04 | 10 | 2.4085 |
| 1.4203 | 0.08 | 20 | 2.3957 |
| 1.6967 | 0.13 | 30 | 2.1396 |
| 1.5897 | 0.17 | 40 | 2.2547 |
| 1.4068 | 0.21 | 50 | 2.2484 |
| 2.4724 | 0.25 | 60 | 2.1803 |
| 1.6358 | 0.3 | 70 | 2.0363 |
| 1.7565 | 0.34 | 80 | 2.3048 |
| 1.9326 | 0.38 | 90 | 2.2237 |
| 1.4726 | 0.42 | 100 | 1.9666 |
| 2.6885 | 0.47 | 110 | 2.0889 |
| 1.7221 | 0.51 | 120 | 2.0868 |
| 2.7958 | 0.55 | 130 | 2.0033 |
| 1.6916 | 0.59 | 140 | 2.0824 |
| 0.9544 | 0.64 | 150 | 2.1048 |
| 1.8363 | 0.68 | 160 | 1.9306 |
| 2.2805 | 0.72 | 170 | 2.0956 |
| 2.2398 | 0.76 | 180 | 2.1049 |
| 1.0369 | 0.81 | 190 | 2.0348 |
| 0.9023 | 0.85 | 200 | 1.9050 |
| 1.1511 | 0.89 | 210 | 2.0645 |
| 1.1805 | 0.93 | 220 | 1.9241 |
| 1.0402 | 0.97 | 230 | 2.0542 |
| 1.1116 | 1.02 | 240 | 1.9184 |
| 2.2786 | 1.06 | 250 | 2.0039 |
| 1.5313 | 1.1 | 260 | 1.9232 |
| 1.0101 | 1.14 | 270 | 1.8365 |
| 1.3444 | 1.19 | 280 | 1.9812 |
| 1.9973 | 1.23 | 290 | 2.0105 |
| 1.001 | 1.27 | 300 | 1.8029 |
| 1.5717 | 1.31 | 310 | 1.8190 |
| 1.7842 | 1.36 | 320 | 2.0034 |
| 0.841 | 1.4 | 330 | 2.0345 |
| 0.9212 | 1.44 | 340 | 1.9301 |
| 1.6442 | 1.48 | 350 | 2.0139 |
| 2.0697 | 1.53 | 360 | 2.1341 |
| 1.0772 | 1.57 | 370 | 2.0179 |
| 1.4152 | 1.61 | 380 | 1.8754 |
| 0.912 | 1.65 | 390 | 1.9178 |
| 1.7032 | 1.69 | 400 | 1.9235 |
### Framework versions
- Transformers 4.32.1
- Pytorch 2.0.1
- Datasets 2.14.5
- Tokenizers 0.13.3
| null | null | null | null | null | null | null | null | null | null | null | null | sherryzha/falcon-rewrite-581-smallset | [
-0.7200640439987183,
-0.5861296057701111,
0.14098778367042542,
0.11472553759813309,
-0.036937419325113297,
-0.17118117213249207,
0.10944897681474686,
-0.05274061858654022,
0.6652015447616577,
0.3670010268688202,
-0.8500473499298096,
-0.6130642890930176,
-0.743188738822937,
-0.1363457590341... |
MTNProductions/MyLittlePony_RVC2 | MTNProductions | 2023-11-29T13:42:58Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T13:42:58Z | 2023-10-28T01:27:58.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | MTNProductions/MyLittlePony_RVC2 | [
-0.3227650821208954,
-0.22568479180335999,
0.8622263669967651,
0.4346153140068054,
-0.5282987952232361,
0.7012966871261597,
0.7915722727775574,
0.07618651539087296,
0.7746027112007141,
0.2563222348690033,
-0.7852821350097656,
-0.225738525390625,
-0.910447895526886,
0.5715667009353638,
-0... |
csukuangfj/vits-piper-es_ES-davefx-medium | csukuangfj | 2023-11-29T09:58:59Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T09:58:59Z | 2023-10-28T02:09:55.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-es_ES-davefx-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-es_ES-mls_10246-low | csukuangfj | 2023-11-29T09:59:05Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T09:59:05Z | 2023-10-28T02:10:17.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-es_ES-mls_10246-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-es_ES-mls_9972-low | csukuangfj | 2023-11-29T09:58:59Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T09:58:59Z | 2023-10-28T02:11:01.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-es_ES-mls_9972-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-es_ES-sharvard-medium | csukuangfj | 2023-11-29T09:59:01Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T09:59:01Z | 2023-10-28T02:12:00.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-es_ES-sharvard-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-es_MX-ald-medium | csukuangfj | 2023-11-29T09:59:32Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T09:59:32Z | 2023-10-28T02:14:40.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-es_MX-ald-medium | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-fr_FR-gilles-low | csukuangfj | 2023-11-29T08:44:40Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T08:44:40Z | 2023-10-28T04:29:39.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-fr_FR-gilles-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-fr_FR-mls_1840-low | csukuangfj | 2023-11-29T08:44:43Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T08:44:43Z | 2023-10-28T04:30:46.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-fr_FR-mls_1840-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-fr_FR-siwis-low | csukuangfj | 2023-11-29T08:44:31Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T08:44:31Z | 2023-10-28T04:32:33.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-fr_FR-siwis-low | [
-0.3227648437023163,
-0.22568459808826447,
0.8622260093688965,
0.434614896774292,
-0.5282989144325256,
0.7012966275215149,
0.7915716171264648,
0.07618634402751923,
0.7746022343635559,
0.25632208585739136,
-0.7852813005447388,
-0.22573812305927277,
-0.9104481935501099,
0.5715669393539429,
... |
csukuangfj/vits-piper-fr_FR-siwis-medium | csukuangfj | 2023-11-29T08:44:39Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T08:44:39Z | 2023-10-28T04:33:05.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-fr_FR-siwis-medium | [
-0.32276490330696106,
-0.22568461298942566,
0.862226128578186,
0.43461498618125916,
-0.5282989740371704,
0.7012966871261597,
0.7915717363357544,
0.07618622481822968,
0.7746026515960693,
0.25632232427597046,
-0.785281777381897,
-0.22573840618133545,
-0.9104479551315308,
0.5715670585632324,
... |
csukuangfj/vits-piper-fr_FR-upmc-medium | csukuangfj | 2023-11-29T08:44:48Z | 0 | 0 | null | [
"onnx",
"has_space",
"region:us"
] | 2023-11-29T08:44:48Z | 2023-10-28T04:33:53.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | csukuangfj/vits-piper-fr_FR-upmc-medium | [
-0.32276490330696106,
-0.22568461298942566,
0.862226128578186,
0.43461498618125916,
-0.5282989740371704,
0.7012966871261597,
0.7915717363357544,
0.07618622481822968,
0.7746026515960693,
0.25632232427597046,
-0.785281777381897,
-0.22573840618133545,
-0.9104479551315308,
0.5715670585632324,
... |
matgu23/lrwg | matgu23 | 2023-11-29T02:39:48Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T02:39:48Z | 2023-10-31T23:52:17.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | matgu23/lrwg | [
-0.32276490330696106,
-0.22568461298942566,
0.862226128578186,
0.43461498618125916,
-0.5282989740371704,
0.7012966871261597,
0.7915717363357544,
0.07618622481822968,
0.7746026515960693,
0.25632232427597046,
-0.785281777381897,
-0.22573840618133545,
-0.9104479551315308,
0.5715670585632324,
... |
minhbtc/bkai-llama2-7b-chat | minhbtc | 2023-11-29T10:31:08Z | 0 | 0 | null | [
"tensorboard",
"generated_from_trainer",
"base_model:bkai-foundation-models/vietnamese-llama2-7b-40GB",
"license:other",
"region:us"
] | 2023-11-29T10:31:08Z | 2023-11-01T04:54:38.000Z | null | null | ---
license: other
base_model: bkai-foundation-models/vietnamese-llama2-7b-40GB
tags:
- generated_from_trainer
model-index:
- name: bkai-llama2-7b-chat
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bkai-llama2-7b-chat
This model is a fine-tuned version of [bkai-foundation-models/vietnamese-llama2-7b-40GB](https://huggingface.co/bkai-foundation-models/vietnamese-llama2-7b-40GB) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 9.1e-05
- train_batch_size: 4
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 8
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.05
- training_steps: 10000
### Framework versions
- Transformers 4.33.0
- Pytorch 2.0.0
- Datasets 2.1.0
- Tokenizers 0.13.3
| null | null | null | null | null | null | null | null | null | null | null | null | minhbtc/bkai-llama2-7b-chat | [
-0.3353610634803772,
-0.7118687629699707,
0.18586573004722595,
0.4081166982650757,
-0.6855742335319519,
-0.23713892698287964,
0.026790793985128403,
-0.37612056732177734,
0.2476772665977478,
0.5261938571929932,
-0.5694802403450012,
-0.4782784581184387,
-0.7302473783493042,
0.009838080033659... |
kkboy1/Mistral_train | kkboy1 | 2023-11-29T09:07:19Z | 0 | 0 | null | [
"tensorboard",
"generated_from_trainer",
"base_model:kkboy1/Mistral-7B-v0.1-sharded",
"region:us"
] | 2023-11-29T09:07:19Z | 2023-11-02T00:51:36.000Z | null | null | ---
base_model: kkboy1/Mistral-7B-v0.1-sharded
tags:
- generated_from_trainer
model-index:
- name: Mistral_train
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Mistral_train
This model is a fine-tuned version of [kkboy1/Mistral-7B-v0.1-sharded](https://huggingface.co/kkboy1/Mistral-7B-v0.1-sharded) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-07
- train_batch_size: 2
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 8
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 2.0
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.35.2
- Pytorch 2.1.0+cu118
- Datasets 2.15.0
- Tokenizers 0.15.0
| null | null | null | null | null | null | null | null | null | null | null | null | kkboy1/Mistral_train | [
-0.4446302056312561,
-0.6488770246505737,
0.012449857778847218,
0.2671469449996948,
-0.5255323052406311,
-0.5123811960220337,
0.03632108494639397,
-0.27230483293533325,
0.1327245831489563,
0.5300803780555725,
-0.7412327527999878,
-0.4476461112499237,
-0.7455970048904419,
-0.199210122227668... |
Herocat/opt-350m-finetuned-wikitext2 | Herocat | 2023-11-29T13:10:27Z | 0 | 0 | null | [
"transformers",
"tensorboard",
"safetensors",
"opt",
"text-generation",
"generated_from_trainer",
"base_model:facebook/opt-350m",
"license:other",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | 2023-11-29T13:10:27Z | 2023-11-02T13:23:00.000Z | null | null | ---
license: other
base_model: facebook/opt-350m
tags:
- generated_from_trainer
model-index:
- name: opt-350m-finetuned-wikitext2
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# opt-350m-finetuned-wikitext2
This model is a fine-tuned version of [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) on the None dataset.
It achieves the following results on the evaluation set:
- eval_loss: 3.0919
- eval_runtime: 51.3961
- eval_samples_per_second: 19.048
- eval_steps_per_second: 2.393
- epoch: 3.04
- step: 3602
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Framework versions
- Transformers 4.35.2
- Pytorch 2.1.1+cu121
- Datasets 2.15.0
- Tokenizers 0.15.0
| null | transformers | text-generation | null | null | null | null | null | null | null | null | null | Herocat/opt-350m-finetuned-wikitext2 | [
-0.5774496793746948,
-0.7449148893356323,
0.16538110375404358,
0.19759638607501984,
-0.3274731934070587,
-0.41251152753829956,
-0.2190226912498474,
-0.297059565782547,
0.2117004692554474,
0.3681313395500183,
-0.90828537940979,
-0.411394327878952,
-0.4068322479724884,
-0.09651510417461395,
... |
b74444/style1 | b74444 | 2023-11-29T16:03:07Z | 0 | 1 | null | [
"region:us"
] | 2023-11-29T16:03:07Z | 2023-11-03T05:25:40.000Z | null | null | 1676754219892798188 trigger word: stl1 |||||
Style1-000014 trigger word: Style |||||
StarTails trigger word: StarTails |||||
StarTail trigger word: StarTail |||||
pl1-000002 trigger word: polaroid weight 0.4~0.8 |||||
| null | null | null | null | null | null | null | null | null | null | null | null | b74444/style1 | [
-0.4085506796836853,
-0.2419251799583435,
0.4377003014087677,
0.6525871157646179,
-0.0671469122171402,
-0.0974196121096611,
0.37061911821365356,
-0.5547966957092285,
0.45639804005622864,
0.2087920755147934,
-0.5636134743690491,
-0.8115641474723816,
-0.7364427447319031,
-0.2196771800518036,... |
aiflows/ChatInteractiveFlowModule | aiflows | 2023-11-29T14:52:58Z | 0 | 0 | null | [
"license:mit",
"region:us"
] | 2023-11-29T14:52:58Z | 2023-11-03T17:43:50.000Z | null | null | ---
license: mit
---
# Table of Contents
* [ChatHumanFlowModule](#ChatHumanFlowModule)
* [ChatHumanFlowModule](#ChatHumanFlowModule.ChatHumanFlowModule)
* [type](#ChatHumanFlowModule.ChatHumanFlowModule.type)
<a id="ChatHumanFlowModule"></a>
# ChatHumanFlowModule
<a id="ChatHumanFlowModule.ChatHumanFlowModule"></a>
## ChatHumanFlowModule Objects
```python
class ChatHumanFlowModule(CircularFlow)
```
This class implements a Chat Human Flow Module: a flow consisting of two subflows that are executed circularly. It contains the following subflows:
- A User Flow: a flow that makes queries to the Assistant Flow. E.g., the user asks the assistant (LLM) a question.
- An Assistant Flow: a flow that responds to queries made by the User Flow. E.g., the assistant (LLM) answers the user's question.
To end the interaction, the user must type "\<END\>"
An illustration of the flow is as follows:
|------> User Flow -----------> |
^                               |
|                               |
|                               v
|<------ Assistant Flow <-------|
*Configuration Parameters*:
- `name` (str): The name of the flow. Default: "ChatHumanFlowModule"
- `description` (str): A description of the flow. This description is used to generate the help message of the flow.
Default: "Flow that enables chatting between a ChatAtomicFlow and a user providing the input."
- `max_rounds` (int): The maximum number of rounds the flow can run for. Default: None, which means that there is no limit on the number of rounds.
- `early_exit_key` (str): The key that is used to exit the flow. Default: "end_of_interaction"
- `subflows_config` (Dict[str,Any]): A dictionary of subflows configurations. Default:
- `Assistant Flow`: The configuration of the Assistant Flow. By default, it is a ChatAtomicFlow; its default parameters are defined in ChatAtomicFlowModule.
- `User Flow`: The configuration of the User Flow. By default, it is a HumanStandardInputFlow; its default parameters are defined in HumanStandardInputFlowModule.
- `topology` (List[Dict[str,Any]]): The topology of the flow, which is "circular".
By default, the topology is the one shown in the illustration above (the topology is also described in ChatHumanFlowModule.yaml).
*Input Interface*:
- None. By default, the input interface doesn't expect any input.
*Output Interface*:
- `end_of_interaction` (bool): Whether the interaction is finished or not.
**Arguments**:
- `\**kwargs` (`Dict[str, Any]`): Arguments to be passed to the parent class CircularFlow constructor.
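As an illustration, the defaults above could be overridden with a plain dictionary along the following lines; the subflow override values are hypothetical examples rather than documented defaults, and the exact override mechanism depends on the aiFlows version in use.

```python
# Hypothetical override of the configuration parameters documented above.
# The keys mirror this card's defaults; the values are examples only.
overrides = {
    "name": "ChatHumanFlowModule",
    "description": "Chat between an assistant flow and a human user.",
    "max_rounds": 10,                        # None means no limit on rounds
    "early_exit_key": "end_of_interaction",
    "subflows_config": {
        "Assistant Flow": {"model_name": "gpt-4"},  # example override
        "User Flow": {},                            # keep HumanStandardInputFlow defaults
    },
}
```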
<a id="ChatHumanFlowModule.ChatHumanFlowModule.type"></a>
#### type
```python
@classmethod
def type(cls)
```
This method returns the type of the flow.
| null | null | null | null | null | null | null | null | null | null | null | null | aiflows/ChatInteractiveFlowModule | [
-0.43658339977264404,
-0.9272976517677307,
0.19581787288188934,
0.15103955566883087,
-0.21627257764339447,
-0.011455623432993889,
0.05434126406908035,
-0.3171617388725281,
0.37313488125801086,
0.7460111379623413,
-0.9120983481407166,
-0.29327699542045593,
-0.46405982971191406,
0.1777792870... |
liuyao/QLNet | liuyao | 2023-11-29T15:05:11Z | 0 | 0 | null | [
"timm",
"ResNet",
"CNN",
"PDE",
"image-classification",
"dataset:imagenet-1k",
"arxiv:2308.01621",
"license:apache-2.0",
"region:us"
] | 2023-11-29T15:05:11Z | 2023-11-06T08:40:44.000Z | null | null | ---
datasets:
- imagenet-1k
library_name: timm
license: apache-2.0
pipeline_tag: image-classification
metrics:
- accuracy
tags:
- ResNet
- CNN
- PDE
---
# Model Card for Model ID
Based on a class of partial differential equations called **quasi-linear hyperbolic systems** [[Liu et al, 2023](https://github.com/liuyao12/ConvNets-PDE-perspective)], the QLNet makes an entry into uncharted waters of ConvNet model space marked by the use of (element-wise) multiplication in lieu of ReLU as the primary nonlinearity. It achieves performance comparable to ResNet50 on ImageNet-1k (acc=**78.4**), demonstrating that it has the same level of capacity/expressivity, and deserves more analysis and study (hyper-parameter tuning, optimizer, etc.) by the academic community.

One notable feature is that the architecture (trained or not) admits a *continuous* symmetry in its parameters. Check out the [notebook](https://colab.research.google.com/#fileId=https://huggingface.co/liuyao/QLNet/blob/main/QLNet_symmetry.ipynb) for a demo that makes a particular transformation on the weights while leaving the output *unchanged*.
FAQ (as the author imagines):
- Q: Who needs another ConvNet, when the SOTA for ImageNet-1k is now in the low 80s with models of comparable size?
- A: Aside from shortage of resources to perform extensive experiments, the real answer is that the new symmetry has the potential to be exploited (e.g., symmetry-aware optimization). The non-activation nonlinearity does have more "naturalness" (coordinate independence) that is innate in many equations in mathematics and physics. Activation is but a legacy from the early days of models inspired by *biological* neural networks.
- Q: Multiplication is too simple, someone must have tried it?
- A: Perhaps. My bet is that whoever tried it soon found the model failing to train with standard ReLU. Without the belief in the underlying PDE perspective, maybe it wasn't pushed to its limit.
- Q: Is it not similar to attention in Transformer?
- A: It is, indeed. It's natural to wonder if the activation functions in Transformer could be removed (or reduced) while still achieving comparable performance.
- Q: If the weight/parameter space has a symmetry (other than permutations), perhaps there's redundancy in the weights.
- A: The transformation in the demo can indeed be used to reduce the weights from the get-go. However, there are variants of the model that admit an even larger symmetry that would be hard to remove. It is also related to the phenomenon of "flat minima" found empirically in conventional deep neural networks.
*This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).*
## Model Details
### Model Description
Instead of the `bottleneck` block of ResNet50, which consists of 1x1, 3x3, and 1x1 convolutions in succession, this simplest version of QLNet applies a 1x1 convolution, splits the result into two equal halves and **multiplies** them element-wise, then applies a 3x3 (depthwise) convolution and a 1x1 convolution, *all without activation functions* except at the end of the block, where a "radial" activation function that we call `hardball` is applied.
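For illustration, here is a rough sketch of the block just described. This is **not** the released implementation: the exact definition of `hardball`, the placement of any normalization layers, and the residual connection are assumptions made only to convey the shape of the computation; the placeholder `hardball` below simply clips each pixel's channel vector to a ball of fixed radius.
```python
import torch
import torch.nn as nn

def hardball(x, radius=1.0, eps=1e-6):
    # Placeholder "radial" activation (an assumption, not the paper's exact definition):
    # rescale the channel vector at each spatial location to lie within a ball.
    norm = x.norm(dim=1, keepdim=True)
    return x * torch.clamp(radius / (norm + eps), max=1.0)

class QLBlockSketch(nn.Module):
    # 1x1 expand -> split -> element-wise multiply -> 3x3 depthwise -> 1x1 project -> hardball
    def __init__(self, c_in, c_mid):
        super().__init__()
        self.expand = nn.Conv2d(c_in, 2 * c_mid, kernel_size=1)
        self.dw = nn.Conv2d(c_mid, c_mid, kernel_size=3, padding=1, groups=c_mid)
        self.project = nn.Conv2d(c_mid, c_in, kernel_size=1)

    def forward(self, x):
        u, v = self.expand(x).chunk(2, dim=1)   # two equal halves
        out = self.project(self.dw(u * v))      # no ReLU anywhere inside the block
        return hardball(x + out)                # residual add, then the radial activation
```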
- **Developed by:** Yao Liu 刘杳
- **Model type:** Convolutional Neural Network (ConvNet)
- **License:** As academic work, it is free for all to use. It is a natural progression from the original ConvNet (of LeCun) and ResNet, with "depthwise" from MobileNet.
- **Finetuned from model:** N/A (*trained from scratch*)
### Model Sources
<!-- Provide the basic links for the model. -->
- **Repository:** [ConvNet from the PDE perspective](https://github.com/liuyao12/ConvNets-PDE-perspective)
- **Paper:** [A Novel ConvNet Architecture with a Continuous Symmetry](https://arxiv.org/abs/2308.01621)
- **Demo:** [More Information Needed]
## How to Get Started with the Model
Use the code below to get started with the model.
```python
import torch, timm
from qlnet import QLNet
model = QLNet()
model.load_state_dict(torch.load('qlnet-50-v0.pth.tar')['state_dict'])
model.eval()
```
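A minimal inference sketch continuing from the snippet above; the image path is a placeholder and the use of standard ImageNet preprocessing is an assumption (the repository's evaluation transforms may differ).
```python
import torch
from PIL import Image
from torchvision import transforms

# Standard ImageNet preprocessing (assumed; check the repository's eval settings).
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

img = Image.open("example.jpg").convert("RGB")       # placeholder path
with torch.no_grad():
    logits = model(preprocess(img).unsqueeze(0))     # `model` is the QLNet loaded above
    top5 = logits.softmax(dim=-1).topk(5)
print(top5.indices)                                  # ImageNet-1k class indices
```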
## Training Details
### Training and Testing Data
ImageNet-1k
### Training Procedure
We use the training script in `timm`:
```
python3 train.py ../datasets/imagenet/ --model resnet50 --num-classes 1000 --lr 0.1 --warmup-epochs 5 --epochs 240 --weight-decay 1e-4 --sched cosine --reprob 0.4 --recount 3 --remode pixel --aa rand-m7-mstd0.5-inc1 -b 192 -j 6 --amp --dist-bn reduce
```
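Note that the command above passes `--model resnet50`, so presumably the QLNet architecture is swapped in (or registered under that name) in the author's copy of the script. One generic way to make a custom model visible to `timm`'s `train.py` is to register an entry point; the sketch below is an assumption, and the entry-point name `qlnet_50` is made up here.
```python
# qlnet_timm.py -- hypothetical registration shim so that `--model qlnet_50` resolves via timm.
from timm.models import register_model
from qlnet import QLNet

@register_model
def qlnet_50(pretrained=False, **kwargs):
    assert not pretrained, "no pretrained weights are wired up in this sketch"
    return QLNet(**kwargs)
```
Importing this module before calling `timm.create_model("qlnet_50")` (or before `train.py` parses `--model`) would then construct the network.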
### Results
qlnet-50-v0: acc=78.40 | null | timm | image-classification | null | null | null | null | null | null | null | null | null | liuyao/QLNet | [
-0.37959590554237366,
-0.2827339768409729,
-0.034913912415504456,
0.10019655525684357,
-0.47340157628059387,
-0.42582038044929504,
0.12964627146720886,
-0.4413439929485321,
0.3747105002403259,
0.30728253722190857,
-0.6508569717407227,
-0.30636176466941833,
-0.3491849899291992,
-0.050785750... |
EmmaGthn/results | EmmaGthn | 2023-11-29T15:35:43Z | 0 | 0 | null | [
"tensorboard",
"generated_from_trainer",
"base_model:meta-llama/Llama-2-7b-hf",
"region:us"
] | 2023-11-29T15:35:43Z | 2023-11-07T16:37:54.000Z | null | null | ---
base_model: meta-llama/Llama-2-7b-hf
tags:
- generated_from_trainer
model-index:
- name: results
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# results
This model is a fine-tuned version of [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) on an unknown dataset.
It achieves the following results on the evaluation set:
- eval_loss: 1.6415
- eval_runtime: 106.4454
- eval_samples_per_second: 18.789
- eval_steps_per_second: 2.349
- epoch: 0.56
- step: 4200
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training (an equivalent `TrainingArguments` sketch follows the list):
- learning_rate: 7e-05
- train_batch_size: 4
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.03
- num_epochs: 100
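For reference, a hedged sketch of a `transformers.TrainingArguments` object matching the values above; the actual training script, dataset, and any parameter-efficient fine-tuning (e.g., LoRA) setup used for this run are not documented here, so treat this only as an illustration.
```python
from transformers import TrainingArguments

# Mirrors the hyperparameters listed above; everything else is left at library defaults.
training_args = TrainingArguments(
    output_dir="results",
    learning_rate=7e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="cosine",
    warmup_ratio=0.03,
    num_train_epochs=100,
)
```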
### Framework versions
- Transformers 4.31.0
- Pytorch 2.1.0+cu118
- Datasets 2.14.6
- Tokenizers 0.13.3
| null | null | null | null | null | null | null | null | null | null | null | null | EmmaGthn/results | [
-0.42339545488357544,
-0.6949540376663208,
0.25024789571762085,
0.34374678134918213,
-0.7141873836517334,
-0.22827847301959991,
-0.06382878124713898,
-0.45058536529541016,
0.2962888777256012,
0.3784639537334442,
-0.7653098702430725,
-0.692507266998291,
-0.8131418824195862,
0.18119567632675... |
MoffQueen/MoffQueenMix | MoffQueen | 2023-11-29T04:49:44Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T04:49:44Z | 2023-11-07T18:32:58.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | MoffQueen/MoffQueenMix | [
-0.3227650821208954,
-0.22568479180335999,
0.8622263669967651,
0.4346153140068054,
-0.5282987952232361,
0.7012966871261597,
0.7915722727775574,
0.07618651539087296,
0.7746027112007141,
0.2563222348690033,
-0.7852821350097656,
-0.225738525390625,
-0.910447895526886,
0.5715667009353638,
-0... |
WeFanz/asd | WeFanz | 2023-11-29T09:19:02Z | 0 | 0 | null | [
"region:us"
] | 2023-11-29T09:19:02Z | 2023-11-08T01:40:15.000Z | null | null | Entry not found | null | null | null | null | null | null | null | null | null | null | null | null | WeFanz/asd | [
-0.3227650821208954,
-0.22568479180335999,
0.8622263669967651,
0.4346153140068054,
-0.5282987952232361,
0.7012966871261597,
0.7915722727775574,
0.07618651539087296,
0.7746027112007141,
0.2563222348690033,
-0.7852821350097656,
-0.225738525390625,
-0.910447895526886,
0.5715667009353638,
-0... |