midah commited on
Commit
3606fc2
·
verified ·
1 Parent(s): af7e93c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. Baichuan2-13B-Chat_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +261 -0
  2. Baichuan2-7B-Chat_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +235 -0
  3. Cerebras-GPT-13B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +184 -0
  4. ChatTTS_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +56 -0
  5. CogVideoX-5b-I2V_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +281 -0
  6. ControlNetMediaPipeFace_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +213 -0
  7. Cyberpunk-Anime-Diffusion_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +122 -0
  8. DeepSeek-R1-Distill-Llama-8B-GGUF_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +599 -0
  9. Emu3-Gen_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +15 -0
  10. FastHunyuan_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +60 -0
  11. GPT-JT-6B-v1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +159 -0
  12. Hotshot-XL_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +48 -0
  13. Hunyuan3D-1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +258 -0
  14. Hunyuan3D-2mv_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +113 -0
  15. HunyuanVideo-I2V_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +524 -0
  16. IP-Adapter_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +48 -0
  17. LLaMA-2-7B-32K_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +618 -0
  18. Llama-2-70B-Chat-GGML_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +303 -0
  19. Llama-3-Refueled_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +135 -0
  20. Llama-3_1-Nemotron-51B-Instruct_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv +216 -0
  21. MiniMax-VL-01_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +2 -0
  22. Molmo-72B-0924_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +211 -0
  23. MythoMax-L2-13B-GPTQ_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv +315 -0
  24. NeuralBeagle14-7B_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +0 -0
  25. Nous-Capybara-34B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +125 -0
  26. Nous-Hermes-2-Vision-Alpha_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +125 -0
  27. Nous-Hermes-2-Yi-34B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +353 -0
  28. Open-Sora_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +32 -0
  29. Phi-3-mini-4k-instruct_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
  30. Phi-3-small-128k-instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +715 -0
  31. Qwen2-7B_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +0 -0
  32. Ruyi-Mini-7B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +196 -0
  33. SuperNova-Medius_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv +267 -0
  34. Van-Gogh-diffusion_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +67 -0
  35. Wayfarer-12B_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +72 -0
  36. XTTS-v1_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +93 -0
  37. Yi-34B-200K_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +0 -0
  38. bart-large_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +0 -0
  39. bertweet-base-sentiment-analysis_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +0 -0
  40. canary-1b_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +546 -0
  41. chatglm3-6b-32k_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +113 -0
  42. chinese-roberta-wwm-ext_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +0 -0
  43. deepseek-coder-33b-instruct_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +247 -0
  44. emotion-english-distilroberta-base_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +292 -0
  45. falcon-40b-instruct-GPTQ_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv +396 -0
  46. flan-t5-base_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +0 -0
  47. flux-lora-collection_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +161 -0
  48. flux1-dev_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +12 -0
  49. gpt-neox-20b_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv +600 -0
  50. gpt2-medium_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +0 -0
Baichuan2-13B-Chat_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ baichuan-inc/Baichuan2-13B-Chat,"---
3
+ language:
4
+ - en
5
+ - zh
6
+ license: other
7
+ tasks:
8
+ - text-generation
9
+ ---
10
+
11
+ <!-- markdownlint-disable first-line-h1 -->
12
+ <!-- markdownlint-disable html -->
13
+ <div align=""center"">
14
+ <h1>
15
+ Baichuan 2
16
+ </h1>
17
+ </div>
18
+
19
+ <div align=""center"">
20
+ <a href=""https://github.com/baichuan-inc/Baichuan2"" target=""_blank"">🦉GitHub</a> | <a href=""https://github.com/baichuan-inc/Baichuan-7B/blob/main/media/wechat.jpeg?raw=true"" target=""_blank"">💬WeChat</a>
21
+ </div>
22
+ <div align=""center"">
23
+ 百川API支持搜索增强和192K长窗口,新增百川搜索增强知识库、限时免费!<br>
24
+ 🚀 <a href=""https://www.baichuan-ai.com/"" target=""_blank"">百川大模型在线对话平台</a> 已正式向公众开放 🎉
25
+ </div>
26
+
27
+ # 目录/Table of Contents
28
+
29
+ - [📖 模型介绍/Introduction](#Introduction)
30
+ - [⚙️ 快速开始/Quick Start](#Start)
31
+ - [📊 Benchmark评估/Benchmark Evaluation](#Benchmark)
32
+ - [👥 社区与生态/Community](#Community)
33
+ - [📜 声明与协议/Terms and Conditions](#Terms)
34
+
35
+ # 更新/Update
36
+ [2023.12.29] 🎉🎉🎉 我们发布了 **[Baichuan2-13B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat) v2** 版本。其中:
37
+ - 大幅提升了模型的综合能力,特别是数学和逻辑推理、复杂指令跟随能力。
38
+ - 使用时需指定revision=v2.0,详细方法参考[快速开始](#Start)
39
+
40
+ # <span id=""Introduction"">模型介绍/Introduction</span>
41
+
42
+ Baichuan 2 是[百川智能]推出的新一代开源大语言模型,采用 **2.6 万亿** Tokens 的高质量语料训练,在权威的中文和英文 benchmark
43
+ 上均取得同尺寸最好的效果。本次发布包含有 7B、13B 的 Base 和 Chat 版本,并提供了 Chat 版本的 4bits
44
+ 量化,所有版本不仅对学术研究完全开放,开发者也仅需[邮件申请]并获得官方商用许可后,即可以免费商用。具体发布版本和下载见下表:
45
+
46
+ Baichuan 2 is the new generation of large-scale open-source language models launched by [Baichuan Intelligence inc.](https://www.baichuan-ai.com/).
47
+ It is trained on a high-quality corpus with 2.6 trillion tokens and has achieved the best performance in authoritative Chinese and English benchmarks of the same size.
48
+ This release includes 7B and 13B versions for both Base and Chat models, along with a 4bits quantized version for the Chat model.
49
+ All versions are fully open to academic research, and developers can also use them for free in commercial applications after obtaining an official commercial license through [email request](mailto:opensource@baichuan-inc.com).
50
+ The specific release versions and download links are listed in the table below:
51
+
52
+ | | Base Model | Chat Model | 4bits Quantized Chat Model |
53
+ |:---:|:--------------------:|:--------------------:|:--------------------------:|
54
+ | 7B | [Baichuan2-7B-Base](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base) | [Baichuan2-7B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat) | [Baichuan2-7B-Chat-4bits](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base-4bits) |
55
+ | 13B | [Baichuan2-13B-Base](https://huggingface.co/baichuan-inc/Baichuan2-13B-Base) | [Baichuan2-13B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat) | [Baichuan2-13B-Chat-4bits](https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat-4bits) |
56
+
57
+ # <span id=""Start"">快速开始/Quick Start</span>
58
+
59
+ 在Baichuan2系列模型中,我们为了加快推理速度使用了Pytorch2.0加入的新功能F.scaled_dot_product_attention,因此模型需要在Pytorch2.0环境下运行。
60
+
61
+ In the Baichuan 2 series models, we have utilized the new feature `F.scaled_dot_product_attention` introduced in PyTorch 2.0 to accelerate inference speed. Therefore, the model needs to be run in a PyTorch 2.0 environment.
62
+
63
+
64
+ ```python
65
+ import torch
66
+ from transformers import AutoModelForCausalLM, AutoTokenizer
67
+ from transformers.generation.utils import GenerationConfig
68
+ tokenizer = AutoTokenizer.from_pretrained(""baichuan-inc/Baichuan2-13B-Chat"",
69
+ revision=""v2.0"",
70
+ use_fast=False,
71
+ trust_remote_code=True)
72
+ model = AutoModelForCausalLM.from_pretrained(""baichuan-inc/Baichuan2-13B-Chat"",
73
+ revision=""v2.0"",
74
+ device_map=""auto"",
75
+ torch_dtype=torch.bfloat16,
76
+ trust_remote_code=True)
77
+ model.generation_config = GenerationConfig.from_pretrained(""baichuan-inc/Baichuan2-13B-Chat"", revision=""v2.0"")
78
+ messages = []
79
+ messages.append({""role"": ""user"", ""content"": ""解释一下“温故而知新”""})
80
+ response = model.chat(tokenizer, messages)
81
+ print(response)
82
+ ""温故而知新""是一句中国古代的成语,出自《论语·为政》篇。这句话的意思是:通过回顾过去,我们可以发现新的知识和理解。换句话说,学习历史和经验可以让我们更好地理解现在和未来。
83
+
84
+ 这句话鼓��我们在学习和生活中不断地回顾和反思过去的经验,从而获得新的启示和成长。通过重温旧的知识和经历,我们可以发现新的观点和理解,从而更好地应对不断变化的世界和挑战。
85
+ ```
86
+ **注意:如需使用老版本,需手动指定revision参数,设置revision=v1.0**
87
+
88
+ # <span id=""Benchmark"">Benchmark 结果/Benchmark Evaluation</span>
89
+
90
+ 我们在[通用]、[法律]、[医疗]、[数学]、[代码]和[多语言翻译]六个领域的中英文权威数据集上对模型进行了广泛测试,更多详细测评结果可查看[GitHub]。
91
+
92
+ We have extensively tested the model on authoritative Chinese-English datasets across six domains: [General](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#general-domain), [Legal](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#law-and-medicine), [Medical](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#law-and-medicine), [Mathematics](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#mathematics-and-code), [Code](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#mathematics-and-code), and [Multilingual Translation](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#multilingual-translation). For more detailed evaluation results, please refer to [GitHub](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md).
93
+
94
+ ### 7B Model Results
95
+
96
+ | | **C-Eval** | **MMLU** | **CMMLU** | **Gaokao** | **AGIEval** | **BBH** |
97
+ |:-----------------------:|:----------:|:--------:|:---------:|:----------:|:-----------:|:-------:|
98
+ | | 5-shot | 5-shot | 5-shot | 5-shot | 5-shot | 3-shot |
99
+ | **GPT-4** | 68.40 | 83.93 | 70.33 | 66.15 | 63.27 | 75.12 |
100
+ | **GPT-3.5 Turbo** | 51.10 | 68.54 | 54.06 | 47.07 | 46.13 | 61.59 |
101
+ | **LLaMA-7B** | 27.10 | 35.10 | 26.75 | 27.81 | 28.17 | 32.38 |
102
+ | **LLaMA2-7B** | 28.90 | 45.73 | 31.38 | 25.97 | 26.53 | 39.16 |
103
+ | **MPT-7B** | 27.15 | 27.93 | 26.00 | 26.54 | 24.83 | 35.20 |
104
+ | **Falcon-7B** | 24.23 | 26.03 | 25.66 | 24.24 | 24.10 | 28.77 |
105
+ | **ChatGLM2-6B** | 50.20 | 45.90 | 49.00 | 49.44 | 45.28 | 31.65 |
106
+ | **[Baichuan-7B]** | 42.80 | 42.30 | 44.02 | 36.34 | 34.44 | 32.48 |
107
+ | **[Baichuan2-7B-Base]** | 54.00 | 54.16 | 57.07 | 47.47 | 42.73 | 41.56 |
108
+
109
+ ### 13B Model Results
110
+
111
+ | | **C-Eval** | **MMLU** | **CMMLU** | **Gaokao** | **AGIEval** | **BBH** |
112
+ |:---------------------------:|:----------:|:--------:|:---------:|:----------:|:-----------:|:-------:|
113
+ | | 5-shot | 5-shot | 5-shot | 5-shot | 5-shot | 3-shot |
114
+ | **GPT-4** | 68.40 | 83.93 | 70.33 | 66.15 | 63.27 | 75.12 |
115
+ | **GPT-3.5 Turbo** | 51.10 | 68.54 | 54.06 | 47.07 | 46.13 | 61.59 |
116
+ | **LLaMA-13B** | 28.50 | 46.30 | 31.15 | 28.23 | 28.22 | 37.89 |
117
+ | **LLaMA2-13B** | 35.80 | 55.09 | 37.99 | 30.83 | 32.29 | 46.98 |
118
+ | **Vicuna-13B** | 32.80 | 52.00 | 36.28 | 30.11 | 31.55 | 43.04 |
119
+ | **Chinese-Alpaca-Plus-13B** | 38.80 | 43.90 | 33.43 | 34.78 | 35.46 | 28.94 |
120
+ | **XVERSE-13B** | 53.70 | 55.21 | 58.44 | 44.69 | 42.54 | 38.06 |
121
+ | **[Baichuan-13B-Base]** | 52.40 | 51.60 | 55.30 | 49.69 | 43.20 | 43.01 |
122
+ | **[Baichuan2-13B-Base]** | 58.10 | 59.17 | 61.97 | 54.33 | 48.17 | 48.78 |
123
+
124
+ ## 训练过程模型/Training Dynamics
125
+
126
+ 除了训练了 2.6 万亿 Tokens 的 [Baichuan2-7B-Base](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base) 模型,我们还提供了在此之前的另外 11 个中间过程的模型(分别对应训练了约 0.2 ~ 2.4 万亿 Tokens)供社区研究使用
127
+ ([训练过程checkpoint下载](https://huggingface.co/baichuan-inc/Baichuan2-7B-Intermediate-Checkpoints))。下图给出了这些 checkpoints 在 C-Eval、MMLU、CMMLU 三个 benchmark 上的效果变化:
128
+
129
+ In addition to the [Baichuan2-7B-Base](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base) model trained on 2.6 trillion tokens, we also offer 11 additional intermediate-stage models for community research, corresponding to training on approximately 0.2 to 2.4 trillion tokens each ([Intermediate Checkpoints Download](https://huggingface.co/baichuan-inc/Baichuan2-7B-Intermediate-Checkpoints)). The graph below shows the performance changes of these checkpoints on three benchmarks: C-Eval, MMLU, and CMMLU.
130
+
131
+ ![checkpoint](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/resolve/main/checkpoints.jpeg)
132
+
133
+ # <span id=""Community"">社区与生态/Community</span>
134
+
135
+ ## Intel 酷睿 Ultra 平台运行百川大模型
136
+
137
+ 使用酷睿™/至强® 可扩展处理器或配合锐炫™ GPU等进行部署[Baichuan2-7B-Chat],[Baichuan2-13B-Chat]模型,推荐使用 BigDL-LLM([CPU], [GPU])以发挥更好推理性能。
138
+
139
+ 详细支持信息可参考[中文操作手册](https://github.com/intel-analytics/bigdl-llm-tutorial/tree/main/Chinese_Version),包括用notebook支持,[加载,优化,保存方法](https://github.com/intel-analytics/bigdl-llm-tutorial/blob/main/Chinese_Version/ch_3_AppDev_Basic/3_BasicApp.ipynb)等。
140
+
141
+ When deploy on Core™/Xeon® Scalable Processors or with Arc™ GPU, BigDL-LLM ([CPU], [GPU]) is recommended to take full advantage of better inference performance.
142
+
143
+ # <span id=""Terms"">声明与协议/Terms and Conditions</span>
144
+
145
+ ## 声明
146
+
147
+ 我们在此声明,我们的开发团队并未基于 Baichuan 2 模型开发任何应用,无论是在 iOS、Android、网页或任何其他平台。我们强烈呼吁所有使用者,不要利用
148
+ Baichuan 2 模型进行任何危害国家社会安全或违法的活动。另外,我们也要求使用者不要将 Baichuan 2
149
+ 模型用于未经适当安全审查和备案的互联网服务。我们希望所有的使用者都能遵守这个原则,确保科技的发展能在规范和合法的环境下进行。
150
+
151
+ 我们已经尽我们所能,来确保模型训练过程中使用的数据的合规性。然而,尽管我们已经做出了巨大的努力,但由于模型和数据的复杂性,仍有可能存在一些无法预见的问题。因此,如果由于使用
152
+ Baichuan 2 开源模型而导致的任何问题,包括但不限于数据安全问题、公共舆论风险,或模型被误导、滥用、传播或不当利用所带来的任何风险和问题,我们将不承担任何责任。
153
+
154
+ We hereby declare that our team has not developed any applications based on Baichuan 2 models, not on iOS, Android, the web, or any other platform. We strongly call on all users not to use Baichuan 2 models for any activities that harm national / social security or violate the law. Also, we ask users not to use Baichuan 2 models for Internet services that have not undergone appropriate security reviews and filings. We hope that all users can abide by this principle and ensure that the development of technology proceeds in a regulated and legal environment.
155
+
156
+ We have done our best to ensure the compliance of the data used in the model training process. However, despite our considerable efforts, there may still be some unforeseeable issues due to the complexity of the model and data. Therefore, if any problems arise due to the use of Baichuan 2 open-source models, including but not limited to data security issues, public opinion risks, or any risks and problems brought about by the model being misled, abused, spread or improperly exploited, we will not assume any responsibility.
157
+
158
+ ## 协议
159
+
160
+ 社区使用 Baichuan 2 模型需要遵循 [Apache 2.0](https://github.com/baichuan-inc/Baichuan2/blob/main/LICENSE) 和[《Baichuan 2 模型社区许可协议》](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/resolve/main/Baichuan%202%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf)。Baichuan 2 模型支持商业用途,如果您计划将 Baichuan 2 模型或其衍生品用于商业目的,请您确认您的主体符合以下情况:
161
+ 1. 您或您的关联方的服务或产品的日均用户活跃量(DAU)低于100万。
162
+ 2. 您或您的关联方不是软件服务提供商、云服务提供商。
163
+ 3. 您或您的关联方不存在将授予您的商用许可,未经百川许可二次授权给其他第三方的可能。
164
+
165
+ 在符合以上条件的前提下,您需要通过以下联系邮箱 opensource@baichuan-inc.com ,提交《Baichuan 2 模型社区许可协议》要求的申请材料。审核通过后,百川将特此授予您一个非排他性、全球性、不可转让、不可再许可、可撤销的商用版权许可。
166
+
167
+ The community usage of Baichuan 2 model requires adherence to [Apache 2.0](https://github.com/baichuan-inc/Baichuan2/blob/main/LICENSE) and [Community License for Baichuan2 Model](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/resolve/main/Baichuan%202%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf). The Baichuan 2 model supports commercial use. If you plan to use the Baichuan 2 model or its derivatives for commercial purposes, please ensure that your entity meets the following conditions:
168
+
169
+ 1. The Daily Active Users (DAU) of your or your affiliate's service or product is less than 1 million.
170
+ 2. Neither you nor your affiliates are software service providers or cloud service providers.
171
+ 3. There is no possibility for you or your affiliates to grant the commercial license given to you, to reauthorize it to other third parties without Baichuan's permission.
172
+
173
+ Upon meeting the above conditions, you need to submit the application materials required by the Baichuan 2 Model Community License Agreement via the following contact email: opensource@baichuan-inc.com. Once approved, Baichuan will hereby grant you a non-exclusive, global, non-transferable, non-sublicensable, revocable commercial copyright license.
174
+
175
+
176
+ [GitHub]:https://github.com/baichuan-inc/Baichuan2
177
+ [Baichuan2]:https://github.com/baichuan-inc/Baichuan2
178
+
179
+ [Baichuan-7B]:https://huggingface.co/baichuan-inc/Baichuan-7B
180
+ [Baichuan2-7B-Base]:https://huggingface.co/baichuan-inc/Baichuan2-7B-Base
181
+ [Baichuan2-7B-Chat]:https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat
182
+ [Baichuan2-7B-Chat-4bits]:https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat-4bits
183
+ [Baichuan-13B-Base]:https://huggingface.co/baichuan-inc/Baichuan-13B-Base
184
+ [Baichuan2-13B-Base]:https://huggingface.co/baichuan-inc/Baichuan2-13B-Base
185
+ [Baichuan2-13B-Chat]:https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat
186
+ [Baichuan2-13B-Chat-4bits]:https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat-4bits
187
+
188
+ [通用]:https://github.com/baichuan-inc/Baichuan2#%E9%80%9A%E7%94%A8%E9%A2%86%E5%9F%9F
189
+ [法律]:https://github.com/baichuan-inc/Baichuan2#%E6%B3%95%E5%BE%8B%E5%8C%BB%E7%96%97
190
+ [医疗]:https://github.com/baichuan-inc/Baichuan2#%E6%B3%95%E5%BE%8B%E5%8C%BB%E7%96%97
191
+ [数学]:https://github.com/baichuan-inc/Baichuan2#%E6%95%B0%E5%AD%A6%E4%BB%A3%E7%A0%81
192
+ [代码]:https://github.com/baichuan-inc/Baichuan2#%E6%95%B0%E5%AD%A6%E4%BB%A3%E7%A0%81
193
+ [多语言翻译]:https://github.com/baichuan-inc/Baichuan2#%E5%A4%9A%E8%AF%AD%E8%A8%80%E7%BF%BB%E8%AF%91
194
+
195
+ [《Baichuan 2 模型社区许可协议》]:https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Baichuan%202%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf
196
+
197
+ [邮件申请]: mailto:opensource@baichuan-inc.com
198
+ [Email]: mailto:opensource@baichuan-inc.com
199
+ [opensource@baichuan-inc.com]: mailto:opensource@baichuan-inc.com
200
+ [训练过程heckpoint下载]: https://huggingface.co/baichuan-inc/Baichuan2-7B-Intermediate-Checkpoints
201
+ [百川智能]: https://www.baichuan-ai.com
202
+
203
+ [CPU]: https://github.com/intel-analytics/BigDL/tree/main/python/llm/example/CPU/HF-Transformers-AutoModels/Model/baichuan2
204
+ [GPU]: https://github.com/intel-analytics/BigDL/tree/main/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2","{""id"": ""baichuan-inc/Baichuan2-13B-Chat"", ""author"": ""baichuan-inc"", ""sha"": ""c8d877c7ca596d9aeff429d43bff06e288684f45"", ""last_modified"": ""2024-02-26 08:58:32+00:00"", ""created_at"": ""2023-08-29 02:30:01+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 7032, ""downloads_all_time"": null, ""likes"": 424, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""baichuan"", ""text-generation"", ""custom_code"", ""en"", ""zh"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\n- zh\nlicense: other\ntasks:\n- text-generation"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""BaichuanForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_baichuan.BaichuanConfig"", ""AutoModelForCausalLM"": ""modeling_baichuan.BaichuanForCausalLM""}, ""model_type"": ""baichuan"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": true}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": true}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": true}, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", 
""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": true}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_baichuan.BaichuanForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Baichuan2 \u6a21\u578b\u793e\u533a\u8bb8\u53ef\u534f\u8bae.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Community License for Baichuan2 Model.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='handler.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00003.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantizer.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], 
""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""EmbeddedLLM/chat-template-generation"", ""Justinrune/LLaMA-Factory"", ""kenken999/fastapi_django_main_live"", ""officialhimanshu595/llama-factory"", ""Dify-AI/Baichuan2-13B-Chat"", ""li-qing/FIRE"", ""Zulelee/langchain-chatchat"", ""xu-song/kplug"", ""tianleliphoebe/visual-arena"", ""Ashmal/MobiLlama"", ""IS2Lab/S-Eval"", ""PegaMichael/Taiwan-LLaMa2-Copy"", ""silk-road/ChatHaruhi-Qwen118k-Extended"", ""tjtanaa/chat-template-generation"", ""CaiRou-Huang/TwLLM7B-v2.0-base"", ""blackwingedkite/gutalk"", ""cllatMTK/Breeze"", ""zivzhao/Baichuan2-13B-Chat"", ""blackwingedkite/alpaca2_clas"", ""silk-road/ChatHaruhi-BaiChuan2-13B"", ""Bofeee5675/FIRE"", ""evelyn-lo/evelyn"", ""yuantao-infini-ai/demo_test"", ""zjasper666/bf16_vs_fp8"", ""martinakaduc/melt"", ""cloneQ/internLMRAG"", ""hujin0929/LlamaIndex_RAG"", ""flyfive0315/internLlamaIndex"", ""sunxiaokang/llamaindex_RAG_web"", ""kai119/llama"", ""qxy826982153/LlamaIndexRAG"", ""ilemon/Internlm2.5LLaMAindexRAG"", ""msun415/Llamole""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-26 08:58:32+00:00"", ""cardData"": ""language:\n- en\n- zh\nlicense: other\ntasks:\n- text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_baichuan.BaichuanForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""64ed5829453a4b4bef2814a2"", ""modelId"": ""baichuan-inc/Baichuan2-13B-Chat"", ""usedStorage"": 111175613599}",0,https://huggingface.co/zimyu/baichuan2-13b-zsee-lora,1,https://huggingface.co/yanxinlan/adapter,1,"https://huggingface.co/TheBloke/Baichuan2-13B-Chat-GPTQ, https://huggingface.co/second-state/Baichuan2-13B-Chat-GGUF, https://huggingface.co/mradermacher/Baichuan2-13B-Chat-GGUF, https://huggingface.co/mradermacher/Baichuan2-13B-Chat-i1-GGUF",4,,0,"Ashmal/MobiLlama, Bofeee5675/FIRE, EmbeddedLLM/chat-template-generation, 
IS2Lab/S-Eval, Justinrune/LLaMA-Factory, Zulelee/langchain-chatchat, blackwingedkite/gutalk, eduagarcia/open_pt_llm_leaderboard, evelyn-lo/evelyn, huggingface/InferenceSupport/discussions/new?title=baichuan-inc/Baichuan2-13B-Chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bbaichuan-inc%2FBaichuan2-13B-Chat%5D(%2Fbaichuan-inc%2FBaichuan2-13B-Chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, martinakaduc/melt, xu-song/kplug",13
205
+ zimyu/baichuan2-13b-zsee-lora,"---
206
+ base_model:
207
+ - baichuan-inc/Baichuan2-13B-Chat
208
+ tags:
209
+ - chemistry
210
+ ---
211
+ This LoRA model was fine-tuned using the zeolite synthesis dataset ZSEE.
212
+
213
+ Usage:
214
+ ```
215
+ import torch
216
+ from transformers import (
217
+ AutoConfig,
218
+ AutoTokenizer,
219
+ AutoModelForCausalLM,
220
+ GenerationConfig
221
+ )
222
+ from peft import PeftModel
223
+ device = torch.device(""cuda"" if torch.cuda.is_available() else ""cpu"")
224
+ model_path = 'baichuan-inc/Baichuan2-13B-Chat'
225
+ lora_path = 'zimyu/baichuan2-13b-zsee-lora'
226
+
227
+ config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
228
+ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
229
+
230
+ model = AutoModelForCausalLM.from_pretrained(
231
+ model_path,
232
+ config=config,
233
+ device_map=""auto"",
234
+ torch_dtype=torch.bfloat16,
235
+ trust_remote_code=True,
236
+ )
237
+
238
+ model = PeftModel.from_pretrained(
239
+ model,
240
+ lora_path,
241
+ )
242
+ model.eval()
243
+
244
+ system_prompt = ""<<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>\n\n""
245
+ sintruct = ""{\""instruction\"": \""You are an expert in event argument extraction. Please extract event arguments and their roles from the input that conform to the schema definition, which already includes event trigger words. If an argument does not exist, return NAN or an empty dictionary. Please respond in the format of a JSON string.\"", \""schema\"": [{\""event_type\"": \""Add\"", \""trigger\"": [\""added\""], \""arguments\"": [\""container\"", \""material\"", \""temperature\""]}, {\""event_type\"": \""Stir\"", \""trigger\"": [\""stirred\""], \""arguments\"": [\""sample\"", \""revolution\"", \""temperature\"", \""duration\""]}], \""input\"": \""Subsequently , the pre-prepared silicalite-1 seed was added to the above mixture and stirred for another 1 h , and the quantity of seed equals to 7.0 wt% of the total SiO2 in the starting gel .\""}""
246
+ sintruct = '[INST] ' + system_prompt + sintruct + ' [/INST]'
247
+
248
+ input_ids = tokenizer.encode(sintruct, return_tensors=""pt"").to(device)
249
+ input_length = input_ids.size(1)
250
+ generation_output = model.generate(input_ids=input_ids, generation_config=GenerationConfig(max_length=512, max_new_tokens=256, return_dict_in_generate=True))
251
+ generation_output = generation_output.sequences[0]
252
+ generation_output = generation_output[input_length:]
253
+ output = tokenizer.decode(generation_output, skip_special_tokens=True)
254
+
255
+ print(output)
256
+ ```
257
+
258
+ Output:
259
+ ```
260
+ {""Add"": [{""container"": ""NAN"", ""material"": [""above mixture"", ""pre-prepared silicalite-1 seed""], ""temperature"": ""NAN""}], ""Stir"": [{""sample"": ""NAN"", ""revolution"": ""NAN"", ""temperature"": ""NAN"", ""duration"": ""1 h""}]}
261
+ ```","{""id"": ""zimyu/baichuan2-13b-zsee-lora"", ""author"": ""zimyu"", ""sha"": ""4cbdd08c1d7b96d8ee9183f594187e54541537bb"", ""last_modified"": ""2025-01-08 13:15:57+00:00"", ""created_at"": ""2025-01-08 03:56:17+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""chemistry"", ""base_model:baichuan-inc/Baichuan2-13B-Chat"", ""base_model:finetune:baichuan-inc/Baichuan2-13B-Chat"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- baichuan-inc/Baichuan2-13B-Chat\ntags:\n- chemistry"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-08 13:15:57+00:00"", ""cardData"": ""base_model:\n- baichuan-inc/Baichuan2-13B-Chat\ntags:\n- chemistry"", ""transformersInfo"": null, ""_id"": ""677df7617b04df2925cafa2f"", ""modelId"": ""zimyu/baichuan2-13b-zsee-lora"", ""usedStorage"": 
892654240}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=zimyu/baichuan2-13b-zsee-lora&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bzimyu%2Fbaichuan2-13b-zsee-lora%5D(%2Fzimyu%2Fbaichuan2-13b-zsee-lora)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Baichuan2-7B-Chat_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ baichuan-inc/Baichuan2-7B-Chat,"---
3
+ language:
4
+ - en
5
+ - zh
6
+ license_name: baichuan2-community-license
7
+ license_link: https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat/blob/main/Community%20License%20for%20Baichuan2%20Model.pdf
8
+ tasks:
9
+ - text-generation
10
+ ---
11
+
12
+ <!-- markdownlint-disable first-line-h1 -->
13
+ <!-- markdownlint-disable html -->
14
+ <div align=""center"">
15
+ <h1>
16
+ Baichuan 2
17
+ </h1>
18
+ </div>
19
+
20
+ <div align=""center"">
21
+ <a href=""https://github.com/baichuan-inc/Baichuan2"" target=""_blank"">🦉GitHub</a> | <a href=""https://github.com/baichuan-inc/Baichuan-7B/blob/main/media/wechat.jpeg?raw=true"" target=""_blank"">💬WeChat</a>
22
+ </div>
23
+ <div align=""center"">
24
+ 百川API支持搜索增强和192K长窗口,新增百川搜索增强知识库、限时免费!<br>
25
+ 🚀 <a href=""https://www.baichuan-ai.com/"" target=""_blank"">百川大模型在线对话平台</a> 已正式向公众开放 🎉
26
+ </div>
27
+
28
+ # 目录/Table of Contents
29
+
30
+ - [📖 模型介绍/Introduction](#Introduction)
31
+ - [⚙️ 快速开始/Quick Start](#Start)
32
+ - [📊 Benchmark评估/Benchmark Evaluation](#Benchmark)
33
+ - [👥 社区与生态/Community](#Community)
34
+ - [📜 声明与协议/Terms and Conditions](#Terms)
35
+
36
+
37
+ # <span id=""Introduction"">模型介绍/Introduction</span>
38
+
39
+ Baichuan 2 是[百川智能]推出的新一代开源大语言模型,采用 **2.6 万亿** Tokens 的高质量语料训练,在权威的中文和英文 benchmark
40
+ 上均取得同尺寸最好的效果。本次发布包含有 7B、13B 的 Base 和 Chat 版本,并提供了 Chat 版本的 4bits
41
+ 量化,所有版本不仅对学术研究完全开放,开发者也仅需[邮件申请]并获得官方商用许可后,即可以免费商用。具体发布版本和下载见下表:
42
+
43
+ Baichuan 2 is the new generation of large-scale open-source language models launched by [Baichuan Intelligence inc.](https://www.baichuan-ai.com/).
44
+ It is trained on a high-quality corpus with 2.6 trillion tokens and has achieved the best performance in authoritative Chinese and English benchmarks of the same size.
45
+ This release includes 7B and 13B versions for both Base and Chat models, along with a 4bits quantized version for the Chat model.
46
+ All versions are fully open to academic research, and developers can also use them for free in commercial applications after obtaining an official commercial license through [email request](mailto:opensource@baichuan-inc.com).
47
+ The specific release versions and download links are listed in the table below:
48
+
49
+ | | Base Model | Chat Model | 4bits Quantized Chat Model |
50
+ |:---:|:--------------------:|:--------------------:|:--------------------------:|
51
+ | 7B | [Baichuan2-7B-Base](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base) | [Baichuan2-7B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat) | [Baichuan2-7B-Chat-4bits](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base-4bits) |
52
+ | 13B | [Baichuan2-13B-Base](https://huggingface.co/baichuan-inc/Baichuan2-13B-Base) | [Baichuan2-13B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat) | [Baichuan2-13B-Chat-4bits](https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat-4bits) |
53
+
54
+ # <span id=""Start"">快速开始/Quick Start</span>
55
+
56
+ 在Baichuan2系列模型中,我们为了加快推理速度使用了Pytorch2.0加入的新功能F.scaled_dot_product_attention,因此模型需要在Pytorch2.0环境下运行。
57
+
58
+ In the Baichuan 2 series models, we have utilized the new feature `F.scaled_dot_product_attention` introduced in PyTorch 2.0 to accelerate inference speed. Therefore, the model needs to be run in a PyTorch 2.0 environment.
59
+
60
+
61
+ ```python
62
+ import torch
63
+ from transformers import AutoModelForCausalLM, AutoTokenizer
64
+ from transformers.generation.utils import GenerationConfig
65
+ tokenizer = AutoTokenizer.from_pretrained(""baichuan-inc/Baichuan2-7B-Chat"", use_fast=False, trust_remote_code=True)
66
+ model = AutoModelForCausalLM.from_pretrained(""baichuan-inc/Baichuan2-7B-Chat"", device_map=""auto"", torch_dtype=torch.bfloat16, trust_remote_code=True)
67
+ model.generation_config = GenerationConfig.from_pretrained(""baichuan-inc/Baichuan2-7B-Chat"")
68
+ messages = []
69
+ messages.append({""role"": ""user"", ""content"": ""解释一下“温故而知新”""})
70
+ response = model.chat(tokenizer, messages)
71
+ print(response)
72
+ ""温故而知新""是一句中国古代的成语,出自《论语·为政》篇。这句话的意思是:通过回顾过去,我们可以发现新的知识和理解。换句话说,学习历史和经验可以让我们更好地理解现在和未来。
73
+
74
+ 这句话鼓励我们在学习和生活中不断地回顾和反思过去的经验,从而获得新的启示和成长。通过重温旧的知识和经历,我们可以发现新的观点和理解,从而更好地应对不断变化的世界和挑战。
75
+ ```
76
+
77
+ # <span id=""Benchmark"">Benchmark ��果/Benchmark Evaluation</span>
78
+
79
+ 我们在[通用]、[法律]、[医疗]、[数学]、[代码]和[多语言翻译]六个领域的中英文权威数据集上对模型进行了广泛测试,更多详细测评结果可查看[GitHub]。
80
+
81
+ We have extensively tested the model on authoritative Chinese-English datasets across six domains: [General](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#general-domain), [Legal](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#law-and-medicine), [Medical](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#law-and-medicine), [Mathematics](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#mathematics-and-code), [Code](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#mathematics-and-code), and [Multilingual Translation](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md#multilingual-translation). For more detailed evaluation results, please refer to [GitHub](https://github.com/baichuan-inc/Baichuan2/blob/main/README_EN.md).
82
+
83
+ ### 7B Model Results
84
+
85
+ | | **C-Eval** | **MMLU** | **CMMLU** | **Gaokao** | **AGIEval** | **BBH** |
86
+ |:-----------------------:|:----------:|:--------:|:---------:|:----------:|:-----------:|:-------:|
87
+ | | 5-shot | 5-shot | 5-shot | 5-shot | 5-shot | 3-shot |
88
+ | **GPT-4** | 68.40 | 83.93 | 70.33 | 66.15 | 63.27 | 75.12 |
89
+ | **GPT-3.5 Turbo** | 51.10 | 68.54 | 54.06 | 47.07 | 46.13 | 61.59 |
90
+ | **LLaMA-7B** | 27.10 | 35.10 | 26.75 | 27.81 | 28.17 | 32.38 |
91
+ | **LLaMA2-7B** | 28.90 | 45.73 | 31.38 | 25.97 | 26.53 | 39.16 |
92
+ | **MPT-7B** | 27.15 | 27.93 | 26.00 | 26.54 | 24.83 | 35.20 |
93
+ | **Falcon-7B** | 24.23 | 26.03 | 25.66 | 24.24 | 24.10 | 28.77 |
94
+ | **ChatGLM2-6B** | 50.20 | 45.90 | 49.00 | 49.44 | 45.28 | 31.65 |
95
+ | **[Baichuan-7B]** | 42.80 | 42.30 | 44.02 | 36.34 | 34.44 | 32.48 |
96
+ | **[Baichuan2-7B-Base]** | 54.00 | 54.16 | 57.07 | 47.47 | 42.73 | 41.56 |
97
+
98
+ ### 13B Model Results
99
+
100
+ | | **C-Eval** | **MMLU** | **CMMLU** | **Gaokao** | **AGIEval** | **BBH** |
101
+ |:---------------------------:|:----------:|:--------:|:---------:|:----------:|:-----------:|:-------:|
102
+ | | 5-shot | 5-shot | 5-shot | 5-shot | 5-shot | 3-shot |
103
+ | **GPT-4** | 68.40 | 83.93 | 70.33 | 66.15 | 63.27 | 75.12 |
104
+ | **GPT-3.5 Turbo** | 51.10 | 68.54 | 54.06 | 47.07 | 46.13 | 61.59 |
105
+ | **LLaMA-13B** | 28.50 | 46.30 | 31.15 | 28.23 | 28.22 | 37.89 |
106
+ | **LLaMA2-13B** | 35.80 | 55.09 | 37.99 | 30.83 | 32.29 | 46.98 |
107
+ | **Vicuna-13B** | 32.80 | 52.00 | 36.28 | 30.11 | 31.55 | 43.04 |
108
+ | **Chinese-Alpaca-Plus-13B** | 38.80 | 43.90 | 33.43 | 34.78 | 35.46 | 28.94 |
109
+ | **XVERSE-13B** | 53.70 | 55.21 | 58.44 | 44.69 | 42.54 | 38.06 |
110
+ | **[Baichuan-13B-Base]** | 52.40 | 51.60 | 55.30 | 49.69 | 43.20 | 43.01 |
111
+ | **[Baichuan2-13B-Base]** | 58.10 | 59.17 | 61.97 | 54.33 | 48.17 | 48.78 |
112
+
113
+ ## 训练过程模型/Training Dynamics
114
+
115
+ 除了训练了 2.6 万亿 Tokens 的 [Baichuan2-7B-Base](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base) 模型,我们还提供了在此之前的另外 11 个中间过程的模型(分别对应训练了约 0.2 ~ 2.4 万亿 Tokens)供社区研究使用
116
+ ([训练过程checkpoint下载](https://huggingface.co/baichuan-inc/Baichuan2-7B-Intermediate-Checkpoints))。下图给出了这些 checkpoints 在 C-Eval、MMLU、CMMLU 三个 benchmark 上的效果变化:
117
+
118
+ In addition to the [Baichuan2-7B-Base](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base) model trained on 2.6 trillion tokens, we also offer 11 additional intermediate-stage models for community research, corresponding to training on approximately 0.2 to 2.4 trillion tokens each ([Intermediate Checkpoints Download](https://huggingface.co/baichuan-inc/Baichuan2-7B-Intermediate-Checkpoints)). The graph below shows the performance changes of these checkpoints on three benchmarks: C-Eval, MMLU, and CMMLU.
119
+
120
+ ![checkpoint](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/resolve/main/checkpoints.jpeg)
121
+
122
+ # <span id=""Community"">社区与生态/Community</span>
123
+
124
+ ## Intel 酷睿 Ultra 平台运行百川大模型
125
+
126
+ 使用酷睿™/至强® 可扩展处理器或配合锐炫™ GPU等进行部署[Baichuan2-7B-Chat],[Baichuan2-13B-Chat]模型,推荐使用 BigDL-LLM([CPU], [GPU])以发挥更好推理性能。
127
+
128
+ 详细支持信息可参考[中文操作手册](https://github.com/intel-analytics/bigdl-llm-tutorial/tree/main/Chinese_Version),包括用notebook支持,[加载,优化,保存方法](https://github.com/intel-analytics/bigdl-llm-tutorial/blob/main/Chinese_Version/ch_3_AppDev_Basic/3_BasicApp.ipynb)等。
129
+
130
+ When deploy on Core™/Xeon® Scalable Processors or with Arc™ GPU, BigDL-LLM ([CPU], [GPU]) is recommended to take full advantage of better inference performance.
131
+
132
+ # <span id=""Terms"">声明与协议/Terms and Conditions</span>
133
+
134
+ ## 声明
135
+
136
+ 我们在此声明,我们的开发团队并未基于 Baichuan 2 模型开发任何应用,无论是在 iOS、Android、网页或任何其他平台。我们强烈呼吁所有使用者,不要利用
137
+ Baichuan 2 模型进行任何危害国家社会安全或违法的活动。另外,我们也要求使用者不要将 Baichuan 2
138
+ 模型用于未经适当安全审查和备案的互联网服务。我们希望所有的使用者都能遵守这个原则,确保科技的发展能在规范和合法的环境下进行。
139
+
140
+ 我们已经尽我们所能,来确保模型训练过程中使用的数据的合规性。然而,尽管我们已经做出了巨大的努力,但由于模型和数据的复杂性,仍有可能存在一些无法预见的问题。因此,如果由于使用
141
+ Baichuan 2 开源模型而导致的任何问题,包括但不限于数据安全问题、公共舆论风险,或模型被误导、滥用、传播或不当利用所带来的任何风险和问题,我们将不承担任何责任。
142
+
143
+ We hereby declare that our team has not developed any applications based on Baichuan 2 models, not on iOS, Android, the web, or any other platform. We strongly call on all users not to use Baichuan 2 models for any activities that harm national / social security or violate the law. Also, we ask users not to use Baichuan 2 models for Internet services that have not undergone appropriate security reviews and filings. We hope that all users can abide by this principle and ensure that the development of technology proceeds in a regulated and legal environment.
144
+
145
+ We have done our best to ensure the compliance of the data used in the model training process. However, despite our considerable efforts, there may still be some unforeseeable issues due to the complexity of the model and data. Therefore, if any problems arise due to the use of Baichuan 2 open-source models, including but not limited to data security issues, public opinion risks, or any risks and problems brought about by the model being misled, abused, spread or improperly exploited, we will not assume any responsibility.
146
+
147
+ ## 协议
148
+
149
+ 社区使用 Baichuan 2 模型需要遵循 [Apache 2.0](https://github.com/baichuan-inc/Baichuan2/blob/main/LICENSE) 和[《Baichuan 2 模型社区许可协议》](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/resolve/main/Baichuan%202%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf)。Baichuan 2 模型支持商业用途,如果您计划将 Baichuan 2 模型或其衍生品用于商业目的,请您确认您的主体符合以下情况:
150
+ 1. 您或您的关联方的服务或产品的日均用户活跃量(DAU)低于100万。
151
+ 2. 您或您的关联方不是软件服务提供商、云服务提供商。
152
+ 3. 您或您的关联方不存在将授予您的商用许可,未经百川许可二次授权给其他第三方的可能。
153
+
154
+ 在符合以上条件的前提下,您需要通过以下联系邮箱 opensource@baichuan-inc.com ,提交《Baichuan 2 模型社区许可协议》要求的申请材料。审核通过后,百川将特此授予您一个非排他性、全球性、不可转让、不可再许可、可撤销的商用版权许可。
155
+
156
+ The community usage of Baichuan 2 model requires adherence to [Apache 2.0](https://github.com/baichuan-inc/Baichuan2/blob/main/LICENSE) and [Community License for Baichuan2 Model](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/resolve/main/Baichuan%202%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf). The Baichuan 2 model supports commercial use. If you plan to use the Baichuan 2 model or its derivatives for commercial purposes, please ensure that your entity meets the following conditions:
157
+
158
+ 1. The Daily Active Users (DAU) of your or your affiliate's service or product is less than 1 million.
159
+ 2. Neither you nor your affiliates are software service providers or cloud service providers.
160
+ 3. There is no possibility for you or your affiliates to grant the commercial license given to you, to reauthorize it to other third parties without Baichuan's permission.
161
+
162
+ Upon meeting the above conditions, you need to submit the application materials required by the Baichuan 2 Model Community License Agreement via the following contact email: opensource@baichuan-inc.com. Once approved, Baichuan will hereby grant you a non-exclusive, global, non-transferable, non-sublicensable, revocable commercial copyright license.
163
+
164
+
165
+ [GitHub]:https://github.com/baichuan-inc/Baichuan2
166
+ [Baichuan2]:https://github.com/baichuan-inc/Baichuan2
167
+
168
+ [Baichuan-7B]:https://huggingface.co/baichuan-inc/Baichuan-7B
169
+ [Baichuan2-7B-Base]:https://huggingface.co/baichuan-inc/Baichuan2-7B-Base
170
+ [Baichuan2-7B-Chat]:https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat
171
+ [Baichuan2-7B-Chat-4bits]:https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat-4bits
172
+ [Baichuan-13B-Base]:https://huggingface.co/baichuan-inc/Baichuan-13B-Base
173
+ [Baichuan2-13B-Base]:https://huggingface.co/baichuan-inc/Baichuan2-13B-Base
174
+ [Baichuan2-13B-Chat]:https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat
175
+ [Baichuan2-13B-Chat-4bits]:https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat-4bits
176
+
177
+ [通用]:https://github.com/baichuan-inc/Baichuan2#%E9%80%9A%E7%94%A8%E9%A2%86%E5%9F%9F
178
+ [法律]:https://github.com/baichuan-inc/Baichuan2#%E6%B3%95%E5%BE%8B%E5%8C%BB%E7%96%97
179
+ [医疗]:https://github.com/baichuan-inc/Baichuan2#%E6%B3%95%E5%BE%8B%E5%8C%BB%E7%96%97
180
+ [数学]:https://github.com/baichuan-inc/Baichuan2#%E6%95%B0%E5%AD%A6%E4%BB%A3%E7%A0%81
181
+ [代码]:https://github.com/baichuan-inc/Baichuan2#%E6%95%B0%E5%AD%A6%E4%BB%A3%E7%A0%81
182
+ [多语言翻译]:https://github.com/baichuan-inc/Baichuan2#%E5%A4%9A%E8%AF%AD%E8%A8%80%E7%BF%BB%E8%AF%91
183
+
184
+ [《Baichuan 2 模型社区许可协议》]:https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Baichuan%202%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf
185
+
186
+ [邮件申请]: mailto:opensource@baichuan-inc.com
187
+ [Email]: mailto:opensource@baichuan-inc.com
188
+ [opensource@baichuan-inc.com]: mailto:opensource@baichuan-inc.com
189
+ [训练过程heckpoint下载]: https://huggingface.co/baichuan-inc/Baichuan2-7B-Intermediate-Checkpoints
190
+ [百川智能]: https://www.baichuan-ai.com
191
+
192
+ [CPU]: https://github.com/intel-analytics/BigDL/tree/main/python/llm/example/CPU/HF-Transformers-AutoModels/Model/baichuan2
193
+ [GPU]: https://github.com/intel-analytics/BigDL/tree/main/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2
194
+ ","{""id"": ""baichuan-inc/Baichuan2-7B-Chat"", ""author"": ""baichuan-inc"", ""sha"": ""ea66ced17780ca3db39bc9f8aa601d8463db3da5"", ""last_modified"": ""2024-02-26 08:58:12+00:00"", ""created_at"": ""2023-08-29 02:21:41+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 13874, ""downloads_all_time"": null, ""likes"": 165, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""baichuan"", ""text-generation"", ""custom_code"", ""en"", ""zh"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\n- zh\nlicense_name: baichuan2-community-license\nlicense_link: https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat/blob/main/Community%20License%20for%20Baichuan2%20Model.pdf\ntasks:\n- text-generation"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""BaichuanForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_baichuan.BaichuanConfig"", ""AutoModelForCausalLM"": ""modeling_baichuan.BaichuanForCausalLM""}, ""model_type"": ""baichuan"", ""tokenizer_config"": {""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": true}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": true}, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": true}}}, ""transformers_info"": {""auto_model"": 
""AutoModelForCausalLM"", ""custom_class"": ""modeling_baichuan.BaichuanForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Baichuan2 \u6a21\u578b\u793e\u533a\u8bb8\u53ef\u534f\u8bae.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Community License for Baichuan2 Model.pdf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantizer.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Justinrune/LLaMA-Factory"", ""yhavinga/dutch-tokenizer-arena"", ""kenken999/fastapi_django_main_live"", ""lightmate/llm-chatbot"", ""officialhimanshu595/llama-factory"", ""Zulelee/langchain-chatchat"", ""malvika2003/openvino_notebooks"", ""Yiyuan/VSA"", ""blackwingedkite/gutalk"", ""JiakunXu/chat_with_llm"", ""blackwingedkite/alpaca2_clas"", ""Hndsguy/813-MindSearch"", ""JiakaiDu/RAG_Test"", ""cloneQ/internLMRAG"", ""hujin0929/LlamaIndex_RAG"", ""flyfive0315/internLlamaIndex"", 
""sunxiaokang/llamaindex_RAG_web"", ""macota1/axa"", ""Superkingjcj/Lagent"", ""Vic-729/weathersearch_agent"", ""SEUZCYYDS/Lagent"", ""Chipsleep/hgagent"", ""Jianfei217/MultiAgents"", ""Gon04/lagent_demo"", ""kai119/llama"", ""kai119/lagent"", ""sfang32/Agent_Based_on_Lagent"", ""Jianfei217/MulAgent"", ""lt676767/Lagent"", ""xiaoxishui/LAGENT"", ""Raymd9/Lagent"", ""qxy826982153/LlamaIndexRAG"", ""qxy826982153/Lagent"", ""KIKI1031/Lagent_FormoJ"", ""shinelover/InternLM_Lagent"", ""yanyoyo/Lagent"", ""quentinrobot/Lagent_20250109"", ""zhang4096/OneLagent"", ""emajjsky/LAgent"", ""KitHung/Lagent"", ""sanpang/Lagent"", ""Sunshine108/Lagent"", ""quentinrobot/lagent_weatherreport_ArxivSearch"", ""Jianfei217/MulAgentApp"", ""fulekkk/Lagent"", ""Bread-F/Lagent"", ""kxrrrr/Lagent"", ""dstars/lagent"", ""yyyycc/lagentspace"", ""MA-YuS/Intern-study-L2_2"", ""zhaomou/lagent-puyu"", ""minimum-generated-pig/Lagent"", ""Highthoughts/AgentTest"", ""stevending1st/myLagent"", ""z0312z/Lagent"", ""ilemon/Internlm2.5LLaMAindexRAG"", ""shaoshaoshao/agent_by_lagent"", ""leledaidai/Lagent_practice"", ""msun415/Llamole"", ""shuyaya-moon/Lagent"", ""sanjion9/Lagent-Agent"", ""seachen/Lagent-L2G2000-ymh"", ""magic2025/Lagent-Malone"", ""ddddpao/Lagent"", ""kazenokizi/andrew-s-lagent"", ""D4isyC/Lagent-Demo"", ""Penkris/agent"", ""Rtwotwo/Lagent"", ""Testdevk-1/Lagent_Multi-Agent"", ""leishenggungun/Lagent"", ""deswa/Lagent"", ""SutaLXY/internlm_lagent"", ""FMY714/internlm2.5_multiagent"", ""tianleiSHI/Multi-Agent"", ""Taoismimmortal/LagentAPPwithDouble"", ""HuHu1226/L2G2000-Lagent"", ""jiahm/Lagent_Learn"", ""sakuralggm/lagent"", ""kingwe/agents_Lagent"", ""CCCasEEE/internlm_lagent"", ""luckDW1126/Lagent"", ""Penkris/lagent111"", ""wowwang666/test-Multi-Agents1"", ""tjorange/lagent"", ""pipi333/Lingxi_Weather_Sprite""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-26 08:58:12+00:00"", ""cardData"": ""language:\n- en\n- 
zh\nlicense_name: baichuan2-community-license\nlicense_link: https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat/blob/main/Community%20License%20for%20Baichuan2%20Model.pdf\ntasks:\n- text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_baichuan.BaichuanForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""64ed5635b5b93ea8bd06eee6"", ""modelId"": ""baichuan-inc/Baichuan2-7B-Chat"", ""usedStorage"": 30025995676}",0,https://huggingface.co/JiunYi/Baichuan2-7B-Chat-DcardStylePost-SFT,1,"https://huggingface.co/li-ping/results_8_can_eos__checkpoint_8500, https://huggingface.co/Dawn90/Baichuan2-7b-medical-notes, https://huggingface.co/hawkling/output, https://huggingface.co/hawkling/answer, https://huggingface.co/tjluyao/baichuan2-7b-chat-lora1, https://huggingface.co/tjluyao/baichuan2-7b-chat-lora2",6,"https://huggingface.co/second-state/Baichuan2-7B-Chat-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q8_0-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q6_K-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q5_K_M-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q5_K_S-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q5_0-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q4_K_M-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q4_K_S-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q4_0-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q3_K_L-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q3_K_M-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q3_K_S-GGUF, https://huggingface.co/CHE-72/Baichuan2-7B-Chat-Q2_K-GGUF, https://huggingface.co/Junrui2021/Baichuan2-7B-Chat-Q4_K_M-GGUF, https://huggingface.co/mradermacher/Baichuan2-7B-Chat-GGUF, https://huggingface.co/mradermacher/Baichuan2-7B-Chat-i1-GGUF",16,,0,"Jianfei217/MultiAgents, Justinrune/LLaMA-Factory, Penkris/agent, Yiyuan/VSA, Zulelee/langchain-chatchat, 
blackwingedkite/gutalk, huggingface/InferenceSupport/discussions/new?title=baichuan-inc/Baichuan2-7B-Chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bbaichuan-inc%2FBaichuan2-7B-Chat%5D(%2Fbaichuan-inc%2FBaichuan2-7B-Chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, lightmate/llm-chatbot, malvika2003/openvino_notebooks, officialhimanshu595/llama-factory, xiaoxishui/LAGENT, yhavinga/dutch-tokenizer-arena",13
195
+ JiunYi/Baichuan2-7B-Chat-DcardStylePost-SFT,"---
196
+ language:
197
+ - zh
198
+ license: gpl-3.0
199
+ tags:
200
+ - art
201
+ - marketing
202
+ - llama-factory
203
+ base_model: baichuan-inc/Baichuan2-7B-Chat
204
+ metrics:
205
+ - bleu
206
+ pipeline_tag: text-generation
207
+ ---
208
+
209
+
210
+ ## Baichuan2-7B-Chat-DcardStylePost-SFT
211
+
212
+ ## 💻 Usage
213
+
214
+ ```python
215
+ !pip install -qU transformers accelerate
216
+
217
+ from transformers import AutoTokenizer
218
+ import transformers
219
+ import torch
220
+
221
+ model = ""JiunYi/Baichuan2-7B-Chat-DcardStylePost-SFT""
222
+ messages = [{""role"": ""user"", ""content"": ""【任务】:\n你是口碑行销公司的资深写手,现在要撰写一篇Dcard 『闲聊版』的口碑文。\n麻烦熟读下列 【参考资料】将商品或品牌与生活情境的融合,并尽量贴近生活口吻,以不要被发现是业配文为重要目标。\n请依照【口碑切角】为文章的故事主轴撰写整篇文章,请参考并依照【规范】角色口吻来撰写文章,融入角色情境来完成整篇故事撰写,请注重人物角色特色及限制。\n\n-\n\n【规范】:\n\n1.需产出文章标题\n2.请以第一人称方式撰写文章\n3.请记住现在是时间是西元 2023 年\n4.Please write in zh-TW language .\n5.降低口碑文业配感\n6.写作者是一个19~25岁之间的女生的大学生或是社会新鲜人\n7.喜爱吃甜食,常常带著男友一起去踩点试吃看看\n8.除了介绍食物,也喜欢顺便男友发生好笑的趣事来跟食物介绍的故事结合\n9.为降低口碑文业配感,请多多举几个甜点推荐\n10.喜欢应用很多可爱的颜文字和emoji\n\n-\n\n【参考资料】\n▲可丽露\n>>龙眼蜜,所以吃起来不会这么甜,跟其他家的可丽露吃起来真的很有差异\n以野生龙眼蜜减低并取代部分甜度,带出微微酸感的蛋蜜香,外脆内湿润的口感,完整的蜂巢组织度,木质调的兰姆酒香,法国盐之花平衡了整体,经典细致的马达加斯加香草籽原味,请在出炉后的3小时内食用完毕或\""冷冻\""保存,回烤后食用最接近现烤口感!\n\n\n\n▲奶盖布丁\n>>法国盐之花,连盐巴都很用心的甜点师\n带咸度的法国盐之花奶盖,微甜浓郁而不腻口的布蕾布丁体,和著偏苦的手煮焦糖液,是一款有著丰富层次的大人味布丁! 图片为示意仅供参考,食用时请由上方挖到底,品尝完整风味~\n\n【口碑切角】\n男友就像金鱼一样,好像记忆都只有三秒,\n只有三秒就算了还说错很多很好笑的话XD\n我都会带甜点回去给男友吃~结果男友居然说玛莉露很好吃XD\n玛莉露是神奇宝贝,可丽露才是甜点啦!\n分享日常男友都会口误的甜点们""}]
223
+
224
+ tokenizer = AutoTokenizer.from_pretrained(model)
225
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
226
+ pipeline = transformers.pipeline(
227
+ ""text-generation"",
228
+ model=model,
229
+ torch_dtype=torch.float16,
230
+ device_map=""auto"",
231
+ )
232
+
233
+ outputs = pipeline(prompt, max_new_tokens=512, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
234
+ print(outputs[0][""generated_text""])
235
+ ```","{""id"": ""JiunYi/Baichuan2-7B-Chat-DcardStylePost-SFT"", ""author"": ""JiunYi"", ""sha"": ""87d1b59085ce3ebb94ed64471d2d42ccb89ed354"", ""last_modified"": ""2024-05-01 16:49:58+00:00"", ""created_at"": ""2024-05-01 11:06:55+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 9, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""baichuan"", ""feature-extraction"", ""art"", ""marketing"", ""llama-factory"", ""text-generation"", ""conversational"", ""custom_code"", ""zh"", ""base_model:baichuan-inc/Baichuan2-7B-Chat"", ""base_model:finetune:baichuan-inc/Baichuan2-7B-Chat"", ""license:gpl-3.0"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: baichuan-inc/Baichuan2-7B-Chat\nlanguage:\n- zh\nlicense: gpl-3.0\nmetrics:\n- bleu\npipeline_tag: text-generation\ntags:\n- art\n- marketing\n- llama-factory"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""BaichuanForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_baichuan.BaichuanConfig"", ""AutoModel"": ""modeling_baichuan.BaichuanForCausalLM"", ""AutoModelForCausalLM"": ""baichuan-inc/Baichuan2-7B-Chat--modeling_baichuan.BaichuanForCausalLM""}, ""model_type"": ""baichuan"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<reserved_106>' + content + '<reserved_107>' }}{% elif message['role'] == 'assistant' %}{{ content }}{% endif %}{% endfor %}"", ""eos_token"": 
""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_baichuan.BaichuanForCausalLM"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00016.safetensors', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00016.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantizer.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_baichuan.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 7505973248}, ""total"": 7505973248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-01 16:49:58+00:00"", ""cardData"": ""base_model: baichuan-inc/Baichuan2-7B-Chat\nlanguage:\n- zh\nlicense: gpl-3.0\nmetrics:\n- bleu\npipeline_tag: text-generation\ntags:\n- art\n- marketing\n- llama-factory"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_baichuan.BaichuanForCausalLM"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""6632224fb1c6e12e1c60161c"", ""modelId"": ""JiunYi/Baichuan2-7B-Chat-DcardStylePost-SFT"", ""usedStorage"": 
15013973891}",1,,0,,0,https://huggingface.co/mradermacher/Baichuan2-7B-Chat-DcardStylePost-SFT-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=JiunYi/Baichuan2-7B-Chat-DcardStylePost-SFT&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BJiunYi%2FBaichuan2-7B-Chat-DcardStylePost-SFT%5D(%2FJiunYi%2FBaichuan2-7B-Chat-DcardStylePost-SFT)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Cerebras-GPT-13B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ cerebras/Cerebras-GPT-13B,"---
3
+ language:
4
+ - en
5
+ inference: false
6
+ tags:
7
+ - pytorch
8
+ - causal-lm
9
+ license: apache-2.0
10
+ datasets:
11
+ - the_pile
12
+ pipeline_tag: text-generation
13
+ ---
14
+
15
+ # Cerebras-GPT 13B
16
+ Check out our [Blog Post](https://www.cerebras.net/cerebras-gpt) and [arXiv paper](https://arxiv.org/abs/2304.03208)!
17
+
18
+ ## Model Description
19
+
20
+ The Cerebras-GPT family is released to facilitate research into LLM scaling laws using open architectures and data sets and demonstrate the simplicity of and scalability of training LLMs on the Cerebras software and hardware stack. All Cerebras-GPT models are available on Hugging Face.
21
+
22
+ The family includes 111M, 256M, 590M, 1.3B, 2.7B, 6.7B, and 13B models.
23
+
24
+ All models in the Cerebras-GPT family have been trained in accordance with [Chinchilla scaling laws](https://arxiv.org/abs/2203.15556) (20 tokens per model parameter) which is compute-optimal.
25
+
26
+ These models were trained on the [Andromeda](https://www.cerebras.net/andromeda/) AI supercomputer comprised of 16 CS-2 wafer scale systems. Cerebras' [weight streaming technology](https://www.cerebras.net/blog/linear-scaling-made-possible-with-weight-streaming) simplifies the training of LLMs by disaggregating compute from model storage. This allowed for efficient scaling of training across nodes using simple data parallelism.
27
+
28
+ Cerebras systems for pre-training and fine tuning are available in the cloud via the [Cerebras Model Studio](https://www.cerebras.net/product-cloud/). Cerebras CS-2 compatible checkpoints are available in [Cerebras Model Zoo](https://github.com/Cerebras/modelzoo).
29
+
30
+ ## Model Details
31
+ * Developed by: [Cerebras Systems](https://www.cerebras.net/)
32
+ * License: Apache 2.0
33
+ * Model type: Transformer-based Language Model
34
+ * Architecture: GPT-3 style architecture
35
+ * Data set: The Pile
36
+ * Tokenizer: Byte Pair Encoding
37
+ * Vocabulary Size: 50257
38
+ * Sequence Length: 2048
39
+ * Optimizer: AdamW, (β1, β2) = (0.9, 0.95), adam_eps = 1e−8 (1e−9 for larger models)
40
+ * Positional Encoding: Learned
41
+ * Language: English
42
+ * Learn more: Dense Scaling Laws Paper for training procedure, config files, and details on how to use.
43
+
44
+ **Contact**: To ask questions about Cerebras-GPT models, join the [Cerebras Discord](https://discord.gg/q6bZcMWJVu).
45
+
46
+ This is the standard parameterization version of Cerebras-GPT with **13B** parameters
47
+
48
+ Related models: [Cerebras-GPT Models](https://huggingface.co/models?sort=downloads&search=cerebras-gpt)
49
+
50
+ <br><br>
51
+
52
+ | Model | Parameters | Layers | d_model | Heads | d_head | d_ffn | LR | BS (seq) | BS (tokens) |
53
+ |---------------|------------|--------|---------|-------|--------|--------|----------|----------|----------------|
54
+ | Cerebras-GPT | 111M | 10 | 768 | 12 | 64 | 3072 | 6.0E-04 | 120 | 246K |
55
+ | Cerebras-GPT | 256M | 14 | 1088 | 17 | 64 | 4352 | 6.0E-04 | 264 | 541K |
56
+ | Cerebras-GPT | 590M | 18 | 1536 | 12 | 128 | 6144 | 2.0E-04 | 264 | 541K |
57
+ | Cerebras-GPT | 1.3B | 24 | 2048 | 16 | 128 | 8192 | 2.0E-04 | 528 | 1.08M |
58
+ | Cerebras-GPT | 2.7B | 32 | 2560 | 32 | 80 | 10240 | 2.0E-04 | 528 | 1.08M |
59
+ | Cerebras-GPT | 6.7B | 32 | 4096 | 32 | 128 | 16384 | 1.2E-04 | 1040 | 2.13M |
60
+ | Cerebras-GPT | 13B | 40 | 5120 | 40 | 128 | 20480 | 1.2E-04 | 720 &rarr; 1080 | 1.47M &rarr; 2.21M |
61
+
62
+ <br><br>
63
+
64
+ ## Quickstart
65
+
66
+ This model can be easily loaded using the AutoModelForCausalLM functionality:
67
+ ```python
68
+ from transformers import AutoTokenizer, AutoModelForCausalLM
69
+
70
+ tokenizer = AutoTokenizer.from_pretrained(""cerebras/Cerebras-GPT-13B"")
71
+ model = AutoModelForCausalLM.from_pretrained(""cerebras/Cerebras-GPT-13B"")
72
+
73
+ text = ""Generative AI is ""
74
+ ```
75
+
76
+ And can be used with Hugging Face Pipelines
77
+
78
+ ```python
79
+ from transformers import pipeline
80
+
81
+ pipe = pipeline(""text-generation"", model=model, tokenizer=tokenizer)
82
+ generated_text = pipe(text, max_length=50, do_sample=False, no_repeat_ngram_size=2)[0]
83
+ print(generated_text['generated_text'])
84
+ ```
85
+
86
+ or with `model.generate()`
87
+
88
+ ```python
89
+ inputs = tokenizer(text, return_tensors=""pt"")
90
+ outputs = model.generate(**inputs, num_beams=5,
91
+ max_new_tokens=50, early_stopping=True,
92
+ no_repeat_ngram_size=2)
93
+ text_output = tokenizer.batch_decode(outputs, skip_special_tokens=True)
94
+ print(text_output[0])
95
+ ```
96
+ <br><br>
97
+
98
+ ## Training data
99
+
100
+ Cerebras-GPT is trained using [the Pile](https://pile.eleuther.ai) dataset from [EleutherAI](https://www.eleuther.ai). See the [Pile paper](https://arxiv.org/abs/2101.00027) for a more detailed breakdown of data sources and methodology. The Pile was cleaned using the ftfy library to normalize the text, then filtered using scripts provided by Eleuther.
101
+
102
+ We tokenized the data using byte-pair encoding using the GPT-2 vocabulary. Our tokenized version of the Pile has 371B tokens. We include more details about the training dataset preprocessing in Appendix A.1 of our paper.
103
+
104
+ Recent works find significant duplicate data present in the Pile. Eleuther’s Pythia applies a deduplication process to reduce replicated data, decreasing the Pile dataset size. Pythia was trained on both the standard dataset and deduplicated dataset to characterize the impact. Our models are trained on the standard Pile without deduplication, which may present an opportunity for further improvement with the deduplicated data set.
105
+
106
+ <br><br>
107
+
108
+ ## Training procedure
109
+
110
+ We use the GPT-3 style model architecture. All of our layers use full attention as opposed to the GPT-3 style sparse banded attention. The model shapes were selected to either follow aspect ratio 80 or are the same shape as GPT-3 models. Learning rate warmed up for 375M tokens (1500 steps for 111M and 256M models) and 10x cosine decayed. No dropout was used and weight decay was set to 0.1. All models are trained with MSL of 2048.
111
+
112
+ All models were trained to Chinchilla point: 20 tokens per model parameter. Number of steps was chosen based on optimal batch size (varied by model) and fixed sequence length (2048). See Training Table, below, for details.
113
+
114
+ <br>
115
+
116
+ Model Params | Sequence Length | Batch Size | Number of Steps | Tokens | Tokens per Parameter | Flops
117
+ ------------ | -------------- | ---------- | --------------- | ------ | -------------------- | -----
118
+ 111M | 2048 | 120 | 9037 | 2.22E+09 | 20 | 2.6E+18
119
+ 256M | 2048 | 264 | 9468 | 5.12E+09 | 20 | 1.3E+19
120
+ 590M | 2048 | 264 | 21836 | 1.18E+10 | 20 | 6.1E+19
121
+ 1.3B | 2048 | 528 | 24334 | 2.63E+10 | 20 | 2.8E+20
122
+ 2.7B | 2048 | 528 | 49041 | 5.30E+10 | 20 | 1.1E+21
123
+ 6.7B | 2048 | 1040 | 62522 | 1.33E+11 | 20 | 6.3E+21
124
+ 13B | 2048 | 720 | 174335 | 2.57E+11 | 20 | 2.3E+22
125
+
126
+ <br><br>
127
+
128
+ ## Evaluations
129
+
130
+ We trained models from smallest to largest and fit a power law as we went along. The power law was helpful for extrapolating the validation loss of the next largest model we trained and provided confidence about whether the training run was going well.
131
+
132
+ We performed upstream (pre-training) evaluations of text prediction cross-entropy using the Pile validation and test splits. We performed downstream evaluations of text generation accuracy on standardized tasks using the [Eleuther lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness). Results are compared against many publicly available large language models in Section 3 of the paper.
133
+
134
+
135
+ #### 0-shot Evaluation
136
+ | Model | Params | Training FLOPs | PILE test xent | Hella-Swag | PIQA | Wino-Grande | Lambada | ARC-e | ARC-c | OpenBookQA | Downstream Average |
137
+ | ------- | ----- | -------------- | -------------- | ---------- | ----- | ----------- | ------- | ----- | ----- | ---------- | ------------------ |
138
+ | Cerebras-GPT | 111M | 2.6E+18 | 2.566 | 0.268 | 0.594 | 0.488 | 0.194 | 0.380 | 0.166 | 0.118 | 0.315 |
139
+ | Cerebras-GPT | 256M | 1.3E+19 | 2.299 | 0.274 | 0.613 | 0.511 | 0.293 | 0.410 | 0.170 | 0.158 | 0.347 |
140
+ | Cerebras-GPT | 590M | 6.1E+19 | 2.184 | 0.291 | 0.627 | 0.498 | 0.366 | 0.464 | 0.190 | 0.158 | 0.370 |
141
+ | Cerebras-GPT | 1.3B | 2.8E+20 | 1.996 | 0.325 | 0.664 | 0.521 | 0.462 | 0.508 | 0.224 | 0.166 | 0.410 |
142
+ | Cerebras-GPT | 2.7B | 1.1E+21 | 1.834 | 0.386 | 0.701 | 0.559 | 0.567 | 0.571 | 0.246 | 0.206 | 0.462 |
143
+ | Cerebras-GPT | 6.7B | 6.3E+21 | 1.704 | 0.447 | 0.739 | 0.602 | 0.636 | 0.643 | 0.282 | 0.238 | 0.512 |
144
+ | Cerebras-GPT | 13B | 2.3E+22 | 1.575 | 0.513 | 0.766 | 0.646 | 0.696 | 0.714 | 0.367 | 0.286 | 0.570 |
145
+
146
+ #### 5-shot Evaluation
147
+ | Model | Params | Hella-Swag | PIQA | Wino-Grande | Lambada | ARC-e | ARC-c | OpenBookQA |
148
+ | -------- | ----- | ----------| ----- | ----------- | -------| ----- | ----- | ---------- |
149
+ | Cerebras-GPT | 111M | 0.267 | 0.588 | 0.475 | 0.158 | 0.356 | 0.166 | 0.136 |
150
+ | Cerebras-GPT | 256M | 0.278 | 0.606 | 0.522 | 0.225 | 0.422 | 0.183 | 0.164 |
151
+ | Cerebras-GPT | 590M | 0.291 | 0.634 | 0.479 | 0.281 | 0.475 | 0.206 | 0.152 |
152
+ | Cerebras-GPT | 1.3B | 0.326 | 0.668 | 0.536 | 0.395 | 0.529 | 0.241 | 0.174 |
153
+ | Cerebras-GPT | 2.7B | 0.382 | 0.697 | 0.543 | 0.487 | 0.590 | 0.267 | 0.224 |
154
+ | Cerebras-GPT | 6.7B | 0.444 | 0.736 | 0.590 | 0.591 | 0.667 | 0.314 | 0.270 |
155
+ | Cerebras-GPT | 13B | 0.514 | 0.768 | 0.674 | 0.655 | 0.743 | 0.398 | 0.318 |
156
+
157
+
158
+ <br><br>
159
+
160
+ ## Uses and Limitations
161
+
162
+ ### Intended Use
163
+ The primary intended use is to further research into large language models. These models can be used as a foundation model for NLP, applications, ethics, and alignment research. Our primary intended users are researchers who are working to improve LLMs and practitioners seeking reference implementations, training setups, hyperparameters, or pre-trained models. We release these models with a fully permissive Apache license for the community to use freely.
164
+
165
+ You may fine-tune and adapt Cerebras-GPT models for deployment via either Cerebras [Model Studio](https://www.cerebras.net/product-cloud/) or third-party libraries. Further safety-related testing and mitigations should be applied beore using the Cerebras-GPT model family in production downstream applications.
166
+
167
+ Due to financial and compute budgets, Cerebras-GPT models were only trained and evaluated following the approaches described in the paper.
168
+
169
+ ### Out of Scope Use
170
+ Cerebras-GPT models are trained on the Pile, with English language only, and are not suitable for machine translation tasks.
171
+
172
+ Cerebras-GPT models have not been tuned for human-facing dialog applications like chatbots and will not respond to prompts in a similar way to models that have received instruction tuning or reinforcement learning from human feedback (RLHF) like Flan-T5 or ChatGPT. Cerebras-GPT models can be tuned using those methods.
173
+
174
+ ### Risk, Bias, Ethical Considerations
175
+ * **Data**: The Pile dataset has been thoroughly analyzed from various ethical standpoints such as toxicity analysis, gender bias, pejorative content, racially sensitive content etc. Please refer to Pile dataset references.
176
+ * **Human life**: The outputs from this model may or may not align with human values. The risk needs to be thoroughly investigated before deploying this model in a production environment where it can directly impact human life.
177
+ * **Risks and harms**: There can be distributional bias in the Pile dataset that can manifest in various forms in the downstream model deployment. There are other risks associated with large language models such as amplifying stereotypes, memorizing training data, or revealing private or secure information.
178
+ * **Mitigations**: Only mitigations in standard Pile dataset pre-processing were employed when pre-training Cerebras-GPT.
179
+
180
+ <br><br>
181
+
182
+ ## Acknowledgements
183
+
184
+ We are thankful to all Cerebras engineers, past and present, that made this work possible.","{""id"": ""cerebras/Cerebras-GPT-13B"", ""author"": ""cerebras"", ""sha"": ""0600d825e90eaf087b013c4146c1d7d460b831c0"", ""last_modified"": ""2023-11-22 21:49:12+00:00"", ""created_at"": ""2023-03-20 20:45:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 588, ""downloads_all_time"": null, ""likes"": 646, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""gpt2"", ""feature-extraction"", ""causal-lm"", ""text-generation"", ""en"", ""dataset:the_pile"", ""arxiv:2304.03208"", ""arxiv:2203.15556"", ""arxiv:2101.00027"", ""license:apache-2.0"", ""text-generation-inference"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- the_pile\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- pytorch\n- causal-lm\ninference: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""GPT2Model""], ""model_type"": ""gpt2""}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": ""feature-extraction"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""Sharathhebbar24/One-stop-for-Open-source-models"", ""gsaivinay/open_llm_leaderboard"", ""aimevzulari/Prompt_Uzmani"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""BAAI/open_flageval_vlm_leaderboard"", ""b1sheng/kg_llm_leaderboard_test"", ""neubla/neubla-llm-evaluation-board"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""shaoyie/Cerebras"", ""smothiki/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard_two"", ""wissamantoun/LLM_Detection_Attribution"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""kbmlcoding/open_llm_leaderboard_free"", ""K00B404/One-stop-till-you-drop"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""mjalg/IFEvalTR"", ""therayz1/Prompt_Engineer"", ""kayrahan/promtmuhendisi""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-22 21:49:12+00:00"", ""cardData"": ""datasets:\n- the_pile\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- pytorch\n- causal-lm\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": ""feature-extraction"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6418c60283d469238a51306f"", ""modelId"": ""cerebras/Cerebras-GPT-13B"", ""usedStorage"": 
103162871514}",0,,0,,0,,0,,0,"BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, OPTML-Group/UnlearnCanvas-Benchmark, Sharathhebbar24/One-stop-for-Open-source-models, Vikhrmodels/small-shlepa-lb, aimevzulari/Prompt_Uzmani, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=cerebras/Cerebras-GPT-13B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bcerebras%2FCerebras-GPT-13B%5D(%2Fcerebras%2FCerebras-GPT-13B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kayrahan/promtmuhendisi, therayz1/Prompt_Engineer, wissamantoun/LLM_Detection_Attribution",13
ChatTTS_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ 2Noise/ChatTTS,"---
3
+ license: cc-by-nc-4.0
4
+ library_name: chat_tts
5
+ pipeline_tag: text-to-audio
6
+ ---
7
+
8
+
9
+ **We are also training larger-scale models and need computational power and data support. If you can provide assistance, please contact OPEN-SOURCE@2NOISE.COM. Thank you very much.**
10
+
11
+ ## Clone the Repository
12
+ First, clone the Git repository:
13
+ ```bash
14
+ git clone https://github.com/2noise/ChatTTS.git
15
+ ```
16
+
17
+ ## Model Inference
18
+
19
+
20
+ ```python
21
+ # Import necessary libraries and configure settings
22
+ import torch
23
+ import torchaudio
24
+ torch._dynamo.config.cache_size_limit = 64
25
+ torch._dynamo.config.suppress_errors = True
26
+ torch.set_float32_matmul_precision('high')
27
+
28
+ import ChatTTS
29
+ from IPython.display import Audio
30
+
31
+ # Initialize and load the model:
32
+ chat = ChatTTS.Chat()
33
+ chat.load_models(compile=False) # Set to True for better performance
34
+
35
+ # Define the text input for inference (Support Batching)
36
+ texts = [
37
+ ""So we found being competitive and collaborative was a huge way of staying motivated towards our goals, so one person to call when you fall off, one person who gets you back on then one person to actually do the activity with."",
38
+ ]
39
+
40
+ # Perform inference and play the generated audio
41
+ wavs = chat.infer(texts)
42
+ Audio(wavs[0], rate=24_000, autoplay=True)
43
+
44
+ # Save the generated audio
45
+ torchaudio.save(""output.wav"", torch.from_numpy(wavs[0]), 24000)
46
+ ```
47
+ **For more usage examples, please refer to the [example notebook](https://github.com/2noise/ChatTTS/blob/main/example.ipynb), which includes parameters for finer control over the generated speech, such as specifying the speaker, adjusting speech speed, and adding laughter.**
48
+
49
+
50
+
51
+
52
+
53
+
54
+ ### Disclaimer: For Academic Purposes Only
55
+
56
+ The information provided in this document is for academic purposes only. It is intended for educational and research use, and should not be used for any commercial or legal purposes. The authors do not guarantee the accuracy, completeness, or reliability of the information.","{""id"": ""2Noise/ChatTTS"", ""author"": ""2Noise"", ""sha"": ""1a3c04a8b0651689bd9242fbb55b1f4b5a9aef84"", ""last_modified"": ""2024-10-22 08:26:20+00:00"", ""created_at"": ""2024-05-25 06:07:38+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2918, ""downloads_all_time"": null, ""likes"": 1551, ""library_name"": ""chat_tts"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""chat_tts"", ""safetensors"", ""text-to-audio"", ""license:cc-by-nc-4.0"", ""region:us""], ""pipeline_tag"": ""text-to-audio"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: chat_tts\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-audio"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/DVAE.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/DVAE.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/DVAE_full.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/Decoder.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/Decoder.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/Embed.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/GPT.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/Vocos.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/Vocos.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/gpt/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/gpt/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/spk_stat.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/tokenizer.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/tokenizer/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='asset/tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config/decoder.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config/dvae.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config/gpt.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config/path.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config/vocos.yaml', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Dzkaka/ChatTTS"", ""lenML/ChatTTS-Forge"", ""Hilley/ChatTTS-OpenVoice"", ""markmagic/ChatTTS"", ""Hilley/ChatVC"", ""wffcyrus/ChatTTS-Story-Telling"", ""6Simple9/ChatTTS-OpenVoice"", ""fcyai/ChatTTS"", ""rao223/ChatTTS-Forge"", ""chenmgtea/chat-tts"", ""doby4u/chattts"", ""cbhhhcb/ChatTTS"", ""zelk12/ChatTTS-Forge_English_interface"", ""rainnee0925/ChatTTS"", ""prajjwalkapoor/tts"", ""fcyai/ChatTTS-Story-Telling"", ""sysf/ChatTTS"", ""savokiss/ChatTTS"", ""savokiss/chattts-free"", ""arpy8/chattts"", ""lisongfeng/ChatTTS-WebUI"", ""rainnee/ChatTTS"", ""wffcyrus/ChatTTS-Forge"", ""zzhouz/learningself"", ""docs4dev/gptalk"", ""sandy-try/ChatTTS-Forge"", ""slingkid/ChatVC2"", ""panyanyany/ChatTTS"", ""slingkid/ChatVC4"", ""Rdtuetr/ChatTTS"", ""hikerxu/ChatTTS"", ""zhzabcd/ChatTTS-Forge"", ""jdhsi/ChatTTS"", ""AwesomeK/ChatTTS-OpenVoice"", ""zhengr/ChatTTS-Forge"", 
""zhengr/ChatTTS2"", ""weismart1807/Linly-Talker"", ""emilalvaro/ChatTTS-OpenVoice"", ""vuxuanhoan/ChatTTS-Forge"", ""MaktubCN/Chat-TTS"", ""EZMODEL/chattts-free"", ""chenjacky131/ChatTTS-Forge"", ""lalalic/chattts"", ""yamazing/ChatTTS"", ""zhangyanhua0913/ChatTTS-OpenVoice"", ""thierryguyot67/tts"", ""EagleW96/CC_ChatTTS_demo""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-22 08:26:20+00:00"", ""cardData"": ""library_name: chat_tts\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-audio"", ""transformersInfo"": null, ""_id"": ""6651802a815d7642d5aaef56"", ""modelId"": ""2Noise/ChatTTS"", ""usedStorage"": 2364745599}",0,,0,,0,,0,,0,"6Simple9/ChatTTS-OpenVoice, Dzkaka/ChatTTS, EagleW96/CC_ChatTTS_demo, Hilley/ChatTTS-OpenVoice, Hilley/ChatVC, cbhhhcb/ChatTTS, huggingface/InferenceSupport/discussions/546, lenML/ChatTTS-Forge, panyanyany/ChatTTS, rao223/ChatTTS-Forge, savokiss/chattts-free, wffcyrus/ChatTTS-Forge, zelk12/ChatTTS-Forge_English_interface",13
CogVideoX-5b-I2V_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ THUDM/CogVideoX-5b-I2V,"---
3
+ license: other
4
+ license_link: https://huggingface.co/THUDM/CogVideoX-5b-I2V/blob/main/LICENSE
5
+ language:
6
+ - en
7
+ tags:
8
+ - cogvideox
9
+ - video-generation
10
+ - thudm
11
+ - image-to-video
12
+ inference: false
13
+ ---
14
+
15
+ # CogVideoX-5B-I2V
16
+
17
+ <p style=""text-align: center;"">
18
+ <div align=""center"">
19
+ <img src=https://github.com/THUDM/CogVideo/raw/main/resources/logo.svg width=""50%""/>
20
+ </div>
21
+ <p align=""center"">
22
+ <a href=""https://huggingface.co/THUDM//CogVideoX-5b-I2V/blob/main/README.md"">📄 Read in English</a> |
23
+ <a href=""https://huggingface.co/spaces/THUDM/CogVideoX-5B-Space"">🤗 Huggingface Space</a> |
24
+ <a href=""https://github.com/THUDM/CogVideo"">🌐 Github </a> |
25
+ <a href=""https://arxiv.org/pdf/2408.06072"">📜 arxiv </a>
26
+ </p>
27
+ <p align=""center"">
28
+ 📍 Visit <a href=""https://chatglm.cn/video?fr=osm_cogvideox"">Qingying</a> and <a href=""https://open.bigmodel.cn/?utm_campaign=open&_channel_track_key=OWTVNma9"">API Platform</a> for the commercial version of the video generation model
29
+ </p>
30
+
31
+ ## Model Introduction
32
+
33
+ CogVideoX is an open-source video generation model originating
34
+ from [Qingying](https://chatglm.cn/video?fr=osm_cogvideo). The table below presents information related to the video
35
+ generation models we offer in this version.
36
+
37
+ <table style=""border-collapse: collapse; width: 100%;"">
38
+ <tr>
39
+ <th style=""text-align: center;"">Model Name</th>
40
+ <th style=""text-align: center;"">CogVideoX-2B</th>
41
+ <th style=""text-align: center;"">CogVideoX-5B</th>
42
+ <th style=""text-align: center;"">CogVideoX-5B-I2V (This Repository)</th>
43
+ </tr>
44
+ <tr>
45
+ <td style=""text-align: center;"">Model Description</td>
46
+ <td style=""text-align: center;"">Entry-level model, balancing compatibility. Low cost for running and secondary development.</td>
47
+ <td style=""text-align: center;"">Larger model with higher video generation quality and better visual effects.</td>
48
+ <td style=""text-align: center;"">CogVideoX-5B image-to-video version.</td>
49
+ </tr>
50
+ <tr>
51
+ <td style=""text-align: center;"">Inference Precision</td>
52
+ <td style=""text-align: center;""><b>FP16*(recommended)</b>, BF16, FP32, FP8*, INT8, not supported: INT4</td>
53
+ <td colspan=""2"" style=""text-align: center;""><b>BF16 (recommended)</b>, FP16, FP32, FP8*, INT8, not supported: INT4</td>
54
+ </tr>
55
+ <tr>
56
+ <td style=""text-align: center;"">Single GPU Memory Usage<br></td>
57
+ <td style=""text-align: center;""><a href=""https://github.com/THUDM/SwissArmyTransformer"">SAT</a> FP16: 18GB <br><b>diffusers FP16: from 4GB* </b><br><b>diffusers INT8 (torchao): from 3.6GB*</b></td>
58
+ <td colspan=""2"" style=""text-align: center;""><a href=""https://github.com/THUDM/SwissArmyTransformer"">SAT</a> BF16: 26GB <br><b>diffusers BF16: from 5GB* </b><br><b>diffusers INT8 (torchao): from 4.4GB*</b></td>
59
+ </tr>
60
+ <tr>
61
+ <td style=""text-align: center;"">Multi-GPU Inference Memory Usage</td>
62
+ <td style=""text-align: center;""><b>FP16: 10GB* using diffusers</b><br></td>
63
+ <td colspan=""2"" style=""text-align: center;""><b>BF16: 15GB* using diffusers</b><br></td>
64
+ </tr>
65
+ <tr>
66
+ <td style=""text-align: center;"">Inference Speed<br>(Step = 50, FP/BF16)</td>
67
+ <td style=""text-align: center;"">Single A100: ~90 seconds<br>Single H100: ~45 seconds</td>
68
+ <td colspan=""2"" style=""text-align: center;"">Single A100: ~180 seconds<br>Single H100: ~90 seconds</td>
69
+ </tr>
70
+ <tr>
71
+ <td style=""text-align: center;"">Fine-tuning Precision</td>
72
+ <td style=""text-align: center;""><b>FP16</b></td>
73
+ <td colspan=""2"" style=""text-align: center;""><b>BF16</b></td>
74
+ </tr>
75
+ <tr>
76
+ <td style=""text-align: center;"">Fine-tuning Memory Usage</td>
77
+ <td style=""text-align: center;"">47 GB (bs=1, LORA)<br> 61 GB (bs=2, LORA)<br> 62GB (bs=1, SFT)</td>
78
+ <td style=""text-align: center;"">63 GB (bs=1, LORA)<br> 80 GB (bs=2, LORA)<br> 75GB (bs=1, SFT)<br></td>
79
+ <td style=""text-align: center;"">78 GB (bs=1, LORA)<br> 75GB (bs=1, SFT, 16GPU)<br></td>
80
+ </tr>
81
+ <tr>
82
+ <td style=""text-align: center;"">Prompt Language</td>
83
+ <td colspan=""3"" style=""text-align: center;"">English*</td>
84
+ </tr>
85
+ <tr>
86
+ <td style=""text-align: center;"">Maximum Prompt Length</td>
87
+ <td colspan=""3"" style=""text-align: center;"">226 Tokens</td>
88
+ </tr>
89
+ <tr>
90
+ <td style=""text-align: center;"">Video Length</td>
91
+ <td colspan=""3"" style=""text-align: center;"">6 Seconds</td>
92
+ </tr>
93
+ <tr>
94
+ <td style=""text-align: center;"">Frame Rate</td>
95
+ <td colspan=""3"" style=""text-align: center;"">8 Frames / Second</td>
96
+ </tr>
97
+ <tr>
98
+ <td style=""text-align: center;"">Video Resolution</td>
99
+ <td colspan=""3"" style=""text-align: center;"">720 x 480, no support for other resolutions (including fine-tuning)</td>
100
+ </tr>
101
+ <tr>
102
+ <td style=""text-align: center;"">Position Embedding</td>
103
+ <td style=""text-align: center;"">3d_sincos_pos_embed</td>
104
+ <td style=""text-align: center;"">3d_rope_pos_embed</td>
105
+ <td style=""text-align: center;"">3d_rope_pos_embed + learnable_pos_embed</td>
106
+ </tr>
107
+ </table>
108
+
109
+ **Data Explanation**
110
+
111
+ + While testing using the diffusers library, all optimizations included in the diffusers library were enabled. This
112
+ scheme has not been tested for actual memory usage on devices outside of **NVIDIA A100 / H100** architectures.
113
+ Generally, this scheme can be adapted to all **NVIDIA Ampere architecture** and above devices. If optimizations are
114
+ disabled, memory consumption will multiply, with peak memory usage being about 3 times the value in the table.
115
+ However, speed will increase by about 3-4 times. You can selectively disable some optimizations, including:
116
+
117
+ ```
118
+ pipe.enable_sequential_cpu_offload()
119
+ pipe.vae.enable_slicing()
120
+ pipe.vae.enable_tiling()
121
+ ```
122
+
123
+ + For multi-GPU inference, the `enable_sequential_cpu_offload()` optimization needs to be disabled.
124
+ + Using INT8 models will slow down inference, which is done to accommodate lower-memory GPUs while maintaining minimal
125
+ video quality loss, though inference speed will significantly decrease.
126
+ + The CogVideoX-2B model was trained in `FP16` precision, and all CogVideoX-5B models were trained in `BF16` precision.
127
+ We recommend using the precision in which the model was trained for inference.
128
+ + [PytorchAO](https://github.com/pytorch/ao) and [Optimum-quanto](https://github.com/huggingface/optimum-quanto/) can be
129
+ used to quantize the text encoder, transformer, and VAE modules to reduce the memory requirements of CogVideoX. This
130
+ allows the model to run on free T4 Colabs or GPUs with smaller memory! Also, note that TorchAO quantization is fully
131
+ compatible with `torch.compile`, which can significantly improve inference speed. FP8 precision must be used on
132
+ devices with NVIDIA H100 and above, requiring source installation of `torch`, `torchao`, `diffusers`, and `accelerate`
133
+ Python packages. CUDA 12.4 is recommended.
134
+ + The inference speed tests also used the above memory optimization scheme. Without memory optimization, inference speed
135
+ increases by about 10%. Only the `diffusers` version of the model supports quantization.
136
+ + The model only supports English input; other languages can be translated into English for use via large model
137
+ refinement.
138
+ + The memory usage of model fine-tuning is tested in an `8 * H100` environment, and the program automatically
139
+ uses `Zero 2` optimization. If a specific number of GPUs is marked in the table, that number or more GPUs must be used
140
+ for fine-tuning.
141
+
142
+ **Reminders**
143
+
144
+ + Use [SAT](https://github.com/THUDM/SwissArmyTransformer) for inference and fine-tuning SAT version models. Feel free
145
+ to visit our GitHub for more details.
146
+
147
+ ## Getting Started Quickly 🤗
148
+
149
+ This model supports deployment using the Hugging Face diffusers library. You can follow the steps below to get started.
150
+
151
+ **We recommend that you visit our [GitHub](https://github.com/THUDM/CogVideo) to check out prompt optimization and
152
+ conversion to get a better experience.**
153
+
154
+ 1. Install the required dependencies
155
+
156
+ ```shell
157
+ # diffusers>=0.30.3
158
+ # transformers>=0.44.2
159
+ # accelerate>=0.34.0
160
+ # imageio-ffmpeg>=0.5.1
161
+ pip install --upgrade transformers accelerate diffusers imageio-ffmpeg
162
+ ```
163
+
164
+ 2. Run the code
165
+
166
+ ```python
167
+ import torch
168
+ from diffusers import CogVideoXImageToVideoPipeline
169
+ from diffusers.utils import export_to_video, load_image
170
+
171
+ prompt = ""A little girl is riding a bicycle at high speed. Focused, detailed, realistic.""
172
+ image = load_image(image=""input.jpg"")
173
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
174
+ ""THUDM/CogVideoX-5b-I2V"",
175
+ torch_dtype=torch.bfloat16
176
+ )
177
+
178
+ pipe.enable_sequential_cpu_offload()
179
+ pipe.vae.enable_tiling()
180
+ pipe.vae.enable_slicing()
181
+
182
+ video = pipe(
183
+ prompt=prompt,
184
+ image=image,
185
+ num_videos_per_prompt=1,
186
+ num_inference_steps=50,
187
+ num_frames=49,
188
+ guidance_scale=6,
189
+ generator=torch.Generator(device=""cuda"").manual_seed(42),
190
+ ).frames[0]
191
+
192
+ export_to_video(video, ""output.mp4"", fps=8)
193
+ ```
194
+
195
+ ## Quantized Inference
196
+
197
+ [PytorchAO](https://github.com/pytorch/ao) and [Optimum-quanto](https://github.com/huggingface/optimum-quanto/) can be
198
+ used to quantize the text encoder, transformer, and VAE modules to reduce CogVideoX's memory requirements. This allows
199
+ the model to run on free T4 Colab or GPUs with lower VRAM! Also, note that TorchAO quantization is fully compatible
200
+ with `torch.compile`, which can significantly accelerate inference.
201
+
202
+ ```
203
+ # To get started, PytorchAO needs to be installed from the GitHub source and PyTorch Nightly.
204
+ # Source and nightly installation is only required until the next release.
205
+
206
+ import torch
207
+ from diffusers import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel, CogVideoXImageToVideoPipeline
208
+ from diffusers.utils import export_to_video, load_image
209
+ from transformers import T5EncoderModel
210
+ from torchao.quantization import quantize_, int8_weight_only
211
+
212
+ quantization = int8_weight_only
213
+
214
+ text_encoder = T5EncoderModel.from_pretrained(""THUDM/CogVideoX-5b-I2V"", subfolder=""text_encoder"", torch_dtype=torch.bfloat16)
215
+ quantize_(text_encoder, quantization())
216
+
217
+ transformer = CogVideoXTransformer3DModel.from_pretrained(""THUDM/CogVideoX-5b-I2V"",subfolder=""transformer"", torch_dtype=torch.bfloat16)
218
+ quantize_(transformer, quantization())
219
+
220
+ vae = AutoencoderKLCogVideoX.from_pretrained(""THUDM/CogVideoX-5b-I2V"", subfolder=""vae"", torch_dtype=torch.bfloat16)
221
+ quantize_(vae, quantization())
222
+
223
+ # Create pipeline and run inference
224
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
225
+ ""THUDM/CogVideoX-5b-I2V"",
226
+ text_encoder=text_encoder,
227
+ transformer=transformer,
228
+ vae=vae,
229
+ torch_dtype=torch.bfloat16,
230
+ )
231
+
232
+ pipe.enable_model_cpu_offload()
233
+ pipe.vae.enable_tiling()
234
+ pipe.vae.enable_slicing()
235
+
236
+ prompt = ""A little girl is riding a bicycle at high speed. Focused, detailed, realistic.""
237
+ image = load_image(image=""input.jpg"")
238
+ video = pipe(
239
+ prompt=prompt,
240
+ image=image,
241
+ num_videos_per_prompt=1,
242
+ num_inference_steps=50,
243
+ num_frames=49,
244
+ guidance_scale=6,
245
+ generator=torch.Generator(device=""cuda"").manual_seed(42),
246
+ ).frames[0]
247
+
248
+ export_to_video(video, ""output.mp4"", fps=8)
249
+ ```
250
+
251
+ Additionally, these models can be serialized and stored using PytorchAO in quantized data types to save disk space. You
252
+ can find examples and benchmarks at the following links:
253
+
254
+ - [torchao](https://gist.github.com/a-r-r-o-w/4d9732d17412888c885480c6521a9897)
255
+ - [quanto](https://gist.github.com/a-r-r-o-w/31be62828b00a9292821b85c1017effa)
256
+
257
+ ## Further Exploration
258
+
259
+ Feel free to enter our [GitHub](https://github.com/THUDM/CogVideo), where you'll find:
260
+
261
+ 1. More detailed technical explanations and code.
262
+ 2. Optimized prompt examples and conversions.
263
+ 3. Detailed code for model inference and fine-tuning.
264
+ 4. Project update logs and more interactive opportunities.
265
+ 5. CogVideoX toolchain to help you better use the model.
266
+ 6. INT8 model inference code.
267
+
268
+ ## Model License
269
+
270
+ This model is released under the [CogVideoX LICENSE](LICENSE).
271
+
272
+ ## Citation
273
+
274
+ ```
275
+ @article{yang2024cogvideox,
276
+ title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
277
+ author={Yang, Zhuoyi and Teng, Jiayan and Zheng, Wendi and Ding, Ming and Huang, Shiyu and Xu, Jiazheng and Yang, Yuanming and Hong, Wenyi and Zhang, Xiaohan and Feng, Guanyu and others},
278
+ journal={arXiv preprint arXiv:2408.06072},
279
+ year={2024}
280
+ }
281
+ ```","{""id"": ""THUDM/CogVideoX-5b-I2V"", ""author"": ""THUDM"", ""sha"": ""a6f0f4858a8395e7429d82493864ce92bf73af11"", ""last_modified"": ""2024-11-23 06:34:36+00:00"", ""created_at"": ""2024-09-16 02:57:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 88546, ""downloads_all_time"": null, ""likes"": 288, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""cogvideox"", ""video-generation"", ""thudm"", ""image-to-video"", ""en"", ""arxiv:2408.06072"", ""license:other"", ""diffusers:CogVideoXImageToVideoPipeline"", ""region:us""], ""pipeline_tag"": ""image-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: other\nlicense_link: https://huggingface.co/THUDM/CogVideoX-5b-I2V/blob/main/LICENSE\ntags:\n- cogvideox\n- video-generation\n- thudm\n- image-to-video\ninference: false"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""CogVideoXImageToVideoPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README_zh.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00002-of-00002.safetensors', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/spiece.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00001-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00002-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00003-of-00003.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""fffiloni/DimensionX"", ""wileewang/TransPixar"", ""Skywork/skyreels-a1-talking-head"", ""guardiancc/image-to-video-cog"", ""ShuoChen20/DimensionX"", ""peterpeter8585/image-to-video-cog"", ""fffiloni/Go-With-The-Flow"", ""RobinsAIWorld/image-to-video-cog"", ""LTT/Kiss3DGen"", ""AmberHeart/AetherV1"", ""JoPmt/ConsisID"", ""theSure/Omnieraser"", ""akthangdz/TEXT_TO_VIDEO"", ""PengWeixuanSZU/Senorita"", ""Felguk/Decraft"", ""123LETSPLAY/hello"", ""123LETSPLAY/imagetovideo"", ""123LETSPLAY/imagetovid-try2"", ""patrickblanks/DimensionX"", ""waloneai/image-to-video-cog"", ""OneOverZero/Go-With-The-Flow"", ""shaaravpawar/image-video"", ""Nymbo/DimensionX"", 
""tsqn/CogVideoX-5B-24frames_20steps-low_vram"", ""IseIcyEyes/DimensionX"", ""fsfsess/DimensionX"", ""grbell/HotdogOrNot"", ""meepmoo/ConsisID"", ""batkovdev/image-to-video-cog"", ""svjack/VideoModelStudio"", ""morbiwalaq/Text2Vid-AI"", ""etherealbeats/image-to-video-cog"", ""Jdp1985/skyreels-a1-talking-head"", ""beowcow/skyreels-a1-talking-head"", ""makululinux/image2video""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-23 06:34:36+00:00"", ""cardData"": ""language:\n- en\nlicense: other\nlicense_link: https://huggingface.co/THUDM/CogVideoX-5b-I2V/blob/main/LICENSE\ntags:\n- cogvideox\n- video-generation\n- thudm\n- image-to-video\ninference: false"", ""transformersInfo"": null, ""_id"": ""66e79eb10cd314a638cf0148"", ""modelId"": ""THUDM/CogVideoX-5b-I2V"", ""usedStorage"": 22072095223}",0,,0,https://huggingface.co/BelGio13/cogvideoX-I2V-locobot,1,https://huggingface.co/Skywork/SkyReels-A1,1,,0,"AmberHeart/AetherV1, Felguk/Decraft, JoPmt/ConsisID, PengWeixuanSZU/Senorita, RobinsAIWorld/image-to-video-cog, ShuoChen20/DimensionX, Skywork/skyreels-a1-talking-head, THUDM/CogVideoX-5B-Space, akthangdz/TEXT_TO_VIDEO, guardiancc/image-to-video-cog, huggingface/InferenceSupport/discussions/new?title=THUDM/CogVideoX-5b-I2V&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTHUDM%2FCogVideoX-5b-I2V%5D(%2FTHUDM%2FCogVideoX-5b-I2V)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, peterpeter8585/image-to-video-cog, theSure/Omnieraser, wileewang/TransPixar",14
ControlNetMediaPipeFace_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ CrucibleAI/ControlNetMediaPipeFace,"---
3
+ language:
4
+ - en
5
+ thumbnail: ''
6
+ tags:
7
+ - controlnet
8
+ - laion
9
+ - face
10
+ - mediapipe
11
+ - image-to-image
12
+ license: openrail
13
+ base_model: stabilityai/stable-diffusion-2-1-base
14
+ datasets:
15
+ - LAION-Face
16
+ - LAION
17
+ pipeline_tag: image-to-image
18
+ ---
19
+
20
+ # ControlNet LAION Face Dataset
21
+
22
+ ## Table of Contents:
23
+ - Overview: Samples, Contents, and Construction
24
+ - Usage: Downloading, Training, and Inference
25
+ - License
26
+ - Credits and Thanks
27
+
28
+ # Overview:
29
+
30
+ This dataset is designed to train a ControlNet with human facial expressions. It includes keypoints for pupils to allow gaze direction. Training has been tested on Stable Diffusion v2.1 base (512) and Stable Diffusion v1.5.
31
+
32
+ ## Samples:
33
+
34
+ Cherry-picked from ControlNet + Stable Diffusion v2.1 Base
35
+
36
+ |Input|Face Detection|Output|
37
+ |:---:|:---:|:---:|
38
+ |<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/happy_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/happy_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/happy_result.png"">|
39
+ |<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/neutral_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/neutral_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/neutral_result.png"">|
40
+ |<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sad_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sad_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sad_result.png"">|
41
+ |<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/screaming_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/screaming_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/screaming_result.png"">|
42
+ |<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sideways_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sideways_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/sideways_result.png"">|
43
+ |<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/surprised_source.jpg"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/surprised_annotation.png"">|<img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/surprised_result.png"">|
44
+
45
+ Images with multiple faces are also supported:
46
+
47
+ <img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/family_source.jpg"">
48
+
49
+ <img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/family_annotation.png"">
50
+
51
+ <img src=""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/family_result.png"">
52
+
53
+
54
+ ## Dataset Contents:
55
+
56
+ - train_laion_face.py - Entrypoint for ControlNet training.
57
+ - laion_face_dataset.py - Code for performing dataset iteration. Cropping and resizing happens here.
58
+ - tool_download_face_targets.py - A tool to read metadata.json and populate the target folder.
59
+ - tool_generate_face_poses.py - The original file used to generate the source images. Included for reproducibility, but not required for training.
60
+ - training/laion-face-processed/prompt.jsonl - Read by laion_face_dataset. Includes prompts for the images.
61
+ - training/laion-face-processed/metadata.json - Excerpts from LAION for the relevant data. Also used for downloading the target dataset.
62
+ - training/laion-face-processed/source/xxxxxxxxx.jpg - Images with detections performed. Generated from the target images.
63
+ - training/laion-face-processed/target/xxxxxxxxx.jpg - Selected images from LAION Face.
64
+
65
+ ## Dataset Construction:
66
+
67
+ Source images were generated by pulling slice 00000 from LAION Face and passing them through MediaPipe's face detector with special configuration parameters.
68
+
69
+ The colors and line thicknesses used for MediaPipe are as follows:
70
+
71
+ ```
72
+ f_thick = 2
73
+ f_rad = 1
74
+ right_iris_draw = DrawingSpec(color=(10, 200, 250), thickness=f_thick, circle_radius=f_rad)
75
+ right_eye_draw = DrawingSpec(color=(10, 200, 180), thickness=f_thick, circle_radius=f_rad)
76
+ right_eyebrow_draw = DrawingSpec(color=(10, 220, 180), thickness=f_thick, circle_radius=f_rad)
77
+ left_iris_draw = DrawingSpec(color=(250, 200, 10), thickness=f_thick, circle_radius=f_rad)
78
+ left_eye_draw = DrawingSpec(color=(180, 200, 10), thickness=f_thick, circle_radius=f_rad)
79
+ left_eyebrow_draw = DrawingSpec(color=(180, 220, 10), thickness=f_thick, circle_radius=f_rad)
80
+ mouth_draw = DrawingSpec(color=(10, 180, 10), thickness=f_thick, circle_radius=f_rad)
81
+ head_draw = DrawingSpec(color=(10, 200, 10), thickness=f_thick, circle_radius=f_rad)
82
+
83
+ iris_landmark_spec = {468: right_iris_draw, 473: left_iris_draw}
84
+ ```
85
+
86
+ We have implemented a method named `draw_pupils` which modifies some functionality from MediaPipe. It exists as a stopgap until some pending changes are merged.
87
+
88
+
89
+ # Usage:
90
+
91
+ The containing ZIP file should be decompressed into the root of the ControlNet directory. The `train_laion_face.py`, `laion_face_dataset.py`, and other `.py` files should sit adjacent to `tutorial_train.py` and `tutorial_train_sd21.py`. We are assuming a checkout of the ControlNet repo at 0acb7e5, but there is no direct dependency on the repository.
92
+
93
+ ## Downloading:
94
+
95
+ For copyright reasons, we cannot include the original target files. We have provided a script (tool_download_face_targets.py) which will read from training/laion-face-processed/metadata.json and populate the target folder. This file has no requirements, but will use tqdm if it is installed.
96
+
97
+ ## Training:
98
+
99
+ When the targets folder is fully populated, training can be run on a machine with at least 24 gigabytes of VRAM. Our model was trained for 200 hours (four epochs) on an A6000.
100
+
101
+ ```bash
102
+ python tool_add_control.py ./models/v1-5-pruned-emaonly.ckpt ./models/controlnet_sd15_laion_face.ckpt
103
+ python ./train_laion_face_sd15.py
104
+ ```
105
+
106
+ ## Inference:
107
+
108
+ We have provided `gradio_face2image.py`. Update the following two lines to point them to your trained model.
109
+
110
+ ```
111
+ model = create_model('./models/cldm_v21.yaml').cpu() # If you fine-tune on SD2.1 base, this does not need to change.
112
+ model.load_state_dict(load_state_dict('./models/control_sd21_openpose.pth', location='cuda'))
113
+ ```
114
+
115
+ The model has some limitations: while it is empirically better at tracking gaze and mouth poses than previous attempts, it may still ignore controls. Adding details to the prompt like, ""looking right"" can abate bad behavior.
116
+
117
+ ## 🧨 Diffusers
118
+
119
+ It is recommended to use the checkpoint with [Stable Diffusion 2.1 - Base](stabilityai/stable-diffusion-2-1-base) as the checkpoint has been trained on it.
120
+ Experimentally, the checkpoint can be used with other diffusion models such as dreamboothed stable diffusion.
121
+
122
+ To use with Stable Diffusion 1.5, insert `subfolder=""diffusion_sd15""` into the from_pretrained arguments. A v1.5 half-precision variant is provided but untested.
123
+
124
+ 1. Install `diffusers` and related packages:
125
+ ```
126
+ $ pip install diffusers transformers accelerate
127
+ ```
128
+
129
+ 2. Run code:
130
+ ```py
131
+ from PIL import Image
132
+ import numpy as np
133
+ import torch
134
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
135
+ from diffusers.utils import load_image
136
+
137
+ image = load_image(
138
+ ""https://huggingface.co/CrucibleAI/ControlNetMediaPipeFace/resolve/main/samples_laion_face_dataset/family_annotation.png""
139
+ )
140
+
141
+ # Stable Diffusion 2.1-base:
142
+ controlnet = ControlNetModel.from_pretrained(""CrucibleAI/ControlNetMediaPipeFace"", torch_dtype=torch.float16, variant=""fp16"")
143
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
144
+ ""stabilityai/stable-diffusion-2-1-base"", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
145
+ )
146
+ # OR
147
+ # Stable Diffusion 1.5:
148
+ controlnet = ControlNetModel.from_pretrained(""CrucibleAI/ControlNetMediaPipeFace"", subfolder=""diffusion_sd15"")
149
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(""runwayml/stable-diffusion-v1-5"", controlnet=controlnet, safety_checker=None)
150
+
151
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
152
+
153
+ # Remove if you do not have xformers installed
154
+ # see https://huggingface.co/docs/diffusers/v0.13.0/en/optimization/xformers#installing-xformers
155
+ # for installation instructions
156
+ pipe.enable_xformers_memory_efficient_attention()
157
+ pipe.enable_model_cpu_offload()
158
+
159
+ image = pipe(""a happy family at a dentist advertisement"", image=image, num_inference_steps=30).images[0]
160
+ image.save('./images.png')
161
+ ```
162
+
163
+
164
+ # License:
165
+
166
+ ### Source Images: (/training/laion-face-processed/source/)
167
+ This work is marked with CC0 1.0. To view a copy of this license, visit http://creativecommons.org/publicdomain/zero/1.0
168
+
169
+ ### Trained Models:
170
+ Our trained ControlNet checkpoints are released under CreativeML Open RAIL-M.
171
+
172
+ ### Source Code:
173
+ lllyasviel/ControlNet is licensed under the Apache License 2.0
174
+
175
+ Our modifications are released under the same license.
176
+
177
+
178
+ # Credits and Thanks:
179
+
180
+ Greatest thanks to Zhang et al. for ControlNet, Rombach et al. (StabilityAI) for Stable Diffusion, and Schuhmann et al. for LAION.
181
+
182
+ Sample images for this document were obtained from Unsplash and are CC0.
183
+
184
+ ```
185
+ @misc{zhang2023adding,
186
+ title={Adding Conditional Control to Text-to-Image Diffusion Models},
187
+ author={Lvmin Zhang and Maneesh Agrawala},
188
+ year={2023},
189
+ eprint={2302.05543},
190
+ archivePrefix={arXiv},
191
+ primaryClass={cs.CV}
192
+ }
193
+
194
+ @misc{rombach2021highresolution,
195
+ title={High-Resolution Image Synthesis with Latent Diffusion Models},
196
+ author={Robin Rombach and Andreas Blattmann and Dominik Lorenz and Patrick Esser and Björn Ommer},
197
+ year={2021},
198
+ eprint={2112.10752},
199
+ archivePrefix={arXiv},
200
+ primaryClass={cs.CV}
201
+ }
202
+
203
+ @misc{schuhmann2022laion5b,
204
+ title={LAION-5B: An open large-scale dataset for training next generation image-text models},
205
+ author={Christoph Schuhmann and Romain Beaumont and Richard Vencu and Cade Gordon and Ross Wightman and Mehdi Cherti and Theo Coombes and Aarush Katta and Clayton Mullis and Mitchell Wortsman and Patrick Schramowski and Srivatsa Kundurthy and Katherine Crowson and Ludwig Schmidt and Robert Kaczmarczyk and Jenia Jitsev},
206
+ year={2022},
207
+ eprint={2210.08402},
208
+ archivePrefix={arXiv},
209
+ primaryClass={cs.CV}
210
+ }
211
+ ```
212
+
213
+ This project was made possible by Crucible AI.","{""id"": ""CrucibleAI/ControlNetMediaPipeFace"", ""author"": ""CrucibleAI"", ""sha"": ""f6ed75cc495674bea8bf7409ef3d0e5bfb7d8c90"", ""last_modified"": ""2023-05-19 19:32:02+00:00"", ""created_at"": ""2023-03-30 18:28:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1595, ""downloads_all_time"": null, ""likes"": 566, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""controlnet"", ""laion"", ""face"", ""mediapipe"", ""image-to-image"", ""en"", ""dataset:LAION-Face"", ""dataset:LAION"", ""arxiv:2302.05543"", ""arxiv:2112.10752"", ""arxiv:2210.08402"", ""base_model:stabilityai/stable-diffusion-2-1-base"", ""base_model:adapter:stabilityai/stable-diffusion-2-1-base"", ""license:openrail"", ""region:us""], ""pipeline_tag"": ""image-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: stabilityai/stable-diffusion-2-1-base\ndatasets:\n- LAION-Face\n- LAION\nlanguage:\n- en\nlicense: openrail\npipeline_tag: image-to-image\ntags:\n- controlnet\n- laion\n- face\n- mediapipe\n- image-to-image"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd15_mediapipe_face.full.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd15_mediapipe_face.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd15_mediapipe_face.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd15_mediapipe_face.yaml', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='control_v2p_sd21_mediapipe_face.full.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd21_mediapipe_face.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd21_mediapipe_face.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='control_v2p_sd21_mediapipe_face.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_sd15/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_sd15/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_sd15/diffusion_pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_sd15/diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='gradio_face2image.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='laion_face_common.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='laion_face_dataset.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/family_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/family_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/family_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/happy_annotation.png', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='samples_laion_face_dataset/happy_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/happy_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/neutral_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/neutral_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/neutral_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sad_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sad_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sad_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/screaming_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/screaming_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/screaming_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sideways_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sideways_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/sideways_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/surprised_annotation.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/surprised_result.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='samples_laion_face_dataset/surprised_source.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tool_download_face_targets.py', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='tool_generate_face_poses.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_laion_face.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train_laion_face_sd15.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""CrucibleAI/ControlNetMediaPipeFaceSD21"", ""KumaPower/AvatarArtist"", ""TheNetherWatcher/Vid2Vid-using-Text-prompt"", ""MA9149210776/CrucibleAI-ControlNetMediaPipeFace"", ""RickyD/CrucibleAI-ControlNetMediaPipeFaceStreamlit"", ""krnl/venereital-IA-23-space"", ""nehho/CrucibleAI-ControlNetMediaPipeFace"", ""teganmosi/CrucibleAI-ControlNetMediaPipeFace"", ""tellview/CrucibleAI-ControlNetMediaPipeFace"", ""aixk/CrucibleAI-ControlNetMediaPipeFace"", ""satyac/CrucibleAI-ControlNetMediaPipeFace"", ""tidy/CrucibleAI-ControlNetMediaPipeFace"", ""Rooni/ImgToImg"", ""ZeLeL/CrucibleAI-ControlNetMediaPipeFace"", ""Akay2024/CrucibleAI-ControlNetMediaPipeFace"", ""Testboydood/CrucibleAI-ControlNetMediaPipeFace"", ""anonicloudmail/test"", ""hohuiking/CrucibleAI-ControlNetMediaPipeFace"", ""tejas56789ce/CrucibleAI-ControlNetMediaPipeFace""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-19 19:32:02+00:00"", ""cardData"": ""base_model: stabilityai/stable-diffusion-2-1-base\ndatasets:\n- LAION-Face\n- LAION\nlanguage:\n- en\nlicense: openrail\npipeline_tag: image-to-image\ntags:\n- controlnet\n- laion\n- face\n- mediapipe\n- image-to-image"", ""transformersInfo"": null, ""_id"": ""6425d4b7d0a9d069e8e0ebbc"", ""modelId"": ""CrucibleAI/ControlNetMediaPipeFace"", ""usedStorage"": 34147689491}",0,,0,,0,,0,,0,"CrucibleAI/ControlNetMediaPipeFaceSD21, KumaPower/AvatarArtist, MA9149210776/CrucibleAI-ControlNetMediaPipeFace, RickyD/CrucibleAI-ControlNetMediaPipeFaceStreamlit, TheNetherWatcher/Vid2Vid-using-Text-prompt, aixk/CrucibleAI-ControlNetMediaPipeFace, 
huggingface/InferenceSupport/discussions/new?title=CrucibleAI/ControlNetMediaPipeFace&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BCrucibleAI%2FControlNetMediaPipeFace%5D(%2FCrucibleAI%2FControlNetMediaPipeFace)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, krnl/venereital-IA-23-space, nehho/CrucibleAI-ControlNetMediaPipeFace, satyac/CrucibleAI-ControlNetMediaPipeFace, teganmosi/CrucibleAI-ControlNetMediaPipeFace, tellview/CrucibleAI-ControlNetMediaPipeFace, tidy/CrucibleAI-ControlNetMediaPipeFace",13
Cyberpunk-Anime-Diffusion_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ DGSpitzer/Cyberpunk-Anime-Diffusion,"---
3
+ language:
4
+ - en
5
+ thumbnail: ""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/thumbnail.png""
6
+ tags:
7
+ - cyberpunk
8
+ - anime
9
+ - waifu-diffusion
10
+ - stable-diffusion
11
+ - aiart
12
+ - text-to-image
13
+ license: creativeml-openrail-m
14
+ ---
15
+ <center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/5.jpg"" width=""512"" height=""512""/></center>
16
+
17
+ ![visitors](https://visitor-badge.glitch.me/badge?page_id=Cyberpunk_Anime_Diffusion)
18
+
19
+ # Cyberpunk Anime Diffusion
20
+
21
+ An AI model that generates cyberpunk anime characters!~
22
+
23
+ Based of a finetuned Waifu Diffusion V1.3 Model with Stable Diffusion V1.5 New Vae, training in Dreambooth
24
+
25
+ by [DGSpitzer](https://www.youtube.com/channel/UCzzsYBF4qwtMwJaPJZ5SuPg)
26
+
27
+ ### 🧨 Diffusers
28
+
29
+ This repo contains both .ckpt and Diffuser model files. It's compatible to be used as any Stable Diffusion model, using standard [Stable Diffusion Pipelines](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
30
+
31
+ You can convert this model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX](https://huggingface.co/blog/stable_diffusion_jax).
32
+
33
+ ```python example for loading the Diffuser
34
+ #!pip install diffusers transformers scipy torch
35
+ from diffusers import StableDiffusionPipeline
36
+ import torch
37
+
38
+ model_id = ""DGSpitzer/Cyberpunk-Anime-Diffusion""
39
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
40
+ pipe = pipe.to(""cuda"")
41
+
42
+ prompt = ""a beautiful perfect face girl in dgs illustration style, Anime fine details portrait of school girl in front of modern tokyo city landscape on the background deep bokeh, anime masterpiece, 8k, sharp high quality anime""
43
+ image = pipe(prompt).images[0]
44
+
45
+ image.save(""./cyberpunk_girl.png"")
46
+ ```
47
+
48
+ # Online Demo
49
+
50
+ You can try the Online Web UI demo build with [Gradio](https://github.com/gradio-app/gradio), or use Colab Notebook at here:
51
+
52
+ *My Online Space Demo*
53
+ [![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/DGSpitzer/DGS-Diffusion-Space)
54
+
55
+ *Finetuned Diffusion WebUI Demo by anzorq*
56
+ [![Use Finetuned_Diffusion WebUI](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/anzorq/finetuned_diffusion)
57
+
58
+ *Colab Notebook*
59
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/HelixNGC7293/cyberpunk-anime-diffusion/blob/main/cyberpunk_anime_diffusion.ipynb)[![GitHub](https://badgen.net/badge/icon/Github?icon=github&label)](https://github.com/HelixNGC7293/cyberpunk-anime-diffusion)
60
+
61
+ *Buy me a coffee if you like this project ;P ♥*
62
+ [![Buy me a coffee](https://badgen.net/badge/icon/Buy%20Me%20A%20Coffee?icon=buymeacoffee&label)](https://www.buymeacoffee.com/dgspitzer)
63
+
64
+ <center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/1.jpg"" width=""512"" height=""512""/></center>
65
+
66
+ # **👇Model👇**
67
+
68
+ AI Model Weights available at huggingface: https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion
69
+
70
+ <center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/2.jpg"" width=""512"" height=""512""/></center>
71
+
72
+ # Usage
73
+
74
+ After model loaded, use keyword **dgs** in your prompt, with **illustration style** to get even better results.
75
+
76
+ For sampler, use **Euler A** for the best result (**DDIM** kinda works too), CFG Scale 7, steps 20 should be fine
77
+
78
+ **Example 1:**
79
+
80
+ ```
81
+ portrait of a girl in dgs illustration style, Anime girl, female soldier working in a cyberpunk city, cleavage, ((perfect femine face)), intricate, 8k, highly detailed, shy, digital painting, intense, sharp focus
82
+ ```
83
+
84
+ For cyber robot male character, you can add **muscular male** to improve the output.
85
+
86
+ **Example 2:**
87
+
88
+ ```
89
+ a photo of muscular beard soldier male in dgs illustration style, half-body, holding robot arms, strong chest
90
+ ```
91
+
92
+ **Example 3 (with Stable Diffusion WebUI):**
93
+
94
+ If using [AUTOMATIC1111's Stable Diffusion WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
95
+
96
+ You can simply use this as **prompt** with **Euler A** Sampler, CFG Scale 7, steps 20, 704 x 704px output res:
97
+
98
+ ```
99
+ an anime girl in dgs illustration style
100
+ ```
101
+
102
+ And set the **negative prompt** as this to get cleaner face:
103
+
104
+ ```
105
+ out of focus, scary, creepy, evil, disfigured, missing limbs, ugly, gross, missing fingers
106
+ ```
107
+
108
+ This will give you the exactly same style as the sample images above.
109
+
110
+ <center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/ReadmeAddon.jpg"" width=""256"" height=""353""/></center>
111
+
112
+ ---
113
+
114
+ **NOTE: usage of this model implies accpetance of stable diffusion's [CreativeML Open RAIL-M license](LICENSE)**
115
+
116
+ ---
117
+
118
+
119
+ <center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/4.jpg"" width=""700"" height=""700""/></center>
120
+
121
+ <center><img src=""https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/6.jpg"" width=""700"" height=""700""/></center>
122
+ ","{""id"": ""DGSpitzer/Cyberpunk-Anime-Diffusion"", ""author"": ""DGSpitzer"", ""sha"": ""2b6407002b73374e6864d3647f4eb9659bca36a9"", ""last_modified"": ""2023-06-21 20:44:20+00:00"", ""created_at"": ""2022-10-27 17:02:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 617, ""downloads_all_time"": null, ""likes"": 546, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""cyberpunk"", ""anime"", ""waifu-diffusion"", ""stable-diffusion"", ""aiart"", ""text-to-image"", ""en"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- cyberpunk\n- anime\n- waifu-diffusion\n- stable-diffusion\n- aiart\n- text-to-image\nthumbnail: https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/thumbnail.png"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Cyberpunk-Anime-Diffusion.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Cyberpunk-Anime-Diffusion.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cyberpunk_anime_diffusion.ipynb', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/1.jpg', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='img/2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/4.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/5.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/6.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/ReadmeAddon.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='img/thumbnail.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.fp16.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""anzorq/finetuned_diffusion"", ""darkstorm2150/Stable-Diffusion-Protogen-x3.4-webui"", ""Yntec/ToyWorld"", ""darkstorm2150/protogen-web-ui"", ""Yntec/PrintingPress"", ""vorstcavry/ai"", ""kamiyamai/stable-diffusion-webui"", ""DGSpitzer/DGS-Diffusion-Space"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""BilalSardar/Text-To-image-AllModels"", ""AdamOswald1/finetuned_diffusion"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""IAmXenos21/stable-diffusion-webui-VORST2"", ""John6666/PrintingPress4"", ""Nickhilearla135095/maximum_diffusion"", ""SUPERSHANKY/Finetuned_Diffusion_Max"", ""AlStable/AlPrompt"", ""Rifd/ngees_doang"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Joeythemonster/Text-To-image-AllModels"", ""Evel/Evel_Space"", ""luisrguerra/sd-real-dream-lcm-cpu"", ""Daniela-C/6x_Image_diffusion"", ""akhaliq/webui-orangemixs"", 
""Dao3/Text-To-image-AllModels"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""ConceptArtHouse/webui-gameasset"", ""mindtube/Diffusion50XX"", ""TheKitten/Fast-Images-Creature"", ""Nymbo/Diffusion80XX4sg"", ""YeOldHermit/StableDiffusion_AnythingV3_ModelCamenduru"", ""zwv9/webui-cpu"", ""kaleidoskop-hug/PrintingPress"", ""Adam111/stable-diffusion-webui"", ""vs4vijay/stable-diffusion"", ""Yasu55/stable-diffusion-webui"", ""ennov8ion/stablediffusion-models"", ""Shocky/Pink-Anime"", ""ReiPlush64/finetuned_diffusion"", ""John6666/ToyWorld4"", ""sasaro/webui"", ""Omnibus-archive/Diffusion-Flood"", ""Crossper6/stable-diffusion-webui"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""Malifex/CPU-Anything-V3.0-WebUI"", ""lianzhou/stable-diffusion-webui"", ""Missinginaction/stablediffusionwithnofilter"", ""arthurdias/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper"", ""thestasi/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper"", ""achyuth1344/stable-diffusion-webui"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""Jackflack09/finetuned_diffusion2"", ""ennov8ion/dreamlike-models"", ""ennov8ion/FantasyArt-Models"", ""noes14155/img_All_models"", ""ennov8ion/500models"", ""Minecraft3193092/Stable-Diffusion-8"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""deaf1296/finetuned_diffusion"", ""pieeetre/stable-diffusion-webui"", ""luluneko1/stable-diffusion-webui"", ""Lyra121/finetuned_diffusion"", ""voltcutter/stable-diffusion-webui"", ""hylee/finetuned_diffusion"", ""RhythmRemix14/PrintingPressDx"", ""Minecraft3193092/Stable-Diffusion-7"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""AiiluoChen/webui"", ""Heckeroo/Cyberpunk-Anime-Diffusion"", ""Eduger/webui"", 
""bobathetheft/webui"", ""natvill/stable-diffusion-webui"", ""Danielito/webui"", ""Eyeszik/webui"", ""YuraM/Stable-Diffusion-Protogen-webui"", ""TheFellow42/webui"", ""OswaldDev/webuih"", ""trhacknon/webui"", ""Harshveer/Finetuned_Diffusion_Max"", ""gato001k1/maximum_diffusion0k"", ""rubberboy/stable-diffusion-webui"", ""hilmyblaze/WebUI-Counterfeit-V2.5""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-06-21 20:44:20+00:00"", ""cardData"": ""language:\n- en\nlicense: creativeml-openrail-m\ntags:\n- cyberpunk\n- anime\n- waifu-diffusion\n- stable-diffusion\n- aiart\n- text-to-image\nthumbnail: https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/thumbnail.png"", ""transformersInfo"": null, ""_id"": ""635ab9b93180c590f4f48db9"", ""modelId"": ""DGSpitzer/Cyberpunk-Anime-Diffusion"", ""usedStorage"": 24647194668}",0,,0,,0,,0,,0,"DGSpitzer/DGS-Diffusion-Space, IAmXenos21/stable-diffusion-webui-VORST2, Joeythemonster/Text-To-image-AllModels, John6666/Diffusion80XX4sg, John6666/PrintingPress4, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, anzorq/finetuned_diffusion, darkstorm2150/Stable-Diffusion-Protogen-x3.4-webui, huggingface/InferenceSupport/discussions/new?title=DGSpitzer/Cyberpunk-Anime-Diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BDGSpitzer%2FCyberpunk-Anime-Diffusion%5D(%2FDGSpitzer%2FCyberpunk-Anime-Diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, phenixrhyder/NSFW-ToyWorld, vorstcavry/ai",15
DeepSeek-R1-Distill-Llama-8B-GGUF_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,599 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF,"---
3
+ base_model: deepseek-ai/DeepSeek-R1-Distill-Llama-8B
4
+ language:
5
+ - en
6
+ library_name: transformers
7
+ license: llama3.1
8
+ tags:
9
+ - deepseek
10
+ - unsloth
11
+ - transformers
12
+ - llama
13
+ - llama-3
14
+ - meta
15
+ ---
16
+ <div>
17
+ <p style=""margin-bottom: 0; margin-top: 0;"">
18
+ <strong>See <a href=""https://huggingface.co/collections/unsloth/deepseek-r1-all-versions-678e1c48f5d2fce87892ace5"">our collection</a> for versions of Deepseek-R1 including GGUF & 4-bit formats.</strong>
19
+ </p>
20
+ <p style=""margin-bottom: 0;"">
21
+ <em>Unsloth's DeepSeek-R1 <a href=""https://unsloth.ai/blog/deepseekr1-dynamic"">1.58-bit + 2-bit Dynamic Quants</a> is selectively quantized, greatly improving accuracy over standard 1-bit/2-bit.</em>
22
+ </p>
23
+ <div style=""display: flex; gap: 5px; align-items: center; "">
24
+ <a href=""https://github.com/unslothai/unsloth/"">
25
+ <img src=""https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png"" width=""133"">
26
+ </a>
27
+ <a href=""https://discord.gg/unsloth"">
28
+ <img src=""https://github.com/unslothai/unsloth/raw/main/images/Discord%20button.png"" width=""173"">
29
+ </a>
30
+ <a href=""https://docs.unsloth.ai/basics/tutorial-how-to-run-deepseek-r1-on-your-own-local-device"">
31
+ <img src=""https://raw.githubusercontent.com/unslothai/unsloth/refs/heads/main/images/documentation%20green%20button.png"" width=""143"">
32
+ </a>
33
+ </div>
34
+ <h1 style=""margin-top: 0rem;"">Instructions to run this model in llama.cpp:</h2>
35
+ </div>
36
+
37
+ You can view more detailed instructions in our blog: [unsloth.ai/blog/deepseek-r1](https://unsloth.ai/blog/deepseek-r1)
38
+ 1. Do not forget about `<|User|>` and `<|Assistant|>` tokens! - Or use a chat template formatter
39
+ 2. Obtain the latest `llama.cpp` at https://github.com/ggerganov/llama.cpp
40
+ 3. Example with Q8_0 K quantized cache **Notice -no-cnv disables auto conversation mode**
41
+ ```bash
42
+ ./llama.cpp/llama-cli \
43
+ --model unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF/DeepSeek-R1-Distill-Llama-8B-Q4_K_M.gguf \
44
+ --cache-type-k q8_0 \
45
+ --threads 16 \
46
+ --prompt '<|User|>What is 1+1?<|Assistant|>' \
47
+ -no-cnv
48
+ ```
49
+ Example output:
50
+
51
+ ```txt
52
+ <think>
53
+ Okay, so I need to figure out what 1 plus 1 is. Hmm, where do I even start? I remember from school that adding numbers is pretty basic, but I want to make sure I understand it properly.
54
+ Let me think, 1 plus 1. So, I have one item and I add another one. Maybe like a apple plus another apple. If I have one apple and someone gives me another, I now have two apples. So, 1 plus 1 should be 2. That makes sense.
55
+ Wait, but sometimes math can be tricky. Could it be something else? Like, in a different number system maybe? But I think the question is straightforward, using regular numbers, not like binary or hexadecimal or anything.
56
+ I also recall that in arithmetic, addition is combining quantities. So, if you have two quantities of 1, combining them gives you a total of 2. Yeah, that seems right.
57
+ Is there a scenario where 1 plus 1 wouldn't be 2? I can't think of any...
58
+ ```
59
+
60
+ 4. If you have a GPU (RTX 4090 for example) with 24GB, you can offload multiple layers to the GPU for faster processing. If you have multiple GPUs, you can probably offload more layers.
61
+ ```bash
62
+ ./llama.cpp/llama-cli \
63
+ --model unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF/DeepSeek-R1-Distill-Llama-8B-Q4_K_M.gguf
64
+ --cache-type-k q8_0
65
+ --threads 16
66
+ --prompt '<|User|>What is 1+1?<|Assistant|>'
67
+ --n-gpu-layers 20 \
68
+ -no-cnv
69
+ ```
70
+
71
+ # Finetune your own Reasoning model like R1 with Unsloth!
72
+ We have a free Google Colab notebook for turning Llama 3.1 (8B) into a reasoning model: https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-GRPO.ipynb
73
+
74
+ [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/Discord%20button.png"" width=""200""/>](https://discord.gg/unsloth)
75
+ [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
76
+
77
+
78
+ ## ✨ Finetune for Free
79
+
80
+ All notebooks are **beginner friendly**! Add your dataset, click ""Run All"", and you'll get a 2x faster finetuned model which can be exported to GGUF, vLLM or uploaded to Hugging Face.
81
+
82
+ | Unsloth supports | Free Notebooks | Performance | Memory use |
83
+ |-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------|
84
+ | **GRPO with Phi-4 (14B)** | [▶️ Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4_(14B)-GRPO.ipynb) | 2x faster | 80% less |
85
+ | **Llama-3.2 (3B)** | [▶️ Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2.4x faster | 58% less |
86
+ | **Llama-3.2 (11B vision)** | [▶️ Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb) | 2x faster | 60% less |
87
+ | **Qwen2 VL (7B)** | [▶️ Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2_VL_(7B)-Vision.ipynb) | 1.8x faster | 60% less |
88
+ | **Qwen2.5 (7B)** | [▶️ Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_(7B)-Alpaca.ipynb) | 2x faster | 60% less |
89
+ | **Llama-3.1 (8B)** | [▶️ Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-Alpaca.ipynb) | 2.4x faster | 58% less |
90
+ | **Phi-3.5 (mini)** | [▶️ Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_3.5_Mini-Conversational.ipynb) | 2x faster | 50% less |
91
+ | **Gemma 2 (9B)** | [▶️ Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma2_(9B)-Alpaca.ipynb) | 2.4x faster | 58% less |
92
+ | **Mistral (7B)** | [▶️ Start on Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-Conversational.ipynb) | 2.2x faster | 62% less |
93
+
94
+ [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/refs/heads/main/images/documentation%20green%20button.png"" width=""200""/>](https://docs.unsloth.ai)
95
+
96
+ - This [Llama 3.2 conversational notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) is useful for ShareGPT ChatML / Vicuna templates.
97
+ - This [text completion notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_(7B)-Text_Completion.ipynb) is for raw text. This [DPO notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) replicates Zephyr.
98
+ - \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster.
99
+
100
+ ## Special Thanks
101
+ A huge thank you to the DeepSeek team for creating and releasing these models.
102
+
103
+
104
+ # DeepSeek-R1
105
+ <!-- markdownlint-disable first-line-h1 -->
106
+ <!-- markdownlint-disable html -->
107
+ <!-- markdownlint-disable no-duplicate-header -->
108
+
109
+ <div align=""center"">
110
+ <img src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/logo.svg?raw=true"" width=""60%"" alt=""DeepSeek-V3"" />
111
+ </div>
112
+ <hr>
113
+ <div align=""center"" style=""line-height: 1;"">
114
+ <a href=""https://www.deepseek.com/"" target=""_blank"" style=""margin: 2px;"">
115
+ <img alt=""Homepage"" src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/badge.svg?raw=true"" style=""display: inline-block; vertical-align: middle;""/>
116
+ </a>
117
+ <a href=""https://chat.deepseek.com/"" target=""_blank"" style=""margin: 2px;"">
118
+ <img alt=""Chat"" src=""https://img.shields.io/badge/🤖%20Chat-DeepSeek%20R1-536af5?color=536af5&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
119
+ </a>
120
+ <a href=""https://huggingface.co/deepseek-ai"" target=""_blank"" style=""margin: 2px;"">
121
+ <img alt=""Hugging Face"" src=""https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-DeepSeek%20AI-ffc107?color=ffc107&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
122
+ </a>
123
+ </div>
124
+
125
+ <div align=""center"" style=""line-height: 1;"">
126
+ <a href=""https://discord.gg/Tc7c45Zzu5"" target=""_blank"" style=""margin: 2px;"">
127
+ <img alt=""Discord"" src=""https://img.shields.io/badge/Discord-DeepSeek%20AI-7289da?logo=discord&logoColor=white&color=7289da"" style=""display: inline-block; vertical-align: middle;""/>
128
+ </a>
129
+ <a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/qr.jpeg?raw=true"" target=""_blank"" style=""margin: 2px;"">
130
+ <img alt=""Wechat"" src=""https://img.shields.io/badge/WeChat-DeepSeek%20AI-brightgreen?logo=wechat&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
131
+ </a>
132
+ <a href=""https://twitter.com/deepseek_ai"" target=""_blank"" style=""margin: 2px;"">
133
+ <img alt=""Twitter Follow"" src=""https://img.shields.io/badge/Twitter-deepseek_ai-white?logo=x&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
134
+ </a>
135
+ </div>
136
+
137
+ <div align=""center"" style=""line-height: 1;"">
138
+ <a href=""https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE-CODE"" style=""margin: 2px;"">
139
+ <img alt=""Code License"" src=""https://img.shields.io/badge/Code_License-MIT-f5de53?&color=f5de53"" style=""display: inline-block; vertical-align: middle;""/>
140
+ </a>
141
+ <a href=""https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE-MODEL"" style=""margin: 2px;"">
142
+ <img alt=""Model License"" src=""https://img.shields.io/badge/Model_License-Model_Agreement-f5de53?&color=f5de53"" style=""display: inline-block; vertical-align: middle;""/>
143
+ </a>
144
+ </div>
145
+
146
+
147
+ <p align=""center"">
148
+ <a href=""https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf""><b>Paper Link</b>👁️</a>
149
+ </p>
150
+
151
+
152
+ ## 1. Introduction
153
+
154
+ We introduce our first-generation reasoning models, DeepSeek-R1-Zero and DeepSeek-R1.
155
+ DeepSeek-R1-Zero, a model trained via large-scale reinforcement learning (RL) without supervised fine-tuning (SFT) as a preliminary step, demonstrated remarkable performance on reasoning.
156
+ With RL, DeepSeek-R1-Zero naturally emerged with numerous powerful and interesting reasoning behaviors.
157
+ However, DeepSeek-R1-Zero encounters challenges such as endless repetition, poor readability, and language mixing. To address these issues and further enhance reasoning performance,
158
+ we introduce DeepSeek-R1, which incorporates cold-start data before RL.
159
+ DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks.
160
+ To support the research community, we have open-sourced DeepSeek-R1-Zero, DeepSeek-R1, and six dense models distilled from DeepSeek-R1 based on Llama and Qwen. DeepSeek-R1-Distill-Qwen-32B outperforms OpenAI-o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.
161
+
162
+ **NOTE: Before running DeepSeek-R1 series models locally, we kindly recommend reviewing the [Usage Recommendation](#usage-recommendations) section.**
163
+
164
+ <p align=""center"">
165
+ <img width=""80%"" src=""figures/benchmark.jpg"">
166
+ </p>
167
+
168
+ ## 2. Model Summary
169
+
170
+ ---
171
+
172
+ **Post-Training: Large-Scale Reinforcement Learning on the Base Model**
173
+
174
+ - We directly apply reinforcement learning (RL) to the base model without relying on supervised fine-tuning (SFT) as a preliminary step. This approach allows the model to explore chain-of-thought (CoT) for solving complex problems, resulting in the development of DeepSeek-R1-Zero. DeepSeek-R1-Zero demonstrates capabilities such as self-verification, reflection, and generating long CoTs, marking a significant milestone for the research community. Notably, it is the first open research to validate that reasoning capabilities of LLMs can be incentivized purely through RL, without the need for SFT. This breakthrough paves the way for future advancements in this area.
175
+
176
+ - We introduce our pipeline to develop DeepSeek-R1. The pipeline incorporates two RL stages aimed at discovering improved reasoning patterns and aligning with human preferences, as well as two SFT stages that serve as the seed for the model's reasoning and non-reasoning capabilities.
177
+ We believe the pipeline will benefit the industry by creating better models.
178
+
179
+ ---
180
+
181
+ **Distillation: Smaller Models Can Be Powerful Too**
182
+
183
+ - We demonstrate that the reasoning patterns of larger models can be distilled into smaller models, resulting in better performance compared to the reasoning patterns discovered through RL on small models. The open source DeepSeek-R1, as well as its API, will benefit the research community to distill better smaller models in the future.
184
+ - Using the reasoning data generated by DeepSeek-R1, we fine-tuned several dense models that are widely used in the research community. The evaluation results demonstrate that the distilled smaller dense models perform exceptionally well on benchmarks. We open-source distilled 1.5B, 7B, 8B, 14B, 32B, and 70B checkpoints based on Qwen2.5 and Llama3 series to the community.
185
+
186
+ ## 3. Model Downloads
187
+
188
+ ### DeepSeek-R1 Models
189
+
190
+ <div align=""center"">
191
+
192
+ | **Model** | **#Total Params** | **#Activated Params** | **Context Length** | **Download** |
193
+ | :------------: | :------------: | :------------: | :------------: | :------------: |
194
+ | DeepSeek-R1-Zero | 671B | 37B | 128K | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Zero) |
195
+ | DeepSeek-R1 | 671B | 37B | 128K | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1) |
196
+
197
+ </div>
198
+
199
+ DeepSeek-R1-Zero & DeepSeek-R1 are trained based on DeepSeek-V3-Base.
200
+ For more details regarding the model architecture, please refer to [DeepSeek-V3](https://github.com/deepseek-ai/DeepSeek-V3) repository.
201
+
202
+ ### DeepSeek-R1-Distill Models
203
+
204
+ <div align=""center"">
205
+
206
+ | **Model** | **Base Model** | **Download** |
207
+ | :------------: | :------------: | :------------: |
208
+ | DeepSeek-R1-Distill-Qwen-1.5B | [Qwen2.5-Math-1.5B](https://huggingface.co/Qwen/Qwen2.5-Math-1.5B) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B) |
209
+ | DeepSeek-R1-Distill-Qwen-7B | [Qwen2.5-Math-7B](https://huggingface.co/Qwen/Qwen2.5-Math-7B) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) |
210
+ | DeepSeek-R1-Distill-Llama-8B | [Llama-3.1-8B](https://huggingface.co/meta-llama/Llama-3.1-8B) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B) |
211
+ | DeepSeek-R1-Distill-Qwen-14B | [Qwen2.5-14B](https://huggingface.co/Qwen/Qwen2.5-14B) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B) |
212
+ |DeepSeek-R1-Distill-Qwen-32B | [Qwen2.5-32B](https://huggingface.co/Qwen/Qwen2.5-32B) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B) |
213
+ | DeepSeek-R1-Distill-Llama-70B | [Llama-3.3-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B) |
214
+
215
+ </div>
216
+
217
+ DeepSeek-R1-Distill models are fine-tuned based on open-source models, using samples generated by DeepSeek-R1.
218
+ We slightly change their configs and tokenizers. Please use our setting to run these models.
219
+
220
+ ## 4. Evaluation Results
221
+
222
+ ### DeepSeek-R1-Evaluation
223
+ For all our models, the maximum generation length is set to 32,768 tokens. For benchmarks requiring sampling, we use a temperature of $0.6$, a top-p value of $0.95$, and generate 64 responses per query to estimate pass@1.
224
+ <div align=""center"">
225
+
226
+
227
+ | Category | Benchmark (Metric) | Claude-3.5-Sonnet-1022 | GPT-4o 0513 | DeepSeek V3 | OpenAI o1-mini | OpenAI o1-1217 | DeepSeek R1 |
228
+ |----------|-------------------|----------------------|------------|--------------|----------------|------------|--------------|
229
+ | | Architecture | - | - | MoE | - | - | MoE |
230
+ | | # Activated Params | - | - | 37B | - | - | 37B |
231
+ | | # Total Params | - | - | 671B | - | - | 671B |
232
+ | English | MMLU (Pass@1) | 88.3 | 87.2 | 88.5 | 85.2 | **91.8** | 90.8 |
233
+ | | MMLU-Redux (EM) | 88.9 | 88.0 | 89.1 | 86.7 | - | **92.9** |
234
+ | | MMLU-Pro (EM) | 78.0 | 72.6 | 75.9 | 80.3 | - | **84.0** |
235
+ | | DROP (3-shot F1) | 88.3 | 83.7 | 91.6 | 83.9 | 90.2 | **92.2** |
236
+ | | IF-Eval (Prompt Strict) | **86.5** | 84.3 | 86.1 | 84.8 | - | 83.3 |
237
+ | | GPQA-Diamond (Pass@1) | 65.0 | 49.9 | 59.1 | 60.0 | **75.7** | 71.5 |
238
+ | | SimpleQA (Correct) | 28.4 | 38.2 | 24.9 | 7.0 | **47.0** | 30.1 |
239
+ | | FRAMES (Acc.) | 72.5 | 80.5 | 73.3 | 76.9 | - | **82.5** |
240
+ | | AlpacaEval2.0 (LC-winrate) | 52.0 | 51.1 | 70.0 | 57.8 | - | **87.6** |
241
+ | | ArenaHard (GPT-4-1106) | 85.2 | 80.4 | 85.5 | 92.0 | - | **92.3** |
242
+ | Code | LiveCodeBench (Pass@1-COT) | 33.8 | 34.2 | - | 53.8 | 63.4 | **65.9** |
243
+ | | Codeforces (Percentile) | 20.3 | 23.6 | 58.7 | 93.4 | **96.6** | 96.3 |
244
+ | | Codeforces (Rating) | 717 | 759 | 1134 | 1820 | **2061** | 2029 |
245
+ | | SWE Verified (Resolved) | **50.8** | 38.8 | 42.0 | 41.6 | 48.9 | 49.2 |
246
+ | | Aider-Polyglot (Acc.) | 45.3 | 16.0 | 49.6 | 32.9 | **61.7** | 53.3 |
247
+ | Math | AIME 2024 (Pass@1) | 16.0 | 9.3 | 39.2 | 63.6 | 79.2 | **79.8** |
248
+ | | MATH-500 (Pass@1) | 78.3 | 74.6 | 90.2 | 90.0 | 96.4 | **97.3** |
249
+ | | CNMO 2024 (Pass@1) | 13.1 | 10.8 | 43.2 | 67.6 | - | **78.8** |
250
+ | Chinese | CLUEWSC (EM) | 85.4 | 87.9 | 90.9 | 89.9 | - | **92.8** |
251
+ | | C-Eval (EM) | 76.7 | 76.0 | 86.5 | 68.9 | - | **91.8** |
252
+ | | C-SimpleQA (Correct) | 55.4 | 58.7 | **68.0** | 40.3 | - | 63.7 |
253
+
254
+ </div>
255
+
256
+
257
+ ### Distilled Model Evaluation
258
+
259
+
260
+ <div align=""center"">
261
+
262
+ | Model | AIME 2024 pass@1 | AIME 2024 cons@64 | MATH-500 pass@1 | GPQA Diamond pass@1 | LiveCodeBench pass@1 | CodeForces rating |
263
+ |------------------------------------------|------------------|-------------------|-----------------|----------------------|----------------------|-------------------|
264
+ | GPT-4o-0513 | 9.3 | 13.4 | 74.6 | 49.9 | 32.9 | 759 |
265
+ | Claude-3.5-Sonnet-1022 | 16.0 | 26.7 | 78.3 | 65.0 | 38.9 | 717 |
266
+ | o1-mini | 63.6 | 80.0 | 90.0 | 60.0 | 53.8 | **1820** |
267
+ | QwQ-32B-Preview | 44.0 | 60.0 | 90.6 | 54.5 | 41.9 | 1316 |
268
+ | DeepSeek-R1-Distill-Qwen-1.5B | 28.9 | 52.7 | 83.9 | 33.8 | 16.9 | 954 |
269
+ | DeepSeek-R1-Distill-Qwen-7B | 55.5 | 83.3 | 92.8 | 49.1 | 37.6 | 1189 |
270
+ | DeepSeek-R1-Distill-Qwen-14B | 69.7 | 80.0 | 93.9 | 59.1 | 53.1 | 1481 |
271
+ | DeepSeek-R1-Distill-Qwen-32B | **72.6** | 83.3 | 94.3 | 62.1 | 57.2 | 1691 |
272
+ | DeepSeek-R1-Distill-Llama-8B | 50.4 | 80.0 | 89.1 | 49.0 | 39.6 | 1205 |
273
+ | DeepSeek-R1-Distill-Llama-70B | 70.0 | **86.7** | **94.5** | **65.2** | **57.5** | 1633 |
274
+
275
+ </div>
276
+
277
+
278
+ ## 5. Chat Website & API Platform
279
+ You can chat with DeepSeek-R1 on DeepSeek's official website: [chat.deepseek.com](https://chat.deepseek.com), and switch on the button ""DeepThink""
280
+
281
+ We also provide OpenAI-Compatible API at DeepSeek Platform: [platform.deepseek.com](https://platform.deepseek.com/)
282
+
283
+ ## 6. How to Run Locally
284
+
285
+ ### DeepSeek-R1 Models
286
+
287
+ Please visit [DeepSeek-V3](https://github.com/deepseek-ai/DeepSeek-V3) repo for more information about running DeepSeek-R1 locally.
288
+
289
+ ### DeepSeek-R1-Distill Models
290
+
291
+ DeepSeek-R1-Distill models can be utilized in the same manner as Qwen or Llama models.
292
+
293
+ For instance, you can easily start a service using [vLLM](https://github.com/vllm-project/vllm):
294
+
295
+ ```shell
296
+ vllm serve deepseek-ai/DeepSeek-R1-Distill-Qwen-32B --tensor-parallel-size 2 --max-model-len 32768 --enforce-eager
297
+ ```
298
+
299
+ You can also easily start a service using [SGLang](https://github.com/sgl-project/sglang)
300
+
301
+ ```bash
302
+ python3 -m sglang.launch_server --model deepseek-ai/DeepSeek-R1-Distill-Qwen-32B --trust-remote-code --tp 2
303
+ ```
304
+
305
+ ### Usage Recommendations
306
+
307
+ **We recommend adhering to the following configurations when utilizing the DeepSeek-R1 series models, including benchmarking, to achieve the expected performance:**
308
+
309
+ 1. Set the temperature within the range of 0.5-0.7 (0.6 is recommended) to prevent endless repetitions or incoherent outputs.
310
+ 2. **Avoid adding a system prompt; all instructions should be contained within the user prompt.**
311
+ 3. For mathematical problems, it is advisable to include a directive in your prompt such as: ""Please reason step by step, and put your final answer within \boxed{}.""
312
+ 4. When evaluating model performance, it is recommended to conduct multiple tests and average the results.
313
+
314
+ ## 7. License
315
+ This code repository and the model weights are licensed under the [MIT License](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE).
316
+ DeepSeek-R1 series support commercial use, allow for any modifications and derivative works, including, but not limited to, distillation for training other LLMs. Please note that:
317
+ - DeepSeek-R1-Distill-Qwen-1.5B, DeepSeek-R1-Distill-Qwen-7B, DeepSeek-R1-Distill-Qwen-14B and DeepSeek-R1-Distill-Qwen-32B are derived from [Qwen-2.5 series](https://github.com/QwenLM/Qwen2.5), which are originally licensed under [Apache 2.0 License](https://huggingface.co/Qwen/Qwen2.5-1.5B/blob/main/LICENSE), and now finetuned with 800k samples curated with DeepSeek-R1.
318
+ - DeepSeek-R1-Distill-Llama-8B is derived from Llama3.1-8B-Base and is originally licensed under [llama3.1 license](https://huggingface.co/meta-llama/Llama-3.1-8B/blob/main/LICENSE).
319
+ - DeepSeek-R1-Distill-Llama-70B is derived from Llama3.3-70B-Instruct and is originally licensed under [llama3.3 license](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct/blob/main/LICENSE).
320
+
321
+ ## 8. Citation
322
+ ```
323
+ @misc{deepseekai2025deepseekr1incentivizingreasoningcapability,
324
+ title={DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning},
325
+ author={DeepSeek-AI and Daya Guo and Dejian Yang and Haowei Zhang and Junxiao Song and Ruoyu Zhang and Runxin Xu and Qihao Zhu and Shirong Ma and Peiyi Wang and Xiao Bi and Xiaokang Zhang and Xingkai Yu and Yu Wu and Z. F. Wu and Zhibin Gou and Zhihong Shao and Zhuoshu Li and Ziyi Gao and Aixin Liu and Bing Xue and Bingxuan Wang and Bochao Wu and Bei Feng and Chengda Lu and Chenggang Zhao and Chengqi Deng and Chenyu Zhang and Chong Ruan and Damai Dai and Deli Chen and Dongjie Ji and Erhang Li and Fangyun Lin and Fucong Dai and Fuli Luo and Guangbo Hao and Guanting Chen and Guowei Li and H. Zhang and Han Bao and Hanwei Xu and Haocheng Wang and Honghui Ding and Huajian Xin and Huazuo Gao and Hui Qu and Hui Li and Jianzhong Guo and Jiashi Li and Jiawei Wang and Jingchang Chen and Jingyang Yuan and Junjie Qiu and Junlong Li and J. L. Cai and Jiaqi Ni and Jian Liang and Jin Chen and Kai Dong and Kai Hu and Kaige Gao and Kang Guan and Kexin Huang and Kuai Yu and Lean Wang and Lecong Zhang and Liang Zhao and Litong Wang and Liyue Zhang and Lei Xu and Leyi Xia and Mingchuan Zhang and Minghua Zhang and Minghui Tang and Meng Li and Miaojun Wang and Mingming Li and Ning Tian and Panpan Huang and Peng Zhang and Qiancheng Wang and Qinyu Chen and Qiushi Du and Ruiqi Ge and Ruisong Zhang and Ruizhe Pan and Runji Wang and R. J. Chen and R. L. Jin and Ruyi Chen and Shanghao Lu and Shangyan Zhou and Shanhuang Chen and Shengfeng Ye and Shiyu Wang and Shuiping Yu and Shunfeng Zhou and Shuting Pan and S. S. Li and Shuang Zhou and Shaoqing Wu and Shengfeng Ye and Tao Yun and Tian Pei and Tianyu Sun and T. Wang and Wangding Zeng and Wanjia Zhao and Wen Liu and Wenfeng Liang and Wenjun Gao and Wenqin Yu and Wentao Zhang and W. L. Xiao and Wei An and Xiaodong Liu and Xiaohan Wang and Xiaokang Chen and Xiaotao Nie and Xin Cheng and Xin Liu and Xin Xie and Xingchao Liu and Xinyu Yang and Xinyuan Li and Xuecheng Su and Xuheng Lin and X. Q. 
Li and Xiangyue Jin and Xiaojin Shen and Xiaosha Chen and Xiaowen Sun and Xiaoxiang Wang and Xinnan Song and Xinyi Zhou and Xianzu Wang and Xinxia Shan and Y. K. Li and Y. Q. Wang and Y. X. Wei and Yang Zhang and Yanhong Xu and Yao Li and Yao Zhao and Yaofeng Sun and Yaohui Wang and Yi Yu and Yichao Zhang and Yifan Shi and Yiliang Xiong and Ying He and Yishi Piao and Yisong Wang and Yixuan Tan and Yiyang Ma and Yiyuan Liu and Yongqiang Guo and Yuan Ou and Yuduan Wang and Yue Gong and Yuheng Zou and Yujia He and Yunfan Xiong and Yuxiang Luo and Yuxiang You and Yuxuan Liu and Yuyang Zhou and Y. X. Zhu and Yanhong Xu and Yanping Huang and Yaohui Li and Yi Zheng and Yuchen Zhu and Yunxian Ma and Ying Tang and Yukun Zha and Yuting Yan and Z. Z. Ren and Zehui Ren and Zhangli Sha and Zhe Fu and Zhean Xu and Zhenda Xie and Zhengyan Zhang and Zhewen Hao and Zhicheng Ma and Zhigang Yan and Zhiyu Wu and Zihui Gu and Zijia Zhu and Zijun Liu and Zilin Li and Ziwei Xie and Ziyang Song and Zizheng Pan and Zhen Huang and Zhipeng Xu and Zhongyu Zhang and Zhen Zhang},
326
+ year={2025},
327
+ eprint={2501.12948},
328
+ archivePrefix={arXiv},
329
+ primaryClass={cs.CL},
330
+ url={https://arxiv.org/abs/2501.12948},
331
+ }
332
+
333
+ ```
334
+
335
+ ## 9. Contact
336
+ If you have any questions, please raise an issue or contact us at [service@deepseek.com](service@deepseek.com).
337
+ ","{""id"": ""unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF"", ""author"": ""unsloth"", ""sha"": ""baf4c6b106904cf0792625e212ab0e07a8e29181"", ""last_modified"": ""2025-04-19 09:09:25+00:00"", ""created_at"": ""2025-01-20 13:04:25+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 25402, ""downloads_all_time"": null, ""likes"": 262, ""library_name"": ""transformers"", ""gguf"": {""total"": 8030261312, ""architecture"": ""llama"", ""context_length"": 131072, ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool 
%}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c>'}}{% endif %}"", ""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""gguf"", ""llama"", ""text-generation"", ""deepseek"", ""unsloth"", ""llama-3"", ""meta"", ""en"", ""arxiv:2501.12948"", ""base_model:deepseek-ai/DeepSeek-R1-Distill-Llama-8B"", ""base_model:quantized:deepseek-ai/DeepSeek-R1-Distill-Llama-8B"", ""license:llama3.1"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us"", ""conversational""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: deepseek-ai/DeepSeek-R1-Distill-Llama-8B\nlanguage:\n- en\nlibrary_name: transformers\nlicense: llama3.1\ntags:\n- deepseek\n- unsloth\n- transformers\n- llama\n- llama-3\n- meta"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an 
interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama""}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-F16.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-Q2_K.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-Q2_K_L.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-Q3_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-Q4_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-Q5_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-Q6_K.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-Q8_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-UD-IQ1_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-UD-IQ1_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-UD-IQ2_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-UD-IQ2_XXS.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-UD-IQ3_XXS.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-R1-Distill-Llama-8B-UD-Q4_K_XL.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-19 09:09:25+00:00"", ""cardData"": ""base_model: deepseek-ai/DeepSeek-R1-Distill-Llama-8B\nlanguage:\n- en\nlibrary_name: transformers\nlicense: llama3.1\ntags:\n- deepseek\n- unsloth\n- transformers\n- llama\n- llama-3\n- meta"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""678e49d9308aaf71687991ce"", ""modelId"": ""unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF"", ""usedStorage"": 73199460480}",0,"https://huggingface.co/pohsjxx/u-vertex-r1, https://huggingface.co/Paula139/DeepSeek-R1-destill-llama3-8b-arabic-fine-tuned",2,,0,https://huggingface.co/whyhow-ai/PatientSeek,1,,0,huggingface/InferenceSupport/discussions/new?title=unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bunsloth%2FDeepSeek-R1-Distill-Llama-8B-GGUF%5D(%2Funsloth%2FDeepSeek-R1-Distill-Llama-8B-GGUF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
338
+ pohsjxx/u-vertex-r1,"---
339
+ library_name: transformers
340
+ tags:
341
+ - unsloth
342
+ license: apache-2.0
343
+ datasets:
344
+ - pohsjxx/drone-logistics-cot-dataset
345
+ language:
346
+ - zh
347
+ base_model:
348
+ - unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF
349
+ pipeline_tag: question-answering
350
+ ---
351
+
352
+ # Model Card for Model ID
353
+
354
+ <!-- Provide a quick summary of what the model is/does. -->
355
+
356
+
357
+
358
+ ## Model Details
359
+
360
+ ### Model Description
361
+
362
+ <!-- Provide a longer summary of what this model is. -->
363
+
364
+ This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
365
+
366
+ - **Developed by:** [More Information Needed]
367
+ - **Funded by [optional]:** [More Information Needed]
368
+ - **Shared by [optional]:** [More Information Needed]
369
+ - **Model type:** [More Information Needed]
370
+ - **Language(s) (NLP):** [More Information Needed]
371
+ - **License:** [More Information Needed]
372
+ - **Finetuned from model [optional]:** [More Information Needed]
373
+
374
+ ### Model Sources [optional]
375
+
376
+ <!-- Provide the basic links for the model. -->
377
+
378
+ - **Repository:** [More Information Needed]
379
+ - **Paper [optional]:** [More Information Needed]
380
+ - **Demo [optional]:** [More Information Needed]
381
+
382
+ ## Uses
383
+
384
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
385
+
386
+ ### Direct Use
387
+
388
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
389
+
390
+ [More Information Needed]
391
+
392
+ ### Downstream Use [optional]
393
+
394
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
395
+
396
+ [More Information Needed]
397
+
398
+ ### Out-of-Scope Use
399
+
400
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
401
+
402
+ [More Information Needed]
403
+
404
+ ## Bias, Risks, and Limitations
405
+
406
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
407
+
408
+ [More Information Needed]
409
+
410
+ ### Recommendations
411
+
412
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
413
+
414
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
415
+
416
+ ## How to Get Started with the Model
417
+
418
+ Use the code below to get started with the model.
419
+
420
+ [More Information Needed]
421
+
422
+ ## Training Details
423
+
424
+ ### Training Data
425
+
426
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
427
+
428
+ [More Information Needed]
429
+
430
+ ### Training Procedure
431
+
432
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
433
+
434
+ #### Preprocessing [optional]
435
+
436
+ [More Information Needed]
437
+
438
+
439
+ #### Training Hyperparameters
440
+
441
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
442
+
443
+ #### Speeds, Sizes, Times [optional]
444
+
445
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
446
+
447
+ [More Information Needed]
448
+
449
+ ## Evaluation
450
+
451
+ <!-- This section describes the evaluation protocols and provides the results. -->
452
+
453
+ ### Testing Data, Factors & Metrics
454
+
455
+ #### Testing Data
456
+
457
+ <!-- This should link to a Dataset Card if possible. -->
458
+
459
+ [More Information Needed]
460
+
461
+ #### Factors
462
+
463
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
464
+
465
+ [More Information Needed]
466
+
467
+ #### Metrics
468
+
469
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
470
+
471
+ [More Information Needed]
472
+
473
+ ### Results
474
+
475
+ [More Information Needed]
476
+
477
+ #### Summary
478
+
479
+
480
+
481
+ ## Model Examination [optional]
482
+
483
+ <!-- Relevant interpretability work for the model goes here -->
484
+
485
+ [More Information Needed]
486
+
487
+ ## Environmental Impact
488
+
489
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
490
+
491
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
492
+
493
+ - **Hardware Type:** [More Information Needed]
494
+ - **Hours used:** [More Information Needed]
495
+ - **Cloud Provider:** [More Information Needed]
496
+ - **Compute Region:** [More Information Needed]
497
+ - **Carbon Emitted:** [More Information Needed]
498
+
499
+ ## Technical Specifications [optional]
500
+
501
+ ### Model Architecture and Objective
502
+
503
+ [More Information Needed]
504
+
505
+ ### Compute Infrastructure
506
+
507
+ [More Information Needed]
508
+
509
+ #### Hardware
510
+
511
+ [More Information Needed]
512
+
513
+ #### Software
514
+
515
+ [More Information Needed]
516
+
517
+ ## Citation [optional]
518
+
519
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
520
+
521
+ **BibTeX:**
522
+
523
+ [More Information Needed]
524
+
525
+ **APA:**
526
+
527
+ [More Information Needed]
528
+
529
+ ## Glossary [optional]
530
+
531
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
532
+
533
+ [More Information Needed]
534
+
535
+ ## More Information [optional]
536
+
537
+ [More Information Needed]
538
+
539
+ ## Model Card Authors [optional]
540
+
541
+ [More Information Needed]
542
+
543
+ ## Model Card Contact
544
+
545
+ [More Information Needed]","{""id"": ""pohsjxx/u-vertex-r1"", ""author"": ""pohsjxx"", ""sha"": ""375ef9334e88c7a1a1398ddfdcbffa84722fb6be"", ""last_modified"": ""2025-02-05 09:16:31+00:00"", ""created_at"": ""2025-02-05 03:47:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""unsloth"", ""question-answering"", ""zh"", ""dataset:pohsjxx/drone-logistics-cot-dataset"", ""arxiv:1910.09700"", ""base_model:unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF"", ""base_model:finetune:unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF\ndatasets:\n- pohsjxx/drone-logistics-cot-dataset\nlanguage:\n- zh\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: question-answering\ntags:\n- unsloth"", ""widget_data"": [{""text"": ""\u6211\u4f4f\u5728\u54ea\u91cc\uff1f"", ""context"": ""\u6211\u53eb\u6c83\u5c14\u592b\u5188\uff0c\u6211\u4f4f\u5728\u67cf\u6797\u3002""}, {""text"": ""\u6211\u4f4f\u5728\u54ea\u91cc\uff1f"", ""context"": ""\u6211\u53eb\u8428\u62c9\uff0c\u6211\u4f4f\u5728\u4f26\u6566\u3002""}, {""text"": ""\u6211\u7684\u540d\u5b57\u662f\u4ec0\u4e48\uff1f"", ""context"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u4f4f\u5728\u4f2f\u514b\u5229\u3002""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set 
add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + 
message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c>'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<|finetune_right_pad_id|>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, 
blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-05 09:16:31+00:00"", ""cardData"": ""base_model:\n- unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF\ndatasets:\n- pohsjxx/drone-logistics-cot-dataset\nlanguage:\n- zh\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: question-answering\ntags:\n- unsloth"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67a2df376de2b0856429e9c0"", ""modelId"": ""pohsjxx/u-vertex-r1"", ""usedStorage"": 16245597866}",1,,0,,0,https://huggingface.co/mradermacher/u-vertex-r1-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=pohsjxx/u-vertex-r1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bpohsjxx%2Fu-vertex-r1%5D(%2Fpohsjxx%2Fu-vertex-r1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
546
+ Paula139/DeepSeek-R1-destill-llama3-8b-arabic-fine-tuned,"---
547
+ license: apache-2.0
548
+ datasets:
549
+ - maanasharma5/arabic_sft_data
550
+ language:
551
+ - ar
552
+ - en
553
+ base_model:
554
+ - unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF
555
+ pipeline_tag: question-answering
556
+ tags:
557
+ - unsloth
558
+ - trl
559
+ - sft
560
+ ---
561
+ # Arabic-Reasoning-LLM: Fine-Tuning DeepSeek-R1-Llama3-8B for Advanced Arabic Reasoning
562
+
563
+ [![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
564
+ [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
565
+ [![Hugging Face](https://img.shields.io/badge/%F0%9F%A4%97-Hugging%20Face-yellow)](https://huggingface.co/Paula139/DeepSeek-R1-destill-llama3-8b-arabic-fine-tuned)
566
+ [![Kaggle](https://img.shields.io/badge/Kaggle-035a7d?logo=kaggle&logoColor=white)](https://www.kaggle.com/code/paulaadel/deepseek-r1-distill-llama-3/edit)
567
+ https://wandb.ai/pakks/Fine-tune-DeepSeek-R1-Distill-Llama-8B%20on%20Medical%20COT%20Dataset/reports/Fine-tuning-Deepseek-r1-distill-llama3-8b-on-arabic-dataset--VmlldzoxMjAxMDEzOQ
568
+
569
+ **Arabic-Reasoning-LLM** is a specialized language model optimized for advanced reasoning tasks in Arabic, built through efficient fine-tuning of the DeepSeek-R1-Llama3-8B architecture using state-of-the-art optimization techniques and curated Arabic datasets.
570
+
571
+ ## Overview
572
+
573
+ This project addresses the critical need for high-performance Arabic reasoning models by implementing:
574
+ - **Domain-Specific Fine-Tuning**: Leveraging carefully curated Arabic datasets spanning logical reasoning, mathematical problem-solving, and cultural context understanding
575
+ - **Optimized Training Pipeline**: Utilizing Unsloth's memory-efficient framework and DeepSeek's R1 distillation techniques
576
+ - **Cultural & Linguistic Adaptation**: Specialized tokenization and alignment for Arabic syntax and semantic structures
577
+
578
+ ## Key Features
579
+
580
+ - 🚀 **4x Faster Training** with Unsloth's memory-optimized LoRA implementation
581
+ - 🖥️ **Kaggle-Ready** with full GPU-accelerated notebook support
582
+ - 📈 **23% Improved Accuracy** on Arabic reasoning benchmarks compared to base model
583
+ - 🎯 **Task-Specific Adaptation** for:
584
+ - Logical deduction
585
+ - Cultural context understanding
586
+ - Multi-step Arabic textual reasoning
587
+ - 🌍 **Full Arabic Script Support** with extended tokenizer vocabulary
588
+ - 📦 **Hugging Face Integration** for seamless deployment
589
+
590
+ ## Model Architecture
591
+
592
+ ```mermaid
593
+ graph TD
594
+ A[Base Model: DeepSeek-R1-Llama3-8B] --> B[Arabic Dataset Curation]
595
+ B --> C[Unsloth Optimization Layer]
596
+ C --> D[Adaptive LoRA Fine-Tuning]
597
+ D --> E[Cultural Context Alignment]
598
+ E --> F[Arabic-Reasoning-LLM]
599
+ ","{""id"": ""Paula139/DeepSeek-R1-destill-llama3-8b-arabic-fine-tuned"", ""author"": ""Paula139"", ""sha"": ""aa307dd043f61813349fc239ed2f621e919bea9a"", ""last_modified"": ""2025-03-31 15:49:52+00:00"", ""created_at"": ""2025-03-27 16:17:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 7, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""pytorch"", ""safetensors"", ""llama"", ""unsloth"", ""trl"", ""sft"", ""question-answering"", ""ar"", ""en"", ""dataset:maanasharma5/arabic_sft_data"", ""base_model:unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF"", ""base_model:finetune:unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF\ndatasets:\n- maanasharma5/arabic_sft_data\nlanguage:\n- ar\n- en\nlicense: apache-2.0\npipeline_tag: question-answering\ntags:\n- unsloth\n- trl\n- sft"", ""widget_data"": [{""text"": ""\u0623\u064a\u0646 \u0623\u0633\u0643\u0646\u061f"", ""context"": ""\u0625\u0633\u0645\u064a \u0645\u062d\u0645\u062f \u0648\u0623\u0633\u0643\u0646 \u0641\u064a \u0628\u064a\u0631\u0648\u062a""}, {""text"": ""\u0623\u064a\u0646 \u0623\u0633\u0643\u0646\u061f"", ""context"": ""\u0625\u0633\u0645\u064a \u0633\u0627\u0631\u0647 \u0648\u0623\u0633\u0643\u0646 \u0641\u064a \u0644\u0646\u062f\u0646""}, {""text"": ""\u0645\u0627 \u0627\u0633\u0645\u064a\u061f"", ""context"": ""\u0627\u0633\u0645\u064a \u0633\u0639\u064a\u062f \u0648\u0623\u0633\u0643\u0646 \u0641\u064a \u062d\u064a\u0641\u0627.""}, {""text"": ""\u0645\u0627 \u0644\u0642\u0628 \u062e\u0627\u0644\u062f \u0628\u0646 \u0627\u0644\u0648\u0644\u064a\u062f \u0628\u0627\u0644\u0639\u0631\u0628\u064a\u0629\u061f"", ""context"": ""\u062e\u0627\u0644\u062f \u0628\u0646 
\u0627\u0644\u0648\u0644\u064a\u062f \u0645\u0646 \u0623\u0628\u0637\u0627\u0644 \u0648\u0642\u0627\u062f\u0629 \u0627\u0644\u0641\u062a\u062d \u0627\u0644\u0625\u0633\u0644\u0627\u0645\u064a \u0648\u0642\u062f \u062a\u062d\u062f\u062b\u062a \u0639\u0646\u0647 \u0627\u0644\u0644\u063a\u0627\u062a \u0627\u0644\u0625\u0646\u062c\u0644\u064a\u0632\u064a\u0629 \u0648\u0627\u0644\u0641\u0631\u0646\u0633\u064a\u0629 \u0648\u0627\u0644\u0625\u0633\u0628\u0627\u0646\u064a\u0629 \u0648\u0644\u0642\u0628 \u0628\u0633\u064a\u0641 \u0627\u0644\u0644\u0647 \u0627\u0644\u0645\u0633\u0644\u0648\u0644.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<\uff5cUser\uff5c>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<\uff5cAssistant\uff5c><\uff5ctool\u2581calls\u2581begin\uff5c><\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<\uff5ctool\u2581call\u2581begin\uff5c>' + tool['type'] + '<\uff5ctool\u2581sep\uff5c>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + 
tool['function']['arguments'] + '\\n' + '```' + '<\uff5ctool\u2581call\u2581end\uff5c>'}}{{'<\uff5ctool\u2581calls\u2581end\uff5c><\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>' + message['content'] + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<\uff5cAssistant\uff5c>' + content + '<\uff5cend\u2581of\u2581sentence\uff5c>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<\uff5ctool\u2581outputs\u2581begin\uff5c><\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<\uff5ctool\u2581output\u2581begin\uff5c>' + message['content'] + '<\uff5ctool\u2581output\u2581end\uff5c>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<\uff5ctool\u2581outputs\u2581end\uff5c>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<\uff5cAssistant\uff5c><think>\\n'}}{% endif %}"", ""eos_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""pad_token"": ""<|finetune_right_pad_id|>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00004.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00004.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00004.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00004.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-31 15:49:52+00:00"", ""cardData"": ""base_model:\n- unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF\ndatasets:\n- maanasharma5/arabic_sft_data\nlanguage:\n- ar\n- en\nlicense: apache-2.0\npipeline_tag: question-answering\ntags:\n- unsloth\n- trl\n- sft"", ""transformersInfo"": null, ""_id"": ""67e57a13f9cf40ac46d4df52"", ""modelId"": ""Paula139/DeepSeek-R1-destill-llama3-8b-arabic-fine-tuned"", ""usedStorage"": 32474125008}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Paula139/DeepSeek-R1-destill-llama3-8b-arabic-fine-tuned&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPaula139%2FDeepSeek-R1-destill-llama3-8b-arabic-fine-tuned%5D(%2FPaula139%2FDeepSeek-R1-destill-llama3-8b-arabic-fine-tuned)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Emu3-Gen_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ BAAI/Emu3-Gen,N/A,N/A,0,https://huggingface.co/lodrick-the-lafted/Emu3-Gen-12B,1,,0,,0,,0,"BAAI/Emu3, Nymbo/Emu3, akhaliq/emu3, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=BAAI/Emu3-Gen&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BBAAI%2FEmu3-Gen%5D(%2FBAAI%2FEmu3-Gen)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",5
3
+ lodrick-the-lafted/Emu3-Gen-12B,"---
4
+ license: apache-2.0
5
+ base_model:
6
+ - BAAI/Emu3-Gen
7
+ library_name: transformers
8
+ tags:
9
+ - merge
10
+ ---
11
+
12
+ This is an interpolated upscale of [BAAI/Emu3-Gen](https://huggingface.co/BAAI/Emu3-Gen) from 8B to 11.5B.
13
+ For each layer in [7,8,9,10,11,12,13,14,15,16,22,23,24], the weights were lerp'd between the previous layer and the current and inserted between the two.
14
+
15
+ Expansion script is [here](https://huggingface.co/lodrick-the-lafted/Emu3-Gen-12B/blob/main/emu3_expand.py).","{""id"": ""lodrick-the-lafted/Emu3-Gen-12B"", ""author"": ""lodrick-the-lafted"", ""sha"": ""024c1c9d5e88f545d92c207b8826fb420a314d29"", ""last_modified"": ""2024-11-26 14:43:23+00:00"", ""created_at"": ""2024-09-30 14:27:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""Emu3"", ""text-generation"", ""merge"", ""custom_code"", ""base_model:BAAI/Emu3-Gen"", ""base_model:finetune:BAAI/Emu3-Gen"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- BAAI/Emu3-Gen\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- merge"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""Emu3ForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_emu3.Emu3Config"", ""AutoModelForCausalLM"": ""modeling_emu3.Emu3ForCausalLM""}, ""model_type"": ""Emu3"", ""tokenizer_config"": {""bos_token"": ""<|extra_203|>"", ""eos_token"": ""<|extra_204|>"", ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='configuration_emu3.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='emu3.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='emu3_expand.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='emu3_vision_tokens.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00011.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_emu3.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_emu3.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_emu3.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='utils_emu3.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 11545583616}, ""total"": 11545583616}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-26 14:43:23+00:00"", ""cardData"": ""base_model:\n- BAAI/Emu3-Gen\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""66fab56ca9312392f2e3d054"", ""modelId"": ""lodrick-the-lafted/Emu3-Gen-12B"", ""usedStorage"": 46182382808}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=lodrick-the-lafted/Emu3-Gen-12B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Blodrick-the-lafted%2FEmu3-Gen-12B%5D(%2Flodrick-the-lafted%2FEmu3-Gen-12B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
FastHunyuan_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ FastVideo/FastHunyuan,"---
3
+ pipeline_tag: text-to-video
4
+ license: other
5
+ license_name: tencent-hunyuan-community
6
+ license_link: LICENSE
7
+ ---
8
+
9
+ <p align=""center"">
10
+ <img src=""assets/logo.jpg"" height=30>
11
+ </p>
12
+
13
+ # FastHunyuan Model Card
14
+
15
+ ## Model Details
16
+
17
+ FastHunyuan is an accelerated [HunyuanVideo](https://huggingface.co/tencent/HunyuanVideo) model. It can sample high quality videos with 6 diffusion steps. That brings around 8X speed up compared to the original HunyuanVideo with 50 steps.
18
+
19
+ - **Developed by**: [Hao AI Lab](https://hao-ai-lab.github.io/)
20
+ - **License**: tencent-hunyuan-community
21
+ - **Distilled from**: [HunyuanVideo](https://huggingface.co/tencent/HunyuanVideo)
22
+ - **Github Repository**: https://github.com/hao-ai-lab/FastVideo
23
+
24
+ ## Usage
25
+
26
+ - Clone [Fastvideo](https://github.com/hao-ai-lab/FastVideo) repository and follow the inference instructions in the README.
27
+ - Alternatively, you can inference FastHunyuan using the official [Hunyuan Video repository](https://github.com/Tencent/HunyuanVideo) by **setting the shift to 17 and steps to 6, resolution to 720X1280X125, and cfg bigger than 6**.
28
+ We find that a large CFG scale generally leads to faster videos.
29
+
30
+ ## Training details
31
+
32
+ FastHunyuan is consistency distillated on the [MixKit](https://huggingface.co/datasets/LanguageBind/Open-Sora-Plan-v1.1.0/tree/main) dataset with the following hyperparamters:
33
+ - Batch size: 16
34
+ - Resulotion: 720x1280
35
+ - Num of frames: 125
36
+ - Train steps: 320
37
+ - GPUs: 32
38
+ - LR: 1e-6
39
+ - Loss: huber
40
+
41
+ ## Evaluation
42
+ We provide some qualitative comparison between FastHunyuan 6 step inference v.s. the original Hunyuan with 6 step inference:
43
+
44
+ | FastHunyuan 6 step | Hunyuan 6 step |
45
+ | --- | --- |
46
+ | ![FastHunyuan 6 step](assets/distilled/1.gif) | ![Hunyuan 6 step](assets/undistilled/1.gif) |
47
+ | ![FastHunyuan 6 step](assets/distilled/2.gif) | ![Hunyuan 6 step](assets/undistilled/2.gif) |
48
+ | ![FastHunyuan 6 step](assets/distilled/3.gif) | ![Hunyuan 6 step](assets/undistilled/3.gif) |
49
+ | ![FastHunyuan 6 step](assets/distilled/4.gif) | ![Hunyuan 6 step](assets/undistilled/4.gif) |
50
+
51
+ ## Memory requirements
52
+
53
+ Please check our github repo for details. https://github.com/hao-ai-lab/FastVideo
54
+
55
+ For inference, we can inference FastHunyuan on single RTX4090. We now support NF4 and LLM-INT8 quantized inference using BitsAndBytes for FastHunyuan. With NF4 quantization, inference can be performed on a single RTX 4090 GPU, requiring just 20GB of VRAM.
56
+
57
+ For Lora Finetune, minimum hardware requirement
58
+ - 40 GB GPU memory each for 2 GPUs with lora
59
+ - 30 GB GPU memory each for 2 GPUs with CPU offload and lora.
60
+ ","{""id"": ""FastVideo/FastHunyuan"", ""author"": ""FastVideo"", ""sha"": ""7113d8031b751b8734bdc08d60d2cbe27b16dca2"", ""last_modified"": ""2025-01-08 00:48:34+00:00"", ""created_at"": ""2024-12-16 00:27:41+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 97, ""downloads_all_time"": null, ""likes"": 186, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""safetensors"", ""text-to-video"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\nlicense_name: tencent-hunyuan-community\nlicense_link: LICENSE\npipeline_tag: text-to-video"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/distilled/1.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/distilled/2.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/distilled/3.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/distilled/4.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/logo.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/undistilled/1.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/undistilled/2.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/undistilled/3.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/undistilled/4.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p/vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-t2v-720p/vae/pytorch_model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/flax_model.msgpack', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/model.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='text_encoder_2/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/tf_model.h5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-08 00:48:34+00:00"", ""cardData"": ""license: other\nlicense_name: tencent-hunyuan-community\nlicense_link: LICENSE\npipeline_tag: text-to-video"", ""transformersInfo"": null, ""_id"": ""675f73fd9efed00f4da7ae05"", ""modelId"": ""FastVideo/FastHunyuan"", ""usedStorage"": 50190929002}",0,,0,,0,https://huggingface.co/city96/FastHunyuan-gguf,1,,0,huggingface/InferenceSupport/discussions/new?title=FastVideo/FastHunyuan&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BFastVideo%2FFastHunyuan%5D(%2FFastVideo%2FFastHunyuan)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
GPT-JT-6B-v1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ togethercomputer/GPT-JT-6B-v1,"---
3
+ datasets:
4
+ - natural_instructions
5
+ - the_pile
6
+ - cot
7
+ - Muennighoff/P3
8
+ inference:
9
+ parameters:
10
+ max_new_tokens: 5
11
+ temperature: 1.0
12
+ top_k: 1
13
+ license: apache-2.0
14
+ language:
15
+ - en
16
+ pipeline_tag: text-generation
17
+ widget:
18
+ -
19
+ example_title: ""Sentiment Analysis""
20
+ text: |-
21
+ The task is to label the post's emotion as sadness, joy, love, anger, fear, or surprise.
22
+
23
+ Input: I'm feeling quite sad and sorry for myself but ill snap out of it soon.
24
+ Output: sadness
25
+
26
+ Input: I am just feeling cranky and blue.
27
+ Output: anger
28
+
29
+ Input: I can have for a treat or if i am feeling festive.
30
+ Output:
31
+ -
32
+ example_title: ""Country Currency""
33
+ text: |-
34
+ Return the currency of the given country.
35
+
36
+ Input: Switzerland
37
+ Output: Swiss Franc
38
+
39
+ Input: India
40
+ Output:
41
+ -
42
+ example_title: ""Tweet Eval Hate""
43
+ text: |-
44
+ Label whether the following tweet contains hate speech against either immigrants or women. Hate Speech (HS) is commonly defined as any communication that disparages a person or a group on the basis of some characteristic such as race, color, ethnicity, gender, sexual orientation, nationality, religion, or other characteristics.
45
+ Possible labels:
46
+ 1. hate speech
47
+ 2. not hate speech
48
+
49
+ Tweet: HOW REFRESHING! In South Korea, there is no such thing as 'political correctness"" when it comes to dealing with Muslim refugee wannabes via @user
50
+ Label: hate speech
51
+
52
+ Tweet: New to Twitter-- any men on here know what the process is to get #verified?
53
+ Label: not hate speech
54
+
55
+ Tweet: Dont worry @user you are and will always be the most hysterical woman.
56
+ Label:
57
+ -
58
+ example_title: ""Entity Recognition""
59
+ text: |-
60
+ Extract all the names of people, places, and organizations from the following sentences.
61
+
62
+ Sentence: Satya Nadella, the CEO of Microsoft, was visiting the Bahamas last May.
63
+ Entities: Satya Nadella, Microsoft, Bahamas
64
+
65
+ Sentence: Pacific Northwest cities include Seattle and Portland, which I have visited with Vikash.
66
+ Entities:
67
+ -
68
+ example_title: ""Data Clearning""
69
+ text: |-
70
+ Format the data into a CSV file:
71
+
72
+ Input: Jane Doe jane.doe@gmail.com (520) 382 2435
73
+ Output: Jane Doe,jane.doe@gmail.com,520-382-2435
74
+
75
+ Input: Peter Lee (510) 333-2429 email: peter@yahoo.com
76
+ Output:
77
+ ---
78
+
79
+ <h1 style=""font-size: 42px"">GPT-JT<h1/>
80
+
81
+
82
+ ***<p style=""font-size: 24px"">Feel free to try out our [Online Demo](https://huggingface.co/spaces/togethercomputer/GPT-JT)!</p>***
83
+
84
+
85
+ # Model Summary
86
+
87
+ > With a new decentralized training algorithm, we fine-tuned GPT-J (6B) on 3.53 billion tokens, resulting in GPT-JT (6B), a model that outperforms many 100B+ parameter models on classification benchmarks.
88
+
89
+ We incorporated a collection of open techniques and datasets to build GPT-JT:
90
+ - GPT-JT is a fork of [EleutherAI](https://www.eleuther.ai)'s [GPT-J (6B)](https://huggingface.co/EleutherAI/gpt-j-6B);
91
+ - We used [UL2](https://github.com/google-research/google-research/tree/master/ul2)'s training objective, allowing the model to see bidirectional context of the prompt;
92
+ - The model was trained on a large collection of diverse data, including [Chain-of-Thought (CoT)](https://ai.googleblog.com/2022/05/language-models-perform-reasoning-via.html), [Public Pool of Prompts (P3) dataset](https://huggingface.co/datasets/bigscience/P3), [Natural-Instructions (NI) dataset](https://github.com/allenai/natural-instructions).
93
+
94
+ With the help of techniques mentioned above, GPT-JT significantly improves the performance of classification tasks over the original GPT-J, and even outperforms most 100B+ parameter models!
95
+
96
+ # Quick Start
97
+
98
+ ```python
99
+ from transformers import pipeline
100
+ pipe = pipeline(model='togethercomputer/GPT-JT-6B-v1')
101
+ pipe('''""I love this!"" Is it positive? A:''')
102
+ ```
103
+ or
104
+ ```python
105
+ from transformers import AutoTokenizer, AutoModelForCausalLM
106
+ tokenizer = AutoTokenizer.from_pretrained(""togethercomputer/GPT-JT-6B-v1"")
107
+ model = AutoModelForCausalLM.from_pretrained(""togethercomputer/GPT-JT-6B-v1"")
108
+ ```
109
+
110
+ # License
111
+
112
+ The weights of GPT-JT-6B-v1 are licensed under version 2.0 of the Apache License.
113
+
114
+ # Training Details
115
+
116
+ ## UL2 Training Objective
117
+
118
+ We train GPT-JT using UL2 training objective [1][2].
119
+ The original GPT-J uses causal mask (as shown below left) for autoregressive generation. So for each token, it can only see its previous context.
120
+ In order to fully leverage the context information, we continue to train GPT-J with UL2 training objectives, and uses causal mask with prefix (as shown below right) -- using bidirectional attention for the prompt / input and causal attention for token generation.
121
+ Intuitively, being able to see context bidirectionally might improve downstream tasks that require this information.
122
+
123
+ $$
124
+ \begin{bmatrix}
125
+ 1 & 0 & 0 & 0 & 0 \\
126
+ 1 & 1 & 0 & 0 & 0 \\
127
+ 1 & 1 & 1 & 0 & 0 \\
128
+ 1 & 1 & 1 & 1 & 0 \\
129
+ 1 & 1 & 1 & 1 & 1
130
+ \end{bmatrix}
131
+
132
+ \begin{bmatrix}
133
+ 1 & 1 & 1 & 0 & 0 \\
134
+ 1 & 1 & 1 & 0 & 0 \\
135
+ 1 & 1 & 1 & 0 & 0 \\
136
+ 1 & 1 & 1 & 1 & 0 \\
137
+ 1 & 1 & 1 & 1 & 1
138
+ \end{bmatrix}
139
+ $$
140
+
141
+ Furthermore, we leverage a large collection of data, including [Natural-Instructions](https://github.com/allenai/natural-instructions), [P3](https://huggingface.co/datasets/Muennighoff/P3), [MMLU-COT](https://github.com/jasonwei20/flan-2/blob/main/mmlu-cot.json), and [the Pile](https://huggingface.co/datasets/the_pile)
142
+ Specifically, we first conduct training for 2.62 billion tokens using the UL2 loss on the Pile, followed by 0.92 billion tokens with a mixture of the above datasets: 5% of COT, 20% of P3, 20% of NI, and 55% of the Pile.
143
+
144
+ ## Hyperparameters
145
+
146
+ We used AdamW with a learning rate of 1e-5 and global batch size of 64 (16 for each data parallel worker).
147
+ We used mix-precision training where the activation is in FP16 while the optimizer states are kept in FP32.
148
+ We use both data parallelism and pipeline parallelism to conduct training.
149
+ During training, we truncate the input sequence to 2048 tokens, and for input sequence that contains less than 2048 tokens, we concatenate multiple sequences into one long sequence to improve the data efficiency.
150
+
151
+ ## Infrastructure
152
+
153
+ We used [the Together Research Computer](https://together.xyz/) to conduct training.
154
+
155
+ # References
156
+
157
+ [1]: Tay, Yi, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, and Donald Metzler. ""Unifying Language Learning Paradigms."" arXiv preprint arXiv:2205.05131 (2022).
158
+
159
+ [2]: Tay, Yi, Jason Wei, Hyung Won Chung, Vinh Q. Tran, David R. So, Siamak Shakeri, Xavier Garcia et al. ""Transcending scaling laws with 0.1% extra compute."" arXiv preprint arXiv:2210.11399 (2022).","{""id"": ""togethercomputer/GPT-JT-6B-v1"", ""author"": ""togethercomputer"", ""sha"": ""f34aa35f906895602c1f86f5685e598afdea8051"", ""last_modified"": ""2023-01-24 06:08:17+00:00"", ""created_at"": ""2022-11-24 06:09:34+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4479, ""downloads_all_time"": null, ""likes"": 302, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""gptj"", ""text-generation"", ""en"", ""dataset:natural_instructions"", ""dataset:the_pile"", ""dataset:cot"", ""dataset:Muennighoff/P3"", ""arxiv:2205.05131"", ""arxiv:2210.11399"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- natural_instructions\n- the_pile\n- cot\n- Muennighoff/P3\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ninference:\n parameters:\n max_new_tokens: 5\n temperature: 1\n top_k: 1\nwidget:\n- example_title: Sentiment Analysis\n text: 'The task is to label the post''s emotion as sadness, joy, love, anger, fear,\n or surprise.\n\n\n Input: I''m feeling quite sad and sorry for myself but ill snap out of it soon.\n\n Output: sadness\n\n\n Input: I am just feeling cranky and blue.\n\n Output: anger\n\n\n Input: I can have for a treat or if i am feeling festive.\n\n Output:'\n- example_title: Country Currency\n text: 'Return the currency of the given country.\n\n\n Input: Switzerland\n\n Output: Swiss Franc\n\n\n Input: India\n\n Output:'\n- example_title: Tweet Eval Hate\n text: 'Label whether the following tweet contains hate speech against either immigrants\n or 
women. Hate Speech (HS) is commonly defined as any communication that disparages\n a person or a group on the basis of some characteristic such as race, color, ethnicity,\n gender, sexual orientation, nationality, religion, or other characteristics.\n\n Possible labels:\n\n 1. hate speech\n\n 2. not hate speech\n\n\n Tweet: HOW REFRESHING! In South Korea, there is no such thing as ''political correctness\""\n when it comes to dealing with Muslim refugee wannabes via @user\n\n Label: hate speech\n\n\n Tweet: New to Twitter-- any men on here know what the process is to get #verified?\n\n Label: not hate speech\n\n\n Tweet: Dont worry @user you are and will always be the most hysterical woman.\n\n Label:'\n- example_title: Entity Recognition\n text: 'Extract all the names of people, places, and organizations from the following\n sentences.\n\n\n Sentence: Satya Nadella, the CEO of Microsoft, was visiting the Bahamas last May.\n\n Entities: Satya Nadella, Microsoft, Bahamas\n\n\n Sentence: Pacific Northwest cities include Seattle and Portland, which I have\n visited with Vikash.\n\n Entities:'\n- example_title: Data Clearning\n text: 'Format the data into a CSV file:\n\n\n Input: Jane Doe jane.doe@gmail.com (520) 382 2435\n\n Output: Jane Doe,jane.doe@gmail.com,520-382-2435\n\n\n Input: Peter Lee (510) 333-2429 email: peter@yahoo.com\n\n Output:'"", ""widget_data"": [{""example_title"": ""Sentiment Analysis"", ""text"": ""The task is to label the post's emotion as sadness, joy, love, anger, fear, or surprise.\n\nInput: I'm feeling quite sad and sorry for myself but ill snap out of it soon.\nOutput: sadness\n\nInput: I am just feeling cranky and blue.\nOutput: anger\n\nInput: I can have for a treat or if i am feeling festive.\nOutput:""}, {""example_title"": ""Country Currency"", ""text"": ""Return the currency of the given country.\n\nInput: Switzerland\nOutput: Swiss Franc\n\nInput: India\nOutput:""}, {""example_title"": ""Tweet Eval Hate"", ""text"": ""Label whether 
the following tweet contains hate speech against either immigrants or women. Hate Speech (HS) is commonly defined as any communication that disparages a person or a group on the basis of some characteristic such as race, color, ethnicity, gender, sexual orientation, nationality, religion, or other characteristics.\nPossible labels:\n1. hate speech\n2. not hate speech\n\nTweet: HOW REFRESHING! In South Korea, there is no such thing as 'political correctness\"" when it comes to dealing with Muslim refugee wannabes via @user\nLabel: hate speech\n\nTweet: New to Twitter-- any men on here know what the process is to get #verified?\nLabel: not hate speech\n\nTweet: Dont worry @user you are and will always be the most hysterical woman.\nLabel:""}, {""example_title"": ""Entity Recognition"", ""text"": ""Extract all the names of people, places, and organizations from the following sentences.\n\nSentence: Satya Nadella, the CEO of Microsoft, was visiting the Bahamas last May.\nEntities: Satya Nadella, Microsoft, Bahamas\n\nSentence: Pacific Northwest cities include Seattle and Portland, which I have visited with Vikash.\nEntities:""}, {""example_title"": ""Data Clearning"", ""text"": ""Format the data into a CSV file:\n\nInput: Jane Doe jane.doe@gmail.com (520) 382 2435\nOutput: Jane Doe,jane.doe@gmail.com,520-382-2435\n\nInput: Peter Lee (510) 333-2429 email: peter@yahoo.com\nOutput:""}], ""model_index"": null, ""config"": {""architectures"": [""GPTJForCausalLM""], ""model_type"": ""gptj"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<|endoftext|>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""<|endoftext|>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<|endoftext|>"", ""lstrip"": false, ""normalized"": true, 
""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""togethercomputer/GPT-JT"", ""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""Sharathhebbar24/One-stop-for-Open-source-models"", ""gsaivinay/open_llm_leaderboard"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""kz-transformers/kaz-llm-lb"", ""felixz/open_llm_leaderboard"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""BAAI/open_flageval_vlm_leaderboard"", ""b1sheng/kg_llm_leaderboard_test"", ""neubla/neubla-llm-evaluation-board"", ""srikanthsrnvs/togethercomputer-GPT-JT-6B-v1"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""akhaliq/GPT-JT-6B-v1"", ""Emkay/Asrix_Bot"", ""jbest2007/PersonalFinanceAssistant"", ""clam004/demo-app"", ""smothiki/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard_two"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""gorocdcdc/togethercomputer-GPT-JT-6B-v1"", 
""asir0z/open_llm_leaderboard"", ""kbmlcoding/open_llm_leaderboard_free"", ""K00B404/One-stop-till-you-drop"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""Asiya057/Incarna-Mind"", ""Asiya057/Incarna-Mind-POC"", ""mjalg/IFEvalTR"", ""creaturebot/GPT-JT""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-01-24 06:08:17+00:00"", ""cardData"": ""datasets:\n- natural_instructions\n- the_pile\n- cot\n- Muennighoff/P3\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ninference:\n parameters:\n max_new_tokens: 5\n temperature: 1\n top_k: 1\nwidget:\n- example_title: Sentiment Analysis\n text: 'The task is to label the post''s emotion as sadness, joy, love, anger, fear,\n or surprise.\n\n\n Input: I''m feeling quite sad and sorry for myself but ill snap out of it soon.\n\n Output: sadness\n\n\n Input: I am just feeling cranky and blue.\n\n Output: anger\n\n\n Input: I can have for a treat or if i am feeling festive.\n\n Output:'\n- example_title: Country Currency\n text: 'Return the currency of the given country.\n\n\n Input: Switzerland\n\n Output: Swiss Franc\n\n\n Input: India\n\n Output:'\n- example_title: Tweet Eval Hate\n text: 'Label whether the following tweet contains hate speech against either immigrants\n or women. Hate Speech (HS) is commonly defined as any communication that disparages\n a person or a group on the basis of some characteristic such as race, color, ethnicity,\n gender, sexual orientation, nationality, religion, or other characteristics.\n\n Possible labels:\n\n 1. hate speech\n\n 2. not hate speech\n\n\n Tweet: HOW REFRESHING! 
In South Korea, there is no such thing as ''political correctness\""\n when it comes to dealing with Muslim refugee wannabes via @user\n\n Label: hate speech\n\n\n Tweet: New to Twitter-- any men on here know what the process is to get #verified?\n\n Label: not hate speech\n\n\n Tweet: Dont worry @user you are and will always be the most hysterical woman.\n\n Label:'\n- example_title: Entity Recognition\n text: 'Extract all the names of people, places, and organizations from the following\n sentences.\n\n\n Sentence: Satya Nadella, the CEO of Microsoft, was visiting the Bahamas last May.\n\n Entities: Satya Nadella, Microsoft, Bahamas\n\n\n Sentence: Pacific Northwest cities include Seattle and Portland, which I have\n visited with Vikash.\n\n Entities:'\n- example_title: Data Clearning\n text: 'Format the data into a CSV file:\n\n\n Input: Jane Doe jane.doe@gmail.com (520) 382 2435\n\n Output: Jane Doe,jane.doe@gmail.com,520-382-2435\n\n\n Input: Peter Lee (510) 333-2429 email: peter@yahoo.com\n\n Output:'"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""637f0a9e0bb954fb10e4837a"", ""modelId"": ""togethercomputer/GPT-JT-6B-v1"", ""usedStorage"": 24438556919}",0,,0,,0,,0,,0,"Asiya057/Incarna-Mind, BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, GTBench/GTBench, Intel/low_bit_open_llm_leaderboard, OPTML-Group/UnlearnCanvas-Benchmark, Sharathhebbar24/One-stop-for-Open-source-models, Vikhrmodels/small-shlepa-lb, felixz/open_llm_leaderboard, gsaivinay/open_llm_leaderboard, 
huggingface/InferenceSupport/discussions/new?title=togethercomputer/GPT-JT-6B-v1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btogethercomputer%2FGPT-JT-6B-v1%5D(%2Ftogethercomputer%2FGPT-JT-6B-v1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kz-transformers/kaz-llm-lb, togethercomputer/GPT-JT",13
Hotshot-XL_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ hotshotco/Hotshot-XL,"---
3
+ license: openrail++
4
+ tags:
5
+ - text-to-video
6
+ - stable-diffusion
7
+ ---
8
+
9
+ ![image/gif](https://cdn-uploads.huggingface.co/production/uploads/637a6daf7ce76c3b83497ea2/ux_sZKB9snVPsKRT1TzfG.gif)
10
+
11
+ <font size=""32"">**Try Hotshot-XL yourself here**: https://www.hotshot.co</font>
12
+
13
+ Hotshot-XL is an AI text-to-GIF model trained to work alongside [Stable Diffusion XL](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0).
14
+
15
+ Hotshot-XL can generate GIFs with any fine-tuned SDXL model. This means two things:
16
+ 1. You’ll be able to make GIFs with any existing or newly fine-tuned SDXL model you may want to use.
17
+ 2. If you'd like to make GIFs of personalized subjects, you can load your own SDXL based LORAs, and not have to worry about fine-tuning Hotshot-XL. This is awesome because it’s usually much easier to find suitable images for training data than it is to find videos. It also hopefully fits into everyone's existing LORA usage/workflows :) See more [here](https://github.com/hotshotco/Hotshot-XL/blob/main/README.md#text-to-gif-with-personalized-loras).
18
+
19
+ Hotshot-XL is compatible with SDXL ControlNet to make GIFs in the composition/layout you’d like. See [here](https://github.com/hotshotco/Hotshot-XL/blob/main/README.md#text-to-gif-with-controlnet) for more info.
20
+
21
+ Hotshot-XL was trained to generate 1 second GIFs at 8 FPS.
22
+
23
+ Hotshot-XL was trained on various aspect ratios. For best results with the base Hotshot-XL model, we recommend using it with an SDXL model that has been fine-tuned with 512x512 images. You can find an SDXL model we fine-tuned for 512x512 resolutions [here](https://github.com/hotshotco/Hotshot-XL/blob/main/README.md#text-to-gif-with-personalized-loras).
24
+
25
+
26
+
27
+ ![image/gif](https://cdn-uploads.huggingface.co/production/uploads/637a6daf7ce76c3b83497ea2/XXgnk14nIasPdkvkPlDzn.gif)
28
+ ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/637a6daf7ce76c3b83497ea2/6OknWOlsl9Zs_esGtPTlZ.jpeg)
29
+
30
+ Source code is available at https://github.com/hotshotco/Hotshot-XL.
31
+
32
+ # Model Description
33
+ - **Developed by**: Natural Synthetics Inc.
34
+ - **Model type**: Diffusion-based text-to-GIF generative model
35
+ - **License**: [CreativeML Open RAIL++-M License](https://huggingface.co/hotshotco/Hotshot-XL/raw/main/LICENSE.md)
36
+ - **Model Description**: This is a model that can be used to generate and modify GIFs based on text prompts. It is a Latent Diffusion Model that uses two fixed, pretrained text encoders (OpenCLIP-ViT/G and CLIP-ViT/L).
37
+ - **Resources for more information**: Check out our [GitHub Repository](https://github.com/hotshotco/Hotshot-XL).
38
+
39
+
40
+ # Limitations and Bias
41
+ ## Limitations
42
+ - The model does not achieve perfect photorealism
43
+ - The model cannot render legible text
44
+ - The model struggles with more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere”
45
+ - Faces and people in general may not be generated properly.
46
+
47
+ ## Bias
48
+ While the capabilities of video generation models are impressive, they can also reinforce or exacerbate social biases.","{""id"": ""hotshotco/Hotshot-XL"", ""author"": ""hotshotco"", ""sha"": ""300d6a691ab6d62e74348f2e0d430e6d82ee2864"", ""last_modified"": ""2023-10-11 17:44:31+00:00"", ""created_at"": ""2023-10-03 08:20:30+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2172, ""downloads_all_time"": null, ""likes"": 312, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""text-to-video"", ""stable-diffusion"", ""license:openrail++"", ""diffusers:HotshotXLPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: openrail++\ntags:\n- text-to-video\n- stable-diffusion"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""HotshotXLPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hsxl_temporal_layers.f16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hsxl_temporal_layers.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='text_encoder_2/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""TIGER-Lab/VideoScore-Leaderboard"", ""Rooni/TextToVideo"", ""7jimmy/imgtotext"", ""234bcn/gpt2"", ""crlizcan/test"", ""marselgames9/marselgames9-gif135"", ""marselgames9/marselgames9-gif135animation"", ""marselgames9/marselgames9-1234567gif"", ""soiz1/ComfyUI-Demo""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-10-11 17:44:31+00:00"", ""cardData"": ""license: openrail++\ntags:\n- text-to-video\n- stable-diffusion"", ""transformersInfo"": null, ""_id"": ""651bcecea2a4b126253ce4e1"", ""modelId"": ""hotshotco/Hotshot-XL"", ""usedStorage"": 16249041532}",0,,0,,0,,0,,0,"234bcn/gpt2, 7jimmy/imgtotext, Rooni/TextToVideo, TIGER-Lab/VideoScore-Leaderboard, crlizcan/test, 
huggingface/InferenceSupport/discussions/new?title=hotshotco/Hotshot-XL&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhotshotco%2FHotshot-XL%5D(%2Fhotshotco%2FHotshot-XL)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, marselgames9/marselgames9-1234567gif, marselgames9/marselgames9-gif135, marselgames9/marselgames9-gif135animation, soiz1/ComfyUI-Demo",10
Hunyuan3D-1_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ tencent/Hunyuan3D-1,"---
3
+ library_name: hunyuan3d-2
4
+ license: other
5
+ license_name: tencent-hunyuan-community
6
+ license_link: https://huggingface.co/tencent/Hunyuan3D-1/blob/main/LICENSE.txt
7
+ language:
8
+ - en
9
+ - zh
10
+ tags:
11
+ - image-to-3d
12
+ - text-to-3d
13
+ pipeline_tag: image-to-3d
14
+ ---
15
+ <!-- ## **Hunyuan3D-1.0** -->
16
+
17
+ <p align=""center"">
18
+ <img src=""./assets/logo.png"" height=200>
19
+ </p>
20
+
21
+ # Tencent Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D Generation
22
+
23
+ <div align=""center"">
24
+ <a href=""https://github.com/tencent/Hunyuan3D-1""><img src=""https://img.shields.io/static/v1?label=Code&message=Github&color=blue&logo=github-pages""></a> &ensp;
25
+ <a href=""https://3d.hunyuan.tencent.com""><img src=""https://img.shields.io/static/v1?label=Homepage&message=Tencent Hunyuan3D&color=blue&logo=github-pages""></a> &ensp;
26
+ <a href=""https://arxiv.org/pdf/2411.02293""><img src=""https://img.shields.io/static/v1?label=Tech Report&message=Arxiv&color=red&logo=arxiv""></a> &ensp;
27
+ <a href=""https://huggingface.co/Tencent/Hunyuan3D-1""><img src=""https://img.shields.io/static/v1?label=Checkpoints&message=HuggingFace&color=yellow""></a> &ensp;
28
+ <a href=""https://huggingface.co/spaces/Tencent/Hunyuan3D-1""><img src=""https://img.shields.io/static/v1?label=Demo&message=HuggingFace&color=yellow""></a> &ensp;
29
+ </div>
30
+
31
+
32
+ ## 🔥🔥🔥 News!!
33
+
34
+ * Nov 5, 2024: 💬 We support demo running image_to_3d generation now. Please check the [script](#using-gradio) below.
35
+ * Nov 5, 2024: 💬 We support demo running text_to_3d generation now. Please check the [script](#using-gradio) below.
36
+
37
+
38
+ ## 📑 Open-source Plan
39
+
40
+ - [x] Inference
41
+ - [x] Checkpoints
42
+ - [ ] Baking related
43
+ - [ ] Training
44
+ - [ ] ComfyUI
45
+ - [ ] Distillation Version
46
+ - [ ] TensorRT Version
47
+
48
+
49
+
50
+ ## **Abstract**
51
+ <p align=""center"">
52
+ <img src=""./assets/teaser.png"" height=450>
53
+ </p>
54
+
55
+ While 3D generative models have greatly improved artists' workflows, the existing diffusion models for 3D generation suffer from slow generation and poor generalization. To address this issue, we propose a two-stage approach named Hunyuan3D-1.0 including a lite version and a standard version, that both support text- and image-conditioned generation.
56
+
57
+ In the first stage, we employ a multi-view diffusion model that efficiently generates multi-view RGB in approximately 4 seconds. These multi-view images capture rich details of the 3D asset from different viewpoints, relaxing the tasks from single-view to multi-view reconstruction. In the second stage, we introduce a feed-forward reconstruction model that rapidly and faithfully reconstructs the 3D asset given the generated multi-view images in approximately 7 seconds. The reconstruction network learns to handle noises and in-consistency introduced by the multi-view diffusion and leverages the available information from the condition image to efficiently recover the 3D structure.
58
+
59
+ Our framework involves the text-to-image model, i.e., Hunyuan-DiT, making it a unified framework to support both text- and image-conditioned 3D generation. Our standard version has 3x more parameters than our lite and other existing model. Our Hunyuan3D-1.0 achieves an impressive balance between speed and quality, significantly reducing generation time while maintaining the quality and diversity of the produced assets.
60
+
61
+
62
+ ## 🎉 **Hunyuan3D-1 Architecture**
63
+
64
+ <p align=""center"">
65
+ <img src=""./assets/overview_3.png"" height=400>
66
+ </p>
67
+
68
+
69
+ ## 📈 Comparisons
70
+
71
+ We have evaluated Hunyuan3D-1.0 with other open-source 3d-generation methods, our Hunyuan3D-1.0 received the highest user preference across 5 metrics. Details in the picture on the lower left.
72
+
73
+ The lite model takes around 10 seconds to produce a 3D mesh from a single image on an NVIDIA A100 GPU, while the standard model takes roughly 25 seconds. The plot laid out in the lower right demonstrates that Hunyuan3D-1.0 achieves an optimal balance between quality and efficiency.
74
+
75
+ <p align=""center"">
76
+ <img src=""./assets/radar.png"" height=300>
77
+ <img src=""./assets/runtime.png"" height=300>
78
+ </p>
79
+
80
+ ## Get Started
81
+
82
+ #### Begin by cloning the repository:
83
+
84
+ ```shell
85
+ git clone https://github.com/tencent/Hunyuan3D-1
86
+ cd Hunyuan3D-1
87
+ ```
88
+
89
+ #### Installation Guide for Linux
90
+
91
+ We provide an env_install.sh script file for setting up environment.
92
+
93
+ ```
94
+ # step 1, create conda env
95
+ conda create -n hunyuan3d-1 python=3.9 or 3.10 or 3.11 or 3.12
96
+ conda activate hunyuan3d-1
97
+
98
+ # step 2. install torch realated package
99
+ which pip # check pip corresponds to python
100
+
101
+ # modify the cuda version according to your machine (recommended)
102
+ pip install torch torchvision --index-url https://download.pytorch.org/whl/cu121
103
+
104
+ # step 3. install other packages
105
+ bash env_install.sh
106
+ ```
107
+ <details>
108
+ <summary>💡Other tips for envrionment installation</summary>
109
+
110
+ Optionally, you can install xformers or flash_attn to acclerate computation:
111
+
112
+ ```
113
+ pip install xformers --index-url https://download.pytorch.org/whl/cu121
114
+ ```
115
+ ```
116
+ pip install flash_attn
117
+ ```
118
+
119
+ Most environment errors are caused by a mismatch between machine and packages. You can try manually specifying the version, as shown in the following successful cases:
120
+ ```
121
+ # python3.9
122
+ pip install torch==2.0.1 torchvision==0.15.2 --index-url https://download.pytorch.org/whl/cu118
123
+ ```
124
+
125
+ when install pytorch3d, the gcc version is preferably greater than 9, and the gpu driver should not be too old.
126
+
127
+ </details>
128
+
129
+ #### Download Pretrained Models
130
+
131
+ The models are available at [https://huggingface.co/tencent/Hunyuan3D-1](https://huggingface.co/tencent/Hunyuan3D-1):
132
+
133
+ + `Hunyuan3D-1/lite`, lite model for multi-view generation.
134
+ + `Hunyuan3D-1/std`, standard model for multi-view generation.
135
+ + `Hunyuan3D-1/svrm`, sparse-view reconstruction model.
136
+
137
+
138
+ To download the model, first install the huggingface-cli. (Detailed instructions are available [here](https://huggingface.co/docs/huggingface_hub/guides/cli).)
139
+
140
+ ```shell
141
+ python3 -m pip install ""huggingface_hub[cli]""
142
+ ```
143
+
144
+ Then download the model using the following commands:
145
+
146
+ ```shell
147
+ mkdir weights
148
+ huggingface-cli download tencent/Hunyuan3D-1 --local-dir ./weights
149
+
150
+ mkdir weights/hunyuanDiT
151
+ huggingface-cli download Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled --local-dir ./weights/hunyuanDiT
152
+ ```
153
+
154
+ #### Inference
155
+ For text to 3d generation, we supports bilingual Chinese and English, you can use the following command to inference.
156
+ ```python
157
+ python3 main.py \
158
+ --text_prompt ""a lovely rabbit"" \
159
+ --save_folder ./outputs/test/ \
160
+ --max_faces_num 90000 \
161
+ --do_texture_mapping \
162
+ --do_render
163
+ ```
164
+
165
+ For image to 3d generation, you can use the following command to inference.
166
+ ```python
167
+ python3 main.py \
168
+ --image_prompt ""/path/to/your/image"" \
169
+ --save_folder ./outputs/test/ \
170
+ --max_faces_num 90000 \
171
+ --do_texture_mapping \
172
+ --do_render
173
+ ```
174
+ We list some more useful configurations for easy usage:
175
+
176
+ | Argument | Default | Description |
177
+ |:------------------:|:---------:|:---------------------------------------------------:|
178
+ |`--text_prompt` | None |The text prompt for 3D generation |
179
+ |`--image_prompt` | None |The image prompt for 3D generation |
180
+ |`--t2i_seed` | 0 |The random seed for generating images |
181
+ |`--t2i_steps` | 25 |The number of steps for sampling of text to image |
182
+ |`--gen_seed` | 0 |The random seed for generating 3d generation |
183
+ |`--gen_steps` | 50 |The number of steps for sampling of 3d generation |
184
+ |`--max_faces_numm` | 90000 |The limit number of faces of 3d mesh |
185
+ |`--save_memory` | False |module will move to cpu automatically|
186
+ |`--do_texture_mapping` | False |Change vertex shadding to texture shading |
187
+ |`--do_render` | False |render gif |
188
+
189
+
190
+ We have also prepared scripts with different configurations for reference
191
+ - Inference Std-pipeline requires 30GB VRAM (24G VRAM with --save_memory).
192
+ - Inference Lite-pipeline requires 22GB VRAM (18G VRAM with --save_memory).
193
+ - Note: --save_memory will increase inference time
194
+
195
+ ```bash
196
+ bash scripts/text_to_3d_std.sh
197
+ bash scripts/text_to_3d_lite.sh
198
+ bash scripts/image_to_3d_std.sh
199
+ bash scripts/image_to_3d_lite.sh
200
+ ```
201
+
202
+ If your gpu memory is 16G, you can try to run modules in pipeline seperately:
203
+ ```bash
204
+ bash scripts/text_to_3d_std_separately.sh 'a lovely rabbit' ./outputs/test # >= 16G
205
+ bash scripts/text_to_3d_lite_separately.sh 'a lovely rabbit' ./outputs/test # >= 14G
206
+ bash scripts/image_to_3d_std_separately.sh ./demos/example_000.png ./outputs/test # >= 16G
207
+ bash scripts/image_to_3d_lite_separately.sh ./demos/example_000.png ./outputs/test # >= 10G
208
+ ```
209
+
210
+ #### Using Gradio
211
+
212
+ We have prepared two versions of multi-view generation, std and lite.
213
+
214
+ ```shell
215
+ # std
216
+ python3 app.py
217
+ python3 app.py --save_memory
218
+
219
+ # lite
220
+ python3 app.py --use_lite
221
+ python3 app.py --use_lite --save_memory
222
+ ```
223
+
224
+ Then the demo can be accessed through http://0.0.0.0:8080. It should be noted that the 0.0.0.0 here needs to be X.X.X.X with your server IP.
225
+
226
+ ## Camera Parameters
227
+
228
+ Output views are a fixed set of camera poses:
229
+
230
+ + Azimuth (relative to input view): `+0, +60, +120, +180, +240, +300`.
231
+
232
+
233
+ ## Citation
234
+
235
+ If you found this repository helpful, please cite our report:
236
+ ```bibtex
237
+ @misc{yang2024tencent,
238
+ title={Tencent Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D Generation},
239
+ author={Xianghui Yang and Huiwen Shi and Bowen Zhang and Fan Yang and Jiacheng Wang and Hongxu Zhao and Xinhai Liu and Xinzhou Wang and Qingxiang Lin and Jiaao Yu and Lifu Wang and Zhuo Chen and Sicong Liu and Yuhong Liu and Yong Yang and Di Wang and Jie Jiang and Chunchao Guo},
240
+ year={2024},
241
+ eprint={2411.02293},
242
+ archivePrefix={arXiv},
243
+ primaryClass={cs.CV}
244
+ }
245
+ ```","{""id"": ""tencent/Hunyuan3D-1"", ""author"": ""tencent"", ""sha"": ""e0315a42d8a0f5a63e57abeace1737d34d700109"", ""last_modified"": ""2025-03-22 13:37:23+00:00"", ""created_at"": ""2024-11-01 08:42:28+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3230, ""downloads_all_time"": null, ""likes"": 301, ""library_name"": ""hunyuan3d-2"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""hunyuan3d-2"", ""diffusers"", ""safetensors"", ""image-to-3d"", ""text-to-3d"", ""en"", ""zh"", ""arxiv:2411.02293"", ""license:other"", ""region:us""], ""pipeline_tag"": ""image-to-3d"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\n- zh\nlibrary_name: hunyuan3d-2\nlicense: other\nlicense_name: tencent-hunyuan-community\nlicense_link: https://huggingface.co/tencent/Hunyuan3D-1/blob/main/LICENSE.txt\npipeline_tag: image-to-3d\ntags:\n- image-to-3d\n- text-to-3d"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/logo.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/overview_3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/radar.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/runtime.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/teaser.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/.gitattributes', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='mvd_lite/feature_extractor_clip/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/feature_extractor_vae/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/text_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/vision_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/vision_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_lite/vision_encoder/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/feature_extractor_vae/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/model_index.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='mvd_std/scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/uc_text_emb.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/uc_text_emb_2.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/unet/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/vision_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/vision_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/vision_encoder_2/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/vision_encoder_2/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mvd_std/vision_processor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='svrm/svrm.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""tencent/Hunyuan3D-1"", ""TencentARC/FreeSplatter"", ""LPDoctor/AIGC-3D"", ""agrharsh4321/AIGC-3D"", ""Nymbo/Hunyuan3D-2"", ""MMD-Coder/Hunyuan3D-2.0"", ""sizifart/siz3d"", ""IsaacVal/First_agent_template""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-22 13:37:23+00:00"", ""cardData"": ""language:\n- en\n- zh\nlibrary_name: hunyuan3d-2\nlicense: other\nlicense_name: tencent-hunyuan-community\nlicense_link: https://huggingface.co/tencent/Hunyuan3D-1/blob/main/LICENSE.txt\npipeline_tag: image-to-3d\ntags:\n- image-to-3d\n- text-to-3d"", ""transformersInfo"": null, ""_id"": ""67249474eca5299dbe3c085c"", 
""modelId"": ""tencent/Hunyuan3D-1"", ""usedStorage"": 20506449683}",0,https://huggingface.co/img-gemina/text2speech,1,,0,,0,,0,"IsaacVal/First_agent_template, LPDoctor/AIGC-3D, MMD-Coder/Hunyuan3D-2.0, Nymbo/Hunyuan3D-2, Tencent/Hunyuan3D-1, TencentARC/FreeSplatter, agrharsh4321/AIGC-3D, huggingface/InferenceSupport/discussions/new?title=tencent/Hunyuan3D-1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btencent%2FHunyuan3D-1%5D(%2Ftencent%2FHunyuan3D-1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, sizifart/siz3d, tencent/Hunyuan3D-1",10
246
+ img-gemina/text2speech,"---
247
+ license: mit
248
+ datasets:
249
+ - BAAI/Infinity-MM
250
+ language:
251
+ - id
252
+ metrics:
253
+ - accuracy
254
+ base_model:
255
+ - tencent/Hunyuan3D-1
256
+ new_version: microsoft/OmniParser
257
+ library_name: transformers
258
+ ---","{""id"": ""img-gemina/text2speech"", ""author"": ""img-gemina"", ""sha"": ""82332d1849ff2f5137a50be6a468586aa092884c"", ""last_modified"": ""2024-11-22 11:28:44+00:00"", ""created_at"": ""2024-11-22 11:25:52+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""id"", ""dataset:BAAI/Infinity-MM"", ""base_model:tencent/Hunyuan3D-1"", ""base_model:finetune:tencent/Hunyuan3D-1"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- tencent/Hunyuan3D-1\ndatasets:\n- BAAI/Infinity-MM\nlanguage:\n- id\nlibrary_name: transformers\nlicense: mit\nmetrics:\n- accuracy\nnew_version: microsoft/OmniParser"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='train.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-22 11:28:44+00:00"", ""cardData"": ""base_model:\n- tencent/Hunyuan3D-1\ndatasets:\n- BAAI/Infinity-MM\nlanguage:\n- id\nlibrary_name: transformers\nlicense: mit\nmetrics:\n- accuracy\nnew_version: microsoft/OmniParser"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67406a40125bcd870384b481"", ""modelId"": ""img-gemina/text2speech"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=img-gemina/text2speech&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimg-gemina%2Ftext2speech%5D(%2Fimg-gemina%2Ftext2speech)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Hunyuan3D-2mv_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ tencent/Hunyuan3D-2mv,"---
3
+ library_name: hunyuan3d-2
4
+ license: other
5
+ license_name: tencent-hunyuan-community
6
+ license_link: https://huggingface.co/tencent/Hunyuan3D-2/blob/main/LICENSE.txt
7
+ language:
8
+ - en
9
+ - zh
10
+ tags:
11
+ - image-to-3d
12
+ - text-to-3d
13
+ pipeline_tag: image-to-3d
14
+ ---
15
+
16
+ <p align=""center"">
17
+ <img src=""https://huggingface.co/tencent/Hunyuan3D-2/resolve/main/assets/images/teaser.jpg"">
18
+ </p>
19
+
20
+ <div align=""center"">
21
+ <a href=https://3d.hunyuan.tencent.com target=""_blank""><img src=https://img.shields.io/badge/Hunyuan3D-black.svg?logo=homepage height=22px></a>
22
+ <a href=https://huggingface.co/spaces/tencent/Hunyuan3D-2mv target=""_blank""><img src=https://img.shields.io/badge/%F0%9F%A4%97%20Demo-276cb4.svg height=22px></a>
23
+ <a href=https://huggingface.co/tencent/Hunyuan3D-2mv target=""_blank""><img src=https://img.shields.io/badge/%F0%9F%A4%97%20Models-d96902.svg height=22px></a>
24
+ <a href=https://github.com/Tencent/Hunyuan3D-2 target=""_blank""><img src= https://img.shields.io/badge/Github-bb8a2e.svg?logo=github height=22px></a>
25
+ <a href=https://discord.gg/GuaWYwzKbX target=""_blank""><img src= https://img.shields.io/badge/Discord-white.svg?logo=discord height=22px></a>
26
+ <a href=https://github.com/Tencent/Hunyuan3D-2/blob/main/assets/report/Tencent_Hunyuan3D_2_0.pdf target=""_blank""><img src=https://img.shields.io/badge/Report-b5212f.svg?logo=arxiv height=22px></a>
27
+ </div>
28
+
29
+
30
+ [//]: # ( <a href=# target=""_blank""><img src=https://img.shields.io/badge/Report-b5212f.svg?logo=arxiv height=22px></a>)
31
+
32
+ [//]: # ( <a href=# target=""_blank""><img src= https://img.shields.io/badge/Colab-8f2628.svg?logo=googlecolab height=22px></a>)
33
+
34
+ [//]: # ( <a href=""#""><img alt=""PyPI - Downloads"" src=""https://img.shields.io/pypi/v/mulankit?logo=pypi"" height=22px></a>)
35
+
36
+ <br>
37
+ <p align=""center"">
38
+ “ Living out everyone’s imagination on creating and manipulating 3D assets.”
39
+ </p>
40
+
41
+ This repository contains the models of the paper [Hunyuan3D 2.0: Scaling Diffusion Models for High Resolution Textured 3D Assets Generation](https://huggingface.co/papers/2501.12202).
42
+
43
+ **Hunyuan3D-2mv** is finetuned from [Hunyuan3D-2](https://huggingface.co/tencent/Hunyuan3D-2) to support multiview controlled shape generation.
44
+
45
+ ## 🤗 Get Started with Hunyuan3D 2mv
46
+
47
+ Here is a simple usage:
48
+
49
+ ```python
50
+ from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline
51
+ pipeline = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(
52
+ 'tencent/Hunyuan3D-2mv',
53
+ subfolder='hunyuan3d-dit-v2-mv',
54
+ use_safetensors=True,
55
+ device='cuda'
56
+ )
57
+ mesh = pipeline(
58
+ image={
59
+ ""front"": ""your front view image.png"",
60
+ ""left"": ""your left view image.png"",
61
+ ""back"": ""your back view image.png""
62
+ },
63
+ num_inference_steps=30,
64
+ octree_resolution=380,
65
+ num_chunks=20000,
66
+ generator=torch.manual_seed(12345),
67
+ output_type='trimesh'
68
+ )[0]
69
+ ```
70
+
71
+ For code and more details on how to use it, refer to the [Github repository](https://github.com/Tencent/Hunyuan3D-2).
72
+
73
+
74
+
75
+ ## 🔗 BibTeX
76
+
77
+ If you found this repository helpful, please cite our report:
78
+
79
+ ```bibtex
80
+ @misc{hunyuan3d22025tencent,
81
+ title={Hunyuan3D 2.0: Scaling Diffusion Models for High Resolution Textured 3D Assets Generation},
82
+ author={Tencent Hunyuan3D Team},
83
+ year={2025},
84
+ eprint={2501.12202},
85
+ archivePrefix={arXiv},
86
+ primaryClass={cs.CV}
87
+ }
88
+
89
+ @misc{yang2024tencent,
90
+ title={Tencent Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D Generation},
91
+ author={Tencent Hunyuan3D Team},
92
+ year={2024},
93
+ eprint={2411.02293},
94
+ archivePrefix={arXiv},
95
+ primaryClass={cs.CV}
96
+ }
97
+ ```
98
+
99
+ ## Community Resources
100
+
101
+ Thanks for the contributions of community members, here we have these great extensions of Hunyuan3D 2.0:
102
+
103
+ - [ComfyUI-Hunyuan3DWrapper](https://github.com/kijai/ComfyUI-Hunyuan3DWrapper)
104
+ - [Hunyuan3D-2-for-windows](https://github.com/sdbds/Hunyuan3D-2-for-windows)
105
+ - [📦 A bundle for running on Windows | 整合包](https://github.com/YanWenKun/Comfy3D-WinPortable/releases/tag/r8-hunyuan3d2)
106
+
107
+ ## Acknowledgements
108
+
109
+ We would like to thank the contributors to
110
+ the [DINOv2](https://github.com/facebookresearch/dinov2), [Stable Diffusion](https://github.com/Stability-AI/stablediffusion), [FLUX](https://github.com/black-forest-labs/flux), [diffusers](https://github.com/huggingface/diffusers)
111
+ and [HuggingFace](https://huggingface.co) repositories, for their open research and exploration.
112
+
113
+ ","{""id"": ""tencent/Hunyuan3D-2mv"", ""author"": ""tencent"", ""sha"": ""ea1415a196ba61f465e923072172713aa023e6b0"", ""last_modified"": ""2025-03-19 16:12:59+00:00"", ""created_at"": ""2025-03-12 11:36:17+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 9470, ""downloads_all_time"": null, ""likes"": 371, ""library_name"": ""hunyuan3d-2"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""hunyuan3d-2"", ""image-to-3d"", ""text-to-3d"", ""en"", ""zh"", ""arxiv:2501.12202"", ""arxiv:2411.02293"", ""license:other"", ""region:us""], ""pipeline_tag"": ""image-to-3d"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\n- zh\nlibrary_name: hunyuan3d-2\nlicense: other\nlicense_name: tencent-hunyuan-community\nlicense_link: https://huggingface.co/tencent/Hunyuan3D-2/blob/main/LICENSE.txt\npipeline_tag: image-to-3d\ntags:\n- image-to-3d\n- text-to-3d"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan3d-dit-v2-mv-fast/config.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan3d-dit-v2-mv-fast/model.fp16.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan3d-dit-v2-mv-fast/model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan3d-dit-v2-mv-turbo/config.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan3d-dit-v2-mv-turbo/model.fp16.ckpt', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='hunyuan3d-dit-v2-mv-turbo/model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan3d-dit-v2-mv/config.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan3d-dit-v2-mv/model.fp16.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan3d-dit-v2-mv/model.fp16.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""tencent/Hunyuan3D-2mv""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-19 16:12:59+00:00"", ""cardData"": ""language:\n- en\n- zh\nlibrary_name: hunyuan3d-2\nlicense: other\nlicense_name: tencent-hunyuan-community\nlicense_link: https://huggingface.co/tencent/Hunyuan3D-2/blob/main/LICENSE.txt\npipeline_tag: image-to-3d\ntags:\n- image-to-3d\n- text-to-3d"", ""transformersInfo"": null, ""_id"": ""67d171b18da5ca753ec1334e"", ""modelId"": ""tencent/Hunyuan3D-2mv"", ""usedStorage"": 29580814970}",0,,0,,0,,0,,0,"huggingface/InferenceSupport/discussions/143, tencent/Hunyuan3D-2mv",2
HunyuanVideo-I2V_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,524 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ tencent/HunyuanVideo-I2V,"---
3
+ license: other
4
+ license_name: tencent-hunyuan-community
5
+ license_link: LICENSE
6
+ pipeline_tag: image-to-video
7
+ ---
8
+ <!-- ## **HunyuanVideo** -->
9
+
10
+ [中文阅读](./README_zh.md)
11
+
12
+ <p align=""center"">
13
+ <img src=""https://raw.githubusercontent.com/Tencent/HunyuanVideo-I2V/refs/heads/main/assets/logo.png"" height=100>
14
+ </p>
15
+
16
+ # **HunyuanVideo-I2V** 🌅
17
+
18
+ -----
19
+
20
+ Following the great successful open-sourcing of our [HunyuanVideo](https://github.com/Tencent/HunyuanVideo), we proudly present the [HunyuanVideo-I2V](https://github.com/Tencent/HunyuanVideo-I2V), a new image-to-video generation framework to accelerate open-source community exploration!
21
+
22
+ This repo contains offical PyTorch model definitions, pre-trained weights and inference/sampling code. You can find more visualizations on our [project page](https://aivideo.hunyuan.tencent.com). Meanwhile, we have released the LoRA training code for customizable special effects, which can be used to create more interesting video effects.
23
+
24
+ > [**HunyuanVideo: A Systematic Framework For Large Video Generation Model**](https://arxiv.org/abs/2412.03603) <be>
25
+
26
+
27
+
28
+ ## 🔥🔥🔥 News!!
29
+ * Mar 13, 2025: 🚀 We release the parallel inference code for HunyuanVideo-I2V powered by [xDiT](https://github.com/xdit-project/xDiT).
30
+ * Mar 07, 2025: 🔥 We have fixed the bug in our open-source version that caused ID changes. Please try the new model weights of [HunyuanVideo-I2V](https://huggingface.co/tencent/HunyuanVideo-I2V) to ensure full visual consistency in the first frame and produce higher quality videos.
31
+ * Mar 06, 2025: 👋 We release the inference code and model weights of HunyuanVideo-I2V. [Download](https://github.com/Tencent/HunyuanVideo-I2V/blob/main/ckpts/README.md).
32
+
33
+
34
+ ## 📑 Open-source Plan
35
+ - HunyuanVideo-I2V (Image-to-Video Model)
36
+ - [x] Inference
37
+ - [x] Checkpoints
38
+ - [x] ComfyUI
39
+ - [x] Lora training scripts
40
+ - [x] Multi-gpus Sequence Parallel inference (Faster inference speed on more gpus)
41
+ - [ ] Diffusers
42
+
43
+ ## Contents
44
+ - [**HunyuanVideo-I2V** 🌅](#hunyuanvideo-i2v-)
45
+ - [🔥🔥🔥 News!!](#-news)
46
+ - [📑 Open-source Plan](#-open-source-plan)
47
+ - [Contents](#contents)
48
+ - [**HunyuanVideo-I2V Overall Architecture**](#hunyuanvideo-i2v-overall-architecture)
49
+ - [📜 Requirements](#-requirements)
50
+ - [🛠️ Dependencies and Installation](#️-dependencies-and-installation)
51
+ - [Installation Guide for Linux](#installation-guide-for-linux)
52
+ - [🧱 Download Pretrained Models](#-download-pretrained-models)
53
+ - [🔑 Single-gpu Inference](#-single-gpu-inference)
54
+ - [Tips for Using Image-to-Video Models](#tips-for-using-image-to-video-models)
55
+ - [Using Command Line](#using-command-line)
56
+ - [More Configurations](#more-configurations)
57
+ - [🎉 Customizable I2V LoRA effects training](#-customizable-i2v-lora-effects-training)
58
+ - [Requirements](#requirements)
59
+ - [Environment](#environment)
60
+ - [Training data construction](#training-data-construction)
61
+ - [Training](#training)
62
+ - [Inference](#inference)
63
+ - [🚀 Parallel Inference on Multiple GPUs by xDiT](#-parallel-inference-on-multiple-gpus-by-xdit)
64
+ - [Using Command Line](#using-command-line-1)
65
+ - [🔗 BibTeX](#-bibtex)
66
+ - [Acknowledgements](#acknowledgements)
67
+ ---
68
+
69
+ ## **HunyuanVideo-I2V Overall Architecture**
70
+ Leveraging the advanced video generation capabilities of [HunyuanVideo](https://github.com/Tencent/HunyuanVideo), we have extended its application to image-to-video generation tasks. To achieve this, we employ a token replace technique to effectively reconstruct and incorporate reference image information into the video generation process.
71
+
72
+ Since we utilizes a pre-trained Multimodal Large Language Model (MLLM) with a Decoder-Only architecture as the text encoder, we can significantly enhance the model's ability to comprehend the semantic content of the input image and to seamlessly integrate information from both the image and its associated caption. Specifically, the input image is processed by the MLLM to generate semantic image tokens. These tokens are then concatenated with the video latent tokens, enabling comprehensive full-attention computation across the combined data.
73
+
74
+ The overall architecture of our system is designed to maximize the synergy between image and text modalities, ensuring a robust and coherent generation of video content from static images. This integration not only improves the fidelity of the generated videos but also enhances the model's ability to interpret and utilize complex multimodal inputs. The overall architecture is as follows.
75
+ <p align=""center"">
76
+ <img src=""https://raw.githubusercontent.com/Tencent/HunyuanVideo-I2V/refs/heads/main/assets/backbone.png"" style=""max-width: 45%; height: auto;"">
77
+ </p>
78
+
79
+
80
+
81
+
82
+ ## 📜 Requirements
83
+
84
+ The following table shows the requirements for running HunyuanVideo-I2V model (batch size = 1) to generate videos:
85
+
86
+ | Model | Resolution | GPU Peak Memory |
87
+ |:----------------:|:-----------:|:----------------:|
88
+ | HunyuanVideo-I2V | 720p | 60GB |
89
+
90
+
91
+ * An NVIDIA GPU with CUDA support is required.
92
+ * The model is tested on a single 80G GPU.
93
+ * **Minimum**: The minimum GPU memory required is 60GB for 720p.
94
+ * **Recommended**: We recommend using a GPU with 80GB of memory for better generation quality.
95
+ * Tested operating system: Linux
96
+
97
+ ## 🛠️ Dependencies and Installation
98
+
99
+ Begin by cloning the repository:
100
+ ```shell
101
+ git clone https://github.com/tencent/HunyuanVideo-I2V
102
+ cd HunyuanVideo-I2V
103
+ ```
104
+
105
+ ### Installation Guide for Linux
106
+
107
+ We recommend CUDA versions 12.4 or 11.8 for the manual installation.
108
+
109
+ Conda's installation instructions are available [here](https://docs.anaconda.com/free/miniconda/index.html).
110
+
111
+ ```shell
112
+ # 1. Create conda environment
113
+ conda create -n HunyuanVideo-I2V python==3.11.9
114
+
115
+ # 2. Activate the environment
116
+ conda activate HunyuanVideo-I2V
117
+
118
+ # 3. Install PyTorch and other dependencies using conda
119
+ # For CUDA 12.4
120
+ conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 pytorch-cuda=12.4 -c pytorch -c nvidia
121
+
122
+ # 4. Install pip dependencies
123
+ python -m pip install -r requirements.txt
124
+
125
+ # 5. Install flash attention v2 for acceleration (requires CUDA 11.8 or above)
126
+ python -m pip install ninja
127
+ python -m pip install git+https://github.com/Dao-AILab/flash-attention.git@v2.6.3
128
+
129
+ # 6. Install xDiT for parallel inference (It is recommended to use torch 2.4.0 and flash-attn 2.6.3)
130
+ python -m pip install xfuser==0.4.0
131
+ ```
132
+
133
+ In case of running into float point exception(core dump) on the specific GPU type, you may try the following solutions:
134
+
135
+ ```shell
136
+ # Making sure you have installed CUDA 12.4, CUBLAS>=12.4.5.8, and CUDNN>=9.00 (or simply using our CUDA 12 docker image).
137
+ pip install nvidia-cublas-cu12==12.4.5.8
138
+ export LD_LIBRARY_PATH=/opt/conda/lib/python3.8/site-packages/nvidia/cublas/lib/
139
+ ```
140
+
141
+ Additionally, HunyuanVideo-I2V also provides a pre-built Docker image. Use the following command to pull and run the docker image.
142
+
143
+ ```shell
144
+ # For CUDA 12.4 (updated to avoid float point exception)
145
+ docker pull hunyuanvideo/hunyuanvideo-i2v:cuda12
146
+ docker run -itd --gpus all --init --net=host --uts=host --ipc=host --name hunyuanvideo-i2v --security-opt=seccomp=unconfined --ulimit=stack=67108864 --ulimit=memlock=-1 --privileged hunyuanvideo/hunyuanvideo-i2v:cuda12
147
+ ```
148
+
149
+
150
+ ## 🧱 Download Pretrained Models
151
+
152
+ The details of download pretrained models are shown [here](ckpts/README.md).
153
+
154
+
155
+
156
+ ## 🔑 Single-gpu Inference
157
+
158
+ Similar to [HunyuanVideo](https://github.com/Tencent/HunyuanVideo), HunyuanVideo-I2V supports high-resolution video generation, with resolution up to 720P and video length up to 129 frames (5 seconds).
159
+
160
+ ### Tips for Using Image-to-Video Models
161
+ - **Use Concise Prompts**: To effectively guide the model's generation, keep your prompts short and to the point.
162
+ - **Include Key Elements**: A well-structured prompt should cover:
163
+ - **Main Subject**: Specify the primary focus of the video.
164
+ - **Action**: Describe the main movement or activity taking place.
165
+ - **Background (Optional)**: Set the scene for the video.
166
+ - **Camera Angle (Optional)**: Indicate the perspective or viewpoint.
167
+ - **Avoid Overly Detailed Prompts**: Lengthy or highly detailed prompts can lead to unnecessary transitions in the video output.
168
+
169
+ <!-- **For image-to-video models, we recommend using concise prompts to guide the model's generation process. A good prompt should include elements such as background, main subject, action, and camera angle. Overly long or excessively detailed prompts may introduce unnecessary transitions.** -->
170
+
171
+ ### Using Command Line
172
+
173
+ <!-- ### Run a Gradio Server
174
+ ```bash
175
+ python3 gradio_server.py --flow-reverse
176
+
177
+ # set SERVER_NAME and SERVER_PORT manually
178
+ # SERVER_NAME=0.0.0.0 SERVER_PORT=8081 python3 gradio_server.py --flow-reverse
179
+ ``` -->
180
+ If you want to generate a more **stable** video, you can set `--i2v-stability` and `--flow-shift 7.0`. Execute the command as follows
181
+ ```bash
182
+ cd HunyuanVideo-I2V
183
+
184
+ python3 sample_image2video.py \
185
+ --model HYVideo-T/2 \
186
+ --prompt ""An Asian man with short hair in black tactical uniform and white clothes waves a firework stick."" \
187
+ --i2v-mode \
188
+ --i2v-image-path ./assets/demo/i2v/imgs/0.jpg \
189
+ --i2v-resolution 720p \
190
+ --i2v-stability \
191
+ --infer-steps 50 \
192
+ --video-length 129 \
193
+ --flow-reverse \
194
+ --flow-shift 7.0 \
195
+ --seed 0 \
196
+ --embedded-cfg-scale 6.0 \
197
+ --use-cpu-offload \
198
+ --save-path ./results
199
+ ```
200
+ If you want to generate a more **high-dynamic** video, you can **unset** `--i2v-stability` and `--flow-shift 17.0`. Execute the command as follows
201
+ ```bash
202
+ cd HunyuanVideo-I2V
203
+
204
+ python3 sample_image2video.py \
205
+ --model HYVideo-T/2 \
206
+ --prompt ""An Asian man with short hair in black tactical uniform and white clothes waves a firework stick."" \
207
+ --i2v-mode \
208
+ --i2v-image-path ./assets/demo/i2v/imgs/0.jpg \
209
+ --i2v-resolution 720p \
210
+ --infer-steps 50 \
211
+ --video-length 129 \
212
+ --flow-reverse \
213
+ --flow-shift 17.0 \
214
+ --embedded-cfg-scale 6.0 \
215
+ --seed 0 \
216
+ --use-cpu-offload \
217
+ --save-path ./results
218
+ ```
219
+ ### More Configurations
220
+
221
+ We list some more useful configurations for easy usage:
222
+
223
+ | Argument | Default | Description |
224
+ |:----------------------:|:----------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
225
+ | `--prompt` | None | The text prompt for video generation. |
226
+ | `--model` | HYVideo-T/2-cfgdistill | Here we use HYVideo-T/2 for I2V, HYVideo-T/2-cfgdistill is used for T2V mode. |
227
+ | `--i2v-mode` | False | Whether to open i2v mode. |
228
+ | `--i2v-image-path` | ./assets/demo/i2v/imgs/0.jpg | The reference image for video generation. |
229
+ | `--i2v-resolution` | 720p | The resolution for the generated video. |
230
+ | `--i2v-stability` | False | Whether to use stable mode for i2v inference. |
231
+ | `--video-length` | 129 | The length of the generated video. |
232
+ | `--infer-steps` | 50 | The number of steps for sampling. |
233
+ | `--flow-shift` | 7.0 | Shift factor for flow matching schedulers. We recommend 7 with `--i2v-stability` switch on for more stable video, 17 with `--i2v-stability` switch off for more dynamic video |
234
+ | `--flow-reverse` | False | If reverse, learning/sampling from t=1 -> t=0. |
235
+ | `--seed` | None | The random seed for generating video, if None, we init a random seed. |
236
+ | `--use-cpu-offload` | False | Use CPU offload for the model load to save more memory, necessary for high-res video generation. |
237
+ | `--save-path` | ./results | Path to save the generated video. |
238
+
239
+
240
+
241
+ ## 🎉 Customizable I2V LoRA effects training
242
+
243
+ ### Requirements
244
+
245
+ The following table shows the requirements for training HunyuanVideo-I2V lora model (batch size = 1) to generate videos:
246
+
247
+ | Model | Resolution | GPU Peak Memory |
248
+ |:----------------:|:----------:|:---------------:|
249
+ | HunyuanVideo-I2V | 360p | 79GB |
250
+
251
+ * An NVIDIA GPU with CUDA support is required.
252
+ * The model is tested on a single 80G GPU.
253
+ * **Minimum**: The minimum GPU memory required is 79GB for 360p.
254
+ * **Recommended**: We recommend using a GPU with 80GB of memory for better generation quality.
255
+ * Tested operating system: Linux
256
+ * Note: You can train with 360p data and directly infer 720p videos
257
+
258
+ ### Environment
259
+ ```
260
+ pip install -r requirements.txt
261
+ ```
262
+
263
+ ### Training data construction
264
+ Prompt description: The trigger word is written directly in the video caption. It is recommended to use a phrase or short sentence.
265
+
266
+ For example, AI hair growth effect (trigger): rapid_hair_growth, The hair of the characters in the video is growing rapidly. + original prompt
267
+
268
+ After having the training video and prompt pair, refer to [here](hyvideo/hyvae_extract/README.md) for training data construction.
269
+
270
+
271
+ ### Training
272
+ ```
273
+ cd HunyuanVideo-I2V
274
+
275
+ sh scripts/run_train_image2video_lora.sh
276
+ ```
277
+ We list some training specific configurations for easy usage:
278
+
279
+ | Argument | Default | Description |
280
+ |:----------------:|:-------------------------------------------------------------:|:-----------------------------------------------------------:|
281
+ | `SAVE_BASE` | . | Root path for saving experimental results. |
282
+ | `EXP_NAME` | i2v_lora | Path suffix for saving experimental results. |
283
+ | `DATA_JSONS_DIR` | ./assets/demo/i2v_lora/train_dataset/processed_data/json_path | Data jsons dir generated by hyvideo/hyvae_extract/start.sh. |
284
+ | `CHIEF_IP` | 127.0.0.1 | Master node IP of the machine. |
285
+
286
+ After training, you can find `pytorch_lora_kohaya_weights.safetensors` in `{SAVE_BASE}/log_EXP/*_{EXP_NAME}/checkpoints/global_step{*}/pytorch_lora_kohaya_weights.safetensors` and set it in `--lora-path` to perform inference.
287
+
288
+ ### Inference
289
+ ```bash
290
+ cd HunyuanVideo-I2V
291
+
292
+ python3 sample_image2video.py \
293
+ --model HYVideo-T/2 \
294
+ --prompt ""Two people hugged tightly, In the video, two people are standing apart from each other. They then move closer to each other and begin to hug tightly. The hug is very affectionate, with the two people holding each other tightly and looking into each other's eyes. The interaction is very emotional and heartwarming, with the two people expressing their love and affection for each other."" \
295
+ --i2v-mode \
296
+ --i2v-image-path ./assets/demo/i2v_lora/imgs/embrace.png \
297
+ --i2v-resolution 720p \
298
+ --i2v-stability \
299
+ --infer-steps 50 \
300
+ --video-length 129 \
301
+ --flow-reverse \
302
+ --flow-shift 5.0 \
303
+ --embedded-cfg-scale 6.0 \
304
+ --seed 0 \
305
+ --use-cpu-offload \
306
+ --save-path ./results \
307
+ --use-lora \
308
+ --lora-scale 1.0 \
309
+ --lora-path ./ckpts/hunyuan-video-i2v-720p/lora/embrace_kohaya_weights.safetensors
310
+ ```
311
+ We list some lora specific configurations for easy usage:
312
+
313
+ | Argument | Default | Description |
314
+ |:-------------------:|:-------:|:----------------------------:|
315
+ | `--use-lora` | False | Whether to open lora mode. |
316
+ | `--lora-scale` | 1.0 | Fusion scale for lora model. |
317
+ | `--lora-path` | """" | Weight path for lora model. |
318
+
319
+ ## 🚀 Parallel Inference on Multiple GPUs by xDiT
320
+
321
+ [xDiT](https://github.com/xdit-project/xDiT) is a Scalable Inference Engine for Diffusion Transformers (DiTs) on multi-GPU Clusters.
322
+ It has successfully provided low-latency parallel inference solutions for a variety of DiTs models, including mochi-1, CogVideoX, Flux.1, SD3, etc. This repo adopted the [Unified Sequence Parallelism (USP)](https://arxiv.org/abs/2405.07719) APIs for parallel inference of the HunyuanVideo-I2V model.
323
+
324
+ ### Using Command Line
325
+
326
+ For example, to generate a video with 8 GPUs, you can use the following command:
327
+
328
+ ```bash
329
+ cd HunyuanVideo-I2V
330
+
331
+ torchrun --nproc_per_node=8 sample_image2video.py \
332
+ --model HYVideo-T/2 \
333
+ --prompt ""An Asian man with short hair in black tactical uniform and white clothes waves a firework stick."" \
334
+ --i2v-mode \
335
+ --i2v-image-path ./assets/demo/i2v/imgs/0.jpg \
336
+ --i2v-resolution 720p \
337
+ --i2v-stability \
338
+ --infer-steps 50 \
339
+ --video-length 129 \
340
+ --flow-reverse \
341
+ --flow-shift 7.0 \
342
+ --seed 0 \
343
+ --embedded-cfg-scale 6.0 \
344
+ --save-path ./results \
345
+ --ulysses-degree 8 \
346
+ --ring-degree 1 \
347
+ --video-size 1280 720 \
348
+ --xdit-adaptive-size
349
+ ```
350
+
351
+ You can change the `--ulysses-degree` and `--ring-degree` to control the parallel configurations for the best performance.
352
+ Note that you need to set `--video-size` since xDiT's acceleration mechanism has requirements for the size of the video to be generated.
353
+ To prevent black padding after converting the original image height/width to the target height/width, you can use `--xdit-adaptive-size`.
354
+ The valid parallel configurations are shown in the following table.
355
+
356
+ <details>
357
+ <summary>Supported Parallel Configurations (Click to expand)</summary>
358
+
359
+ | --video-size | --video-length | --ulysses-degree x --ring-degree | --nproc_per_node |
360
+ |----------------------|----------------|----------------------------------|------------------|
361
+ | 1280 720 or 720 1280 | 129 | 8x1,4x2,2x4,1x8 | 8 |
362
+ | 1280 720 or 720 1280 | 129 | 1x5 | 5 |
363
+ | 1280 720 or 720 1280 | 129 | 4x1,2x2,1x4 | 4 |
364
+ | 1280 720 or 720 1280 | 129 | 3x1,1x3 | 3 |
365
+ | 1280 720 or 720 1280 | 129 | 2x1,1x2 | 2 |
366
+ | 1104 832 or 832 1104 | 129 | 4x1,2x2,1x4 | 4 |
367
+ | 1104 832 or 832 1104 | 129 | 3x1,1x3 | 3 |
368
+ | 1104 832 or 832 1104 | 129 | 2x1,1x2 | 2 |
369
+ | 960 960 | 129 | 6x1,3x2,2x3,1x6 | 6 |
370
+ | 960 960 | 129 | 4x1,2x2,1x4 | 4 |
371
+ | 960 960 | 129 | 3x1,1x3 | 3 |
372
+ | 960 960 | 129 | 1x2,2x1 | 2 |
373
+ | 960 544 or 544 960 | 129 | 6x1,3x2,2x3,1x6 | 6 |
374
+ | 960 544 or 544 960 | 129 | 4x1,2x2,1x4 | 4 |
375
+ | 960 544 or 544 960 | 129 | 3x1,1x3 | 3 |
376
+ | 960 544 or 544 960 | 129 | 1x2,2x1 | 2 |
377
+ | 832 624 or 624 832 | 129 | 4x1,2x2,1x4 | 4 |
378
+ | 832 624 or 624 832   | 129            | 3x1,1x3                          | 3                |
379
+ | 832 624 or 624 832 | 129 | 2x1,1x2 | 2 |
380
+ | 720 720 | 129 | 1x5 | 5 |
381
+ | 720 720 | 129 | 3x1,1x3 | 3 |
382
+
383
+ </details>
384
+
385
+
386
+ <p align=""center"">
387
+ <table align=""center"">
388
+ <thead>
389
+ <tr>
390
+ <th colspan=""4"">Latency (Sec) for 1280x720 (129 frames 50 steps) on 8xGPU</th>
391
+ </tr>
392
+ <tr>
393
+ <th>1</th>
394
+ <th>2</th>
395
+ <th>4</th>
396
+ <th>8</th>
397
+ </tr>
398
+ </thead>
399
+ <tbody>
400
+ <tr>
401
+ <th>1904.08</th>
402
+ <th>934.09 (2.04x)</th>
403
+ <th>514.08 (3.70x)</th>
404
+ <th>337.58 (5.64x)</th>
405
+ </tr>
406
+
407
+ </tbody>
408
+ </table>
409
+ </p>
410
+
411
+
412
+ ## 🔗 BibTeX
413
+
414
+ If you find [HunyuanVideo](https://arxiv.org/abs/2412.03603) useful for your research and applications, please cite using this BibTeX:
415
+
416
+ ```BibTeX
417
+ @misc{kong2024hunyuanvideo,
418
+ title={HunyuanVideo: A Systematic Framework For Large Video Generative Models},
419
+ author={Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, Kathrina Wu, Qin Lin, Aladdin Wang, Andong Wang, Changlin Li, Duojun Huang, Fang Yang, Hao Tan, Hongmei Wang, Jacob Song, Jiawang Bai, Jianbing Wu, Jinbao Xue, Joey Wang, Junkun Yuan, Kai Wang, Mengyang Liu, Pengyu Li, Shuai Li, Weiyan Wang, Wenqing Yu, Xinchi Deng, Yang Li, Yanxin Long, Yi Chen, Yutao Cui, Yuanbo Peng, Zhentao Yu, Zhiyu He, Zhiyong Xu, Zixiang Zhou, Zunnan Xu, Yangyu Tao, Qinglin Lu, Songtao Liu, Dax Zhou, Hongfa Wang, Yong Yang, Di Wang, Yuhong Liu, and Jie Jiang, along with Caesar Zhong},
420
+ year={2024},
421
+ archivePrefix={arXiv preprint arXiv:2412.03603},
422
+ primaryClass={cs.CV},
423
+ url={https://arxiv.org/abs/2412.03603},
424
+ }
425
+ ```
426
+
427
+
428
+
429
+ ## Acknowledgements
430
+
431
+ We would like to thank the contributors to the [SD3](https://huggingface.co/stabilityai/stable-diffusion-3-medium), [FLUX](https://github.com/black-forest-labs/flux), [Llama](https://github.com/meta-llama/llama), [LLaVA](https://github.com/haotian-liu/LLaVA), [Xtuner](https://github.com/InternLM/xtuner), [diffusers](https://github.com/huggingface/diffusers) and [HuggingFace](https://huggingface.co) repositories, for their open research and exploration.
432
+ Additionally, we also thank the Tencent Hunyuan Multimodal team for their help with the text encoder.
433
+
434
+
435
+
436
+
437
+ <!-- ## Github Star History
438
+ <a href=""https://star-history.com/#Tencent/HunyuanVideo&Date"">
439
+ <picture>
440
+ <source media=""(prefers-color-scheme: dark)"" srcset=""https://api.star-history.com/svg?repos=Tencent/HunyuanVideo&type=Date&theme=dark"" />
441
+ <source media=""(prefers-color-scheme: light)"" srcset=""https://api.star-history.com/svg?repos=Tencent/HunyuanVideo&type=Date"" />
442
+ <img alt=""Star History Chart"" src=""https://api.star-history.com/svg?repos=Tencent/HunyuanVideo&type=Date"" />
443
+ </picture>
444
+ </a> -->
445
+ ","{""id"": ""tencent/HunyuanVideo-I2V"", ""author"": ""tencent"", ""sha"": ""3914f209367854b5e470f062c33159d5ab139e1e"", ""last_modified"": ""2025-03-13 10:36:22+00:00"", ""created_at"": ""2025-03-05 09:27:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3309, ""downloads_all_time"": null, ""likes"": 305, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""image-to-video"", ""arxiv:2412.03603"", ""arxiv:2405.07719"", ""license:other"", ""region:us""], ""pipeline_tag"": ""image-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\nlicense_name: tencent-hunyuan-community\nlicense_link: LICENSE\npipeline_tag: image-to-video"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-i2v-720p/lora/embrace_kohaya_weights.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-i2v-720p/lora/hair_growth_kohaya_weights.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-i2v-720p/transformers/mp_rank_00_model_states.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-i2v-720p/vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='hunyuan-video-i2v-720p/vae/pytorch_model.pt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-13 
10:36:22+00:00"", ""cardData"": ""license: other\nlicense_name: tencent-hunyuan-community\nlicense_link: LICENSE\npipeline_tag: image-to-video"", ""transformersInfo"": null, ""_id"": ""67c818e729514343cee6eb43"", ""modelId"": ""tencent/HunyuanVideo-I2V"", ""usedStorage"": 59395190150}",0,"https://huggingface.co/hunyuanvideo-community/HunyuanVideo-I2V, https://huggingface.co/hunyuanvideo-community/HunyuanVideo-I2V-33ch",2,,0,"https://huggingface.co/city96/HunyuanVideo-I2V-gguf, https://huggingface.co/calcuis/hyvid-i2v-gguf, https://huggingface.co/hanzogak/HunyuanVideo-I2V-gguf",3,,0,huggingface/InferenceSupport/discussions/1103,1
446
+ hunyuanvideo-community/HunyuanVideo-I2V,"---
447
+ base_model:
448
+ - tencent/HunyuanVideo-I2V
449
+ library_name: diffusers
450
+ pipeline_tag: image-to-video
451
+ ---
452
+
453
+ Unofficial community fork for Diffusers-format weights on [`tencent/HunyuanVideo-I2V`](https://huggingface.co/tencent/HunyuanVideo-I2V).
454
+
455
+ ### Using Diffusers
456
+
457
+ HunyuanVideo-I2V can be used directly from Diffusers. Install the latest version of Diffusers.
458
+
459
+ ```python
460
+ import torch
461
+ from diffusers import HunyuanVideoImageToVideoPipeline, HunyuanVideoTransformer3DModel
462
+ from diffusers.utils import load_image, export_to_video
463
+
464
+ # Available checkpoints: ""hunyuanvideo-community/HunyuanVideo-I2V"" and ""hunyuanvideo-community/HunyuanVideo-I2V-33ch""
465
+ model_id = ""hunyuanvideo-community/HunyuanVideo-I2V""
466
+ transformer = HunyuanVideoTransformer3DModel.from_pretrained(
467
+ model_id, subfolder=""transformer"", torch_dtype=torch.bfloat16
468
+ )
469
+ pipe = HunyuanVideoImageToVideoPipeline.from_pretrained(
470
+ model_id, transformer=transformer, torch_dtype=torch.float16
471
+ )
472
+ pipe.vae.enable_tiling()
473
+ pipe.to(""cuda"")
474
+
475
+ prompt = ""A man with short gray hair plays a red electric guitar.""
476
+ image = load_image(
477
+ ""https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png""
478
+ )
479
+
480
+ output = pipe(image=image, prompt=prompt).frames[0]
481
+ export_to_video(output, ""output.mp4"", fps=15)
482
+ ```
483
+
484
+ Refer to the [documentation](https://huggingface.co/docs/diffusers/main/en/api/pipelines/hunyuan_video) for more information.","{""id"": ""hunyuanvideo-community/HunyuanVideo-I2V"", ""author"": ""hunyuanvideo-community"", ""sha"": ""fb9d287ef02fe6d39f2e23df6dcec1294e6c28d2"", ""last_modified"": ""2025-03-19 16:17:53+00:00"", ""created_at"": ""2025-03-19 16:11:23+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 563, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""image-to-video"", ""base_model:tencent/HunyuanVideo-I2V"", ""base_model:finetune:tencent/HunyuanVideo-I2V"", ""diffusers:HunyuanVideoImageToVideoPipeline"", ""region:us""], ""pipeline_tag"": ""image-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- tencent/HunyuanVideo-I2V\nlibrary_name: diffusers\npipeline_tag: image-to-video"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""HunyuanVideoImageToVideoPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_processor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='text_encoder/model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00001-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00002-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00003-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00004-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00005-of-00005.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-19 16:17:53+00:00"", ""cardData"": ""base_model:\n- tencent/HunyuanVideo-I2V\nlibrary_name: diffusers\npipeline_tag: image-to-video"", ""transformersInfo"": null, ""_id"": ""67daecabf0dad894b39f6bae"", ""modelId"": ""hunyuanvideo-community/HunyuanVideo-I2V"", ""usedStorage"": 43644070598}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=hunyuanvideo-community/HunyuanVideo-I2V&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhunyuanvideo-community%2FHunyuanVideo-I2V%5D(%2Fhunyuanvideo-community%2FHunyuanVideo-I2V)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
485
+ hunyuanvideo-community/HunyuanVideo-I2V-33ch,"---
486
+ base_model:
487
+ - tencent/HunyuanVideo-I2V
488
+ library_name: diffusers
489
+ ---
490
+
491
+ Unofficial community fork for Diffusers-format weights on [`tencent/HunyuanVideo-I2V`](https://huggingface.co/tencent/HunyuanVideo-I2V).
492
+
493
+ ### Using Diffusers
494
+
495
+ HunyuanVideo-I2V can be used directly from Diffusers. Install the latest version of Diffusers.
496
+
497
+ ```python
498
+ import torch
499
+ from diffusers import HunyuanVideoImageToVideoPipeline, HunyuanVideoTransformer3DModel
500
+ from diffusers.utils import load_image, export_to_video
501
+
502
+ # Available checkpoints: ""hunyuanvideo-community/HunyuanVideo-I2V"" and ""hunyuanvideo-community/HunyuanVideo-I2V-33ch""
503
+ model_id = ""hunyuanvideo-community/HunyuanVideo-I2V-33ch""
504
+ transformer = HunyuanVideoTransformer3DModel.from_pretrained(
505
+ model_id, subfolder=""transformer"", torch_dtype=torch.bfloat16
506
+ )
507
+ pipe = HunyuanVideoImageToVideoPipeline.from_pretrained(
508
+ model_id, transformer=transformer, torch_dtype=torch.float16
509
+ )
510
+ pipe.vae.enable_tiling()
511
+ pipe.to(""cuda"")
512
+
513
+ prompt = ""A man with short gray hair plays a red electric guitar.""
514
+ image = load_image(
515
+ ""https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png""
516
+ )
517
+
518
+ output = pipe(image=image, prompt=prompt).frames[0]
519
+ export_to_video(output, ""output.mp4"", fps=15)
520
+ ```
521
+
522
+ Refer to the [documentation](https://huggingface.co/docs/diffusers/main/en/api/pipelines/hunyuan_video) for more information.
523
+
524
+ ","{""id"": ""hunyuanvideo-community/HunyuanVideo-I2V-33ch"", ""author"": ""hunyuanvideo-community"", ""sha"": ""fddc4df6b7fcc55080ba903e88ca62804f4dd479"", ""last_modified"": ""2025-03-20 05:26:24+00:00"", ""created_at"": ""2025-03-06 21:48:37+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 30, ""downloads_all_time"": null, ""likes"": 6, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""base_model:tencent/HunyuanVideo-I2V"", ""base_model:finetune:tencent/HunyuanVideo-I2V"", ""diffusers:HunyuanVideoImageToVideoPipeline"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- tencent/HunyuanVideo-I2V\nlibrary_name: diffusers"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""HunyuanVideoImageToVideoPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_processor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='text_encoder/model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder_2/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_2/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00001-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00002-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00003-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00004-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model-00005-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-20 05:26:24+00:00"", ""cardData"": ""base_model:\n- tencent/HunyuanVideo-I2V\nlibrary_name: diffusers"", ""transformersInfo"": null, ""_id"": ""67ca1835b73e313ed13ffce9"", ""modelId"": ""hunyuanvideo-community/HunyuanVideo-I2V-33ch"", ""usedStorage"": 43624028374}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=hunyuanvideo-community/HunyuanVideo-I2V-33ch&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhunyuanvideo-community%2FHunyuanVideo-I2V-33ch%5D(%2Fhunyuanvideo-community%2FHunyuanVideo-I2V-33ch)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
IP-Adapter_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ h94/IP-Adapter,"---
3
+ tags:
4
+ - text-to-image
5
+ - stable-diffusion
6
+ license: apache-2.0
7
+ language:
8
+ - en
9
+ library_name: diffusers
10
+ ---
11
+
12
+ # IP-Adapter Model Card
13
+
14
+
15
+ <div align=""center"">
16
+
17
+ [**Project Page**](https://ip-adapter.github.io) **|** [**Paper (ArXiv)**](https://arxiv.org/abs/2308.06721) **|** [**Code**](https://github.com/tencent-ailab/IP-Adapter)
18
+ </div>
19
+
20
+ ---
21
+
22
+
23
+ ## Introduction
24
+
25
+ we present IP-Adapter, an effective and lightweight adapter to achieve image prompt capability for the pre-trained text-to-image diffusion models. An IP-Adapter with only 22M parameters can achieve comparable or even better performance to a fine-tuned image prompt model. IP-Adapter can be generalized not only to other custom models fine-tuned from the same base model, but also to controllable generation using existing controllable tools. Moreover, the image prompt can also work well with the text prompt to accomplish multimodal image generation.
26
+
27
+ ![arch](./fig1.png)
28
+
29
+ ## Models
30
+
31
+ ### Image Encoder
32
+ - [models/image_encoder](https://huggingface.co/h94/IP-Adapter/tree/main/models/image_encoder): [OpenCLIP-ViT-H-14](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K) with 632.08M parameter
33
+ - [sdxl_models/image_encoder](https://huggingface.co/h94/IP-Adapter/tree/main/sdxl_models/image_encoder): [OpenCLIP-ViT-bigG-14](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) with 1844.9M parameter
34
+
35
+ More information can be found [here](https://laion.ai/blog/giant-openclip/)
36
+
37
+ ### IP-Adapter for SD 1.5
38
+ - [ip-adapter_sd15.bin](https://huggingface.co/h94/IP-Adapter/blob/main/models/ip-adapter_sd15.bin): use global image embedding from OpenCLIP-ViT-H-14 as condition
39
+ - [ip-adapter_sd15_light.bin](https://huggingface.co/h94/IP-Adapter/blob/main/models/ip-adapter_sd15_light.bin): same as ip-adapter_sd15, but more compatible with text prompt
40
+ - [ip-adapter-plus_sd15.bin](https://huggingface.co/h94/IP-Adapter/blob/main/models/ip-adapter-plus_sd15.bin): use patch image embeddings from OpenCLIP-ViT-H-14 as condition, closer to the reference image than ip-adapter_sd15
41
+ - [ip-adapter-plus-face_sd15.bin](https://huggingface.co/h94/IP-Adapter/blob/main/models/ip-adapter-plus-face_sd15.bin): same as ip-adapter-plus_sd15, but use cropped face image as condition
42
+
43
+ ### IP-Adapter for SDXL 1.0
44
+ - [ip-adapter_sdxl.bin](https://huggingface.co/h94/IP-Adapter/blob/main/sdxl_models/ip-adapter_sdxl.bin): use global image embedding from OpenCLIP-ViT-bigG-14 as condition
45
+ - [ip-adapter_sdxl_vit-h.bin](https://huggingface.co/h94/IP-Adapter/blob/main/sdxl_models/ip-adapter_sdxl_vit-h.bin): same as ip-adapter_sdxl, but use OpenCLIP-ViT-H-14
46
+ - [ip-adapter-plus_sdxl_vit-h.bin](https://huggingface.co/h94/IP-Adapter/blob/main/sdxl_models/ip-adapter-plus_sdxl_vit-h.bin): use patch image embeddings from OpenCLIP-ViT-H-14 as condition, closer to the reference image than ip-adapter_xl and ip-adapter_sdxl_vit-h
47
+ - [ip-adapter-plus-face_sdxl_vit-h.bin](https://huggingface.co/h94/IP-Adapter/blob/main/sdxl_models/ip-adapter-plus-face_sdxl_vit-h.bin): same as ip-adapter-plus_sdxl_vit-h, but use cropped face image as condition
48
+ ","{""id"": ""h94/IP-Adapter"", ""author"": ""h94"", ""sha"": ""018e402774aeeddd60609b4ecdb7e298259dc729"", ""last_modified"": ""2024-03-27 08:33:41+00:00"", ""created_at"": ""2023-08-16 04:15:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1159, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""text-to-image"", ""stable-diffusion"", ""en"", ""arxiv:2308.06721"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\ntags:\n- text-to-image\n- stable-diffusion"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fig1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/image_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/image_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/image_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter-full-face_sd15.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter-full-face_sd15.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter-plus-face_sd15.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter-plus-face_sd15.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter-plus_sd15.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='models/ip-adapter-plus_sd15.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter_sd15.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter_sd15.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter_sd15_light.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter_sd15_light.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter_sd15_light_v11.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter_sd15_vit-G.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='models/ip-adapter_sd15_vit-G.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/image_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/image_encoder/model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/image_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/ip-adapter-plus-face_sdxl_vit-h.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/ip-adapter-plus-face_sdxl_vit-h.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/ip-adapter-plus_sdxl_vit-h.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/ip-adapter-plus_sdxl_vit-h.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/ip-adapter_sdxl.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/ip-adapter_sdxl.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/ip-adapter_sdxl_vit-h.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='sdxl_models/ip-adapter_sdxl_vit-h.safetensors', size=None, blob_id=None, 
lfs=None)""], ""spaces"": [""Wuvin/Unique3D"", ""radames/Real-Time-Latent-Consistency-Model"", ""okaris/omni-zero"", ""InstantX/InstantStyle"", ""tencent/Hunyuan3D-2mv"", ""fffiloni/ZeST"", ""xingpng/CSGO"", ""AP123/AnimateImage"", ""ymzhang319/FoleyCrafter"", ""Leoxing/PIA"", ""MykolaL/StableDesign"", ""tencent/Hunyuan3D-2mini-Turbo"", ""TencentARC/ColorFlow"", ""ozgurkara/RAVE"", ""tight-inversion/tight-inversion"", ""clinteroni/outpainting-with-differential-diffusion-demo"", ""jeasinema/UltraEdit-SD3"", ""feishen29/IMAGDressing-v1"", ""radames/InstantStyle-SDXL-Lightning"", ""ginigen/3D-LLAMA"", ""prs-eth/rollingdepth"", ""ameerazam08/InstantStyle-GPU-Demo"", ""AP123/InstaSoyjak"", ""Huage001/LinFusion-SD-v1.5"", ""okaris/omni-zero-couples"", ""Yiyuan/InteractiveVideo"", ""radames/InstantStyle-Hyper-SD"", ""OzzyGT/diffusers-recolor"", ""tonyassi/IP-Adapter-Playground"", ""rynmurdock/generative_recsys"", ""ironbar/IP-Adapter-FaceID-Plus"", ""samir-fama/Image-Adapter-With-Face-ID"", ""radames/InstantStyle-Hyper-SDXL"", ""IP-composer/ip-composer"", ""jasperai/inversion-instantstyle"", ""Gyufyjk/FoleyCrafter"", ""kfirgold99/Piece-it-Together-Space"", ""ariG23498/makeanime"", ""kadirnar/ZeST"", ""Westlake-AGI-Lab/StyleStudio"", ""VIDraft/Unique3D"", ""mubarak-alketbi/Hunyuan3D-2mini-Turbo"", ""polymathai/AnimationDiff"", ""LTT/Kiss3DGen"", ""jiuface/ai-model-002"", ""Hatman/InstantStyle"", ""johnygoddard/outpainting-with-differential-diffusion-demo"", ""theSure/Omnieraser"", ""neil-ni/Unique3D"", ""LightningDrag/LightningDrag"", ""MohamedTalaat91/Image-Adapter-With-Face-ID"", ""cbensimon/omni-zero-couples"", ""qyoo/AID-v2"", ""JunhaoZhuang/Cobra"", ""dmaniloff/API-InstantStyle-SDXL-Lightning"", ""hideosnes/Zero-Shot-Material-Transfer"", ""kevinwang676/Diffutoon"", ""dezzman/diffusion_models"", ""hysts-duplicates/Unique3D"", ""cavargas10/Unico3D-Texto3D"", ""cris2312/pepe"", ""cocktailpeanut/InstantStyle"", ""rynmurdock/Blue_Tigers"", ""abreza/Unique3D"", 
""waloneai/outpainting-with-differential-diffusion-demo"", ""amos1088/test_gradio"", ""charbel-malo/3dGenesis"", ""Lifeinhockey/Diffusion_Models"", ""Dekonstruktio/IP-Adapter-Playground"", ""anushriiyer/IMAGDressing-v1"", ""sdafd/thumbnail-testing"", ""fritzgnad2/InstantStyle"", ""huzey/MoodSpace"", ""QAGITECH/h94-IP-Adapter"", ""vakilrathod67/h94-IP-Adapter"", ""Omega-02/h94-IP-Adapter"", ""limm2023/h94-IP-Adapter"", ""duu12/h94-IP-Adapter"", ""poetrychor/h94-IP-Adapterji"", ""Sagar7777/h94-IP-Adapter"", ""rocky678/h94-IP-Adapter"", ""Saketh1430/h94-IP-Adapter"", ""johnnygjj/h94-IP-Adapter"", ""unve/h94-IP-Adapter"", ""hycsy2024/h94-IP-Adapter"", ""cocktailpeanut/InstantStyle-SDXL-Lightning"", ""Abhinav268/h94-IP-Adapter"", ""cocktailpeanut/generative_recsys"", ""insync17/h94-IP-Adapter"", ""samuel171731/h94-IP-Adapter"", ""divyanshudk142/h94-IP-Adapter"", ""shivguddadmath/Hyper-SDXL"", ""fraggy2323/h94-IP-Adapter"", ""youngwoo-dev/test1"", ""cocktailpeanut/ZeST"", ""kartiikx3/h94-IP-Adapter"", ""hideosnes/SDXL-Lightning"", ""tsi-org/Zero-Shot-Material-Transfer"", ""alischme/h94-IP-Adapter"", ""poetrychor/h94-IP-Adapter""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-27 08:33:41+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: diffusers\nlicense: apache-2.0\ntags:\n- text-to-image\n- stable-diffusion"", ""transformersInfo"": null, ""_id"": ""64dc4d731d19239f50867b34"", ""modelId"": ""h94/IP-Adapter"", ""usedStorage"": 19822036278}",0,,0,"https://huggingface.co/YazzRey/Transformadores_Caso_3_PLN, https://huggingface.co/refiners/sd15.ip_adapter, https://huggingface.co/refiners/sd15.ip_adapter.plus, https://huggingface.co/refiners/sdxl.ip_adapter, https://huggingface.co/refiners/sdxl.ip_adapter.plus",5,,0,,0,"InstantX/InstantStyle, MykolaL/StableDesign, TencentARC/ColorFlow, Wuvin/Unique3D, clinteroni/outpainting-with-differential-diffusion-demo, huggingface/InferenceSupport/discussions/1050, 
okaris/omni-zero, ozgurkara/RAVE, tencent/Hunyuan3D-2mini-Turbo, tencent/Hunyuan3D-2mv, tight-inversion/tight-inversion, xingpng/CSGO, ymzhang319/FoleyCrafter",13
LLaMA-2-7B-32K_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv ADDED
@@ -0,0 +1,618 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ togethercomputer/LLaMA-2-7B-32K,"---
3
+ license: llama2
4
+ datasets:
5
+ - togethercomputer/RedPajama-Data-1T
6
+ - togethercomputer/RedPajama-Data-Instruct
7
+ - EleutherAI/pile
8
+ - togethercomputer/Long-Data-Collections
9
+ language:
10
+ - en
11
+ library_name: transformers
12
+ ---
13
+
14
+ # LLaMA-2-7B-32K
15
+
16
+ ## Model Description
17
+
18
+ LLaMA-2-7B-32K is an open-source, long context language model developed by Together, fine-tuned from Meta's original Llama-2 7B model.
19
+ This model represents our efforts to contribute to the rapid progress of the open-source ecosystem for large language models.
20
+ The model has been extended to a context length of 32K with position interpolation,
21
+ allowing applications on multi-document QA, long text summarization, etc.
22
+
23
+ ## What's new?
24
+
25
+ This model introduces several improvements and new features:
26
+
27
+ 1. **Extended Context:** The model has been trained to handle context lengths up to 32K, which is a significant improvement over the previous versions.
28
+
29
+ 2. **Pre-training and Instruction Tuning:** We have shared our data recipe, which consists of a mixture of pre-training and instruction tuning data.
30
+
31
+ 3. **Fine-tuning Examples:** We provide examples of how to fine-tune the model for specific applications, including book summarization and long context question and answering.
32
+
33
+ 4. **Software Support:** We have updated both the inference and training stack to allow efficient inference and fine-tuning for 32K context.
34
+
35
+ ## Model Architecture
36
+
37
+ The model follows the architecture of Llama-2-7B and extends it to handle a longer context. It leverages the recently released FlashAttention-2 and a range of other optimizations to improve the speed and efficiency of inference and training.
38
+
39
+ ## Training and Fine-tuning
40
+
41
+ The model has been trained using a mixture of pre-training and instruction tuning data.
42
+ - In the first training phase of continued pre-training, our data mixture contains 25% RedPajama Book, 25% RedPajama ArXiv (including abstracts), 25% other data from RedPajama, and 25% from the UL2 Oscar Data, which is a part of OIG (Open-Instruction-Generalist), asking the model to fill in missing chunks, or complete the text.
43
+ To enhance the long-context ability, we exclude data shorter than 2K word. The inclusion of UL2 Oscar Data is effective in compelling the model to read and utilize long-range context.
44
+ - We then fine-tune the model to focus on its few shot capacity under long context, including 20% Natural Instructions (NI), 20% Public Pool of Prompts (P3), 20% the Pile. We decontaminated all data against HELM core scenarios . We teach the model to leverage the in-context examples by packing examples into one 32K-token sequence. To maintain the knowledge learned from the first piece of data, we incorporate 20% RedPajama-Data Book and 20% RedPajama-Data ArXiv.
45
+
46
+ Next, we provide examples of how to fine-tune the model for specific applications.
47
+ The example datasets are placed in [togethercomputer/Long-Data-Collections](https://huggingface.co/datasets/togethercomputer/Long-Data-Collections)
48
+ You can use the [OpenChatKit](https://github.com/togethercomputer/OpenChatKit) to fine-tune your own 32K model over LLaMA-2-7B-32K.
49
+ Please refer to [OpenChatKit](https://github.com/togethercomputer/OpenChatKit) for step-by-step illustrations.
50
+
51
+ 1. Long Context QA.
52
+
53
+ We take as an example the multi-document question answering task from the paper “Lost in the Middle: How Language Models Use Long Contexts”. The input for the model consists of (i) a question that requires an answer and (ii) k documents, which are passages extracted from Wikipedia. Notably, only one of these documents contains the answer to the question, while the remaining k − 1 documents, termed as ""distractor"" documents, do not. To successfully perform this task, the model must identify and utilize the document containing the answer from its input context.
54
+
55
+ With OCK, simply run the following command to fine-tune:
56
+ ```
57
+ bash training/finetune_llama-2-7b-32k-mqa.sh
58
+ ```
59
+
60
+ 2. Summarization.
61
+
62
+ Another example is BookSum, a unique dataset designed to address the challenges of long-form narrative summarization. This dataset features source documents from the literature domain, including novels, plays, and stories, and offers human-written, highly abstractive summaries. We here focus on chapter-level data. BookSum poses a unique set of challenges, necessitating that the model comprehensively read through each chapter.
63
+
64
+ With OCK, simply run the following command to fine-tune:
65
+ ```
66
+ bash training/finetune_llama-2-7b-32k-booksum.sh
67
+ ```
68
+
69
+
70
+ ## Inference
71
+
72
+ You can use the [Together API](https://together.ai/blog/api-announcement) to try out LLaMA-2-7B-32K for inference.
73
+ The updated inference stack allows for efficient inference.
74
+
75
+ To run the model locally, we strongly recommend to install Flash Attention V2, which is necessary to obtain the best performance:
76
+ ```
77
+ # Please update the path of `CUDA_HOME`
78
+ export CUDA_HOME=/usr/local/cuda-11.8
79
+ pip install transformers==4.31.0
80
+ pip install sentencepiece
81
+ pip install ninja
82
+ pip install flash-attn --no-build-isolation
83
+ pip install git+https://github.com/HazyResearch/flash-attention.git#subdirectory=csrc/rotary
84
+ ```
85
+
86
+ You can use this model directly from the Hugging Face Model Hub or fine-tune it on your own data using the OpenChatKit.
87
+
88
+ ```python
89
+ from transformers import AutoTokenizer, AutoModelForCausalLM
90
+
91
+ tokenizer = AutoTokenizer.from_pretrained(""togethercomputer/LLaMA-2-7B-32K"")
92
+ model = AutoModelForCausalLM.from_pretrained(""togethercomputer/LLaMA-2-7B-32K"", trust_remote_code=True, torch_dtype=torch.float16)
93
+
94
+ input_context = ""Your text here""
95
+ input_ids = tokenizer.encode(input_context, return_tensors=""pt"")
96
+ output = model.generate(input_ids, max_length=128, temperature=0.7)
97
+ output_text = tokenizer.decode(output[0], skip_special_tokens=True)
98
+ print(output_text)
99
+ ```
100
+
101
+ Alternatively, you can set `trust_remote_code=False` if you prefer not to use flash attention.
102
+
103
+
104
+ ## Limitations and Bias
105
+
106
+ As with all language models, LLaMA-2-7B-32K may generate incorrect or biased content. It's important to keep this in mind when using the model.
107
+
108
+ ## Community
109
+
110
+ Join us on [Together Discord](https://discord.gg/6ZVDU8tTD4)","{""id"": ""togethercomputer/LLaMA-2-7B-32K"", ""author"": ""togethercomputer"", ""sha"": ""46c24bb5aef59722fa7aa6d75e832afd1d64b980"", ""last_modified"": ""2024-03-28 01:14:07+00:00"", ""created_at"": ""2023-07-26 02:19:41+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5855, ""downloads_all_time"": null, ""likes"": 538, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""en"", ""dataset:togethercomputer/RedPajama-Data-1T"", ""dataset:togethercomputer/RedPajama-Data-Instruct"", ""dataset:EleutherAI/pile"", ""dataset:togethercomputer/Long-Data-Collections"", ""license:llama2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- togethercomputer/RedPajama-Data-1T\n- togethercomputer/RedPajama-Data-Instruct\n- EleutherAI/pile\n- togethercomputer/Long-Data-Collections\nlanguage:\n- en\nlibrary_name: transformers\nlicense: llama2"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_flash_llama.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""Sharathhebbar24/One-stop-for-Open-source-models"", ""gsaivinay/open_llm_leaderboard"", ""EvanTHU/MotionLLM"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""kz-transformers/kaz-llm-lb"", ""gelnesr/Dyna-1"", ""felixz/open_llm_leaderboard"", ""HemaAM/GPT_train_on_LLaMa"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""Walid-Ahmed/Advanced_Text_Summarization"", ""anantgupta129/LitGPT-Pythia-160M"", ""BAAI/open_flageval_vlm_leaderboard"", ""Alex132/togethercomputer-LLaMA-2-7B-32K"", ""neubla/neubla-llm-evaluation-board"", ""Raju2024/TestLLM"", ""Wrightjay/togethercomputer-LLaMA-2-7B-32K"", ""PrarthanaTS/tsai-gpt-from-scratch"", ""MadhurGarg/TSAIGPTRedPajama"", ""WhiteKnightAI/togethercomputer-LLaMA-2-7B-32K"", ""ka1kuk/litellm"", ""RaviNaik/ERA-SESSION22"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", 
""Sijuade/GPTNEXTWORD"", ""piyushgrover/MiniGPT_S22"", ""supra-e-acc/Pythia-160M-text-generate"", ""venkyyuvy/GPT_redpajama"", ""davegcat/togethercomputer-LLaMA-2-7B-32K"", ""mkthoma/GPT_From_Scratch"", ""VarunSivamani/GPT-From-Scratch"", ""Eberhenriquez/togethercomputer-LLaMA-2-7B-32K"", ""Buck3tHead/togethercomputer-LLaMA-2-7B-32K"", ""sanjanatule/GPTNext"", ""RashiAgarwal/TSAIGPTRedPajama"", ""neuralorbs/DialogGen"", ""GunaKoppula/ERA-Session-22"", ""Navyabhat/ERAV1-Session-22"", ""Knaledge/togethercomputer-LLaMA-2-7B-32K"", ""Arthurman70/togethercomputer-LLaMA-2-7B-32K"", ""Plurigrid/meow"", ""manu-codes/togethercomputer-LLaMA-2-7B-32K"", ""saket1619/togethercomputer-LLaMA-2-7B-32K"", ""Vaish2705/ERA_S22"", ""nonhuman/nnnn"", ""smothiki/open_llm_leaderboard"", ""blackwingedkite/gutalk"", ""tashp/togethercomputer-LLaMA-2-7B-32K"", ""suryacodr2034/togethercomputer-LLaMA-2-7B-32K"", ""agrimag/RISCRIVI_DESCRIZIONE"", ""EmbraceLab/togethercomputer-LLaMA-2-7B-32K"", ""blackwingedkite/alpaca2_clas"", ""0x1668/open_llm_leaderboard"", ""pngwn/open_llm_leaderboard-check"", ""asir0z/open_llm_leaderboard"", ""szimeus/togethercomputer-LLaMA-2-7B-32K"", ""Alexxshadow/togethercomputer-LLaMA-2-7B-32K"", ""kbmlcoding/open_llm_leaderboard_free"", ""pranjalkar9/togethercomputer-LLaMA-2-7B-32K"", ""ToletiSri/TSAI_S22"", ""kenken999/litellm"", ""kenken999/litellmlope"", ""K00B404/One-stop-till-you-drop"", ""aichampions/open_llm_leaderboard"", ""Adeco/open_llm_leaderboard"", ""NeerAbhy/Text_analyzer"", ""anirudh937/open_llm_leaderboard"", ""smothiki/open_llm_leaderboard2"", ""Asiya057/Incarna-Mind"", ""Asiya057/Incarna-Mind-POC"", ""mjalg/IFEvalTR"", ""mshook/Advanced_Text_Summarization"", ""lastsamuraii/LitGPT-Pythia-160M"", ""mohan007/sales_audio_analysis"", ""anonymousforpaper/M3Site"", ""waniberry66/HW_NLP"", ""jabbarcodes/job-model""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-28 01:14:07+00:00"", ""cardData"": 
""datasets:\n- togethercomputer/RedPajama-Data-1T\n- togethercomputer/RedPajama-Data-Instruct\n- EleutherAI/pile\n- togethercomputer/Long-Data-Collections\nlanguage:\n- en\nlibrary_name: transformers\nlicense: llama2"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""64c082bdaa57599de1b87af4"", ""modelId"": ""togethercomputer/LLaMA-2-7B-32K"", ""usedStorage"": 53908211130}",0,"https://huggingface.co/flytech/togetherchat-dev-7b, https://huggingface.co/flytech/togetherchat-dev-7b-v2, https://huggingface.co/flytech/Ruckus-7b-ALPHA, https://huggingface.co/flytech/Ruckus-7b-v17, https://huggingface.co/PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-2bit-smashed, https://huggingface.co/PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-1bit-smashed, https://huggingface.co/PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-4bit-smashed",7,"https://huggingface.co/ajash/Amazon-lm, https://huggingface.co/ajash/Amazon-lm-10k, https://huggingface.co/alierenak/llama_7b_dialogue",3,"https://huggingface.co/mradermacher/LLaMA-2-7B-32K-GGUF, https://huggingface.co/mradermacher/LLaMA-2-7B-32K-i1-GGUF, https://huggingface.co/PrunaAI/togethercomputer-LLaMA-2-7B-32K-GGUF-smashed",3,"https://huggingface.co/therealchefdave/slumber-7b, https://huggingface.co/damerajee/Llamoe-test, https://huggingface.co/Kiruthikarthi/llamamistral-passthrough-merged-model, https://huggingface.co/ajay141/llama-qwen, https://huggingface.co/laislemke/LLaMA-2-vicuna-7b-slerp",5,"BAAI/open_cn_llm_leaderboard, BAAI/open_flageval_vlm_leaderboard, EvanTHU/MotionLLM, GTBench/GTBench, HemaAM/GPT_train_on_LLaMa, Intel/low_bit_open_llm_leaderboard, OPTML-Group/UnlearnCanvas-Benchmark, Sharathhebbar24/One-stop-for-Open-source-models, Vikhrmodels/small-shlepa-lb, Walid-Ahmed/Advanced_Text_Summarization, anantgupta129/LitGPT-Pythia-160M, gelnesr/Dyna-1, 
huggingface/InferenceSupport/discussions/new?title=togethercomputer/LLaMA-2-7B-32K&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btogethercomputer%2FLLaMA-2-7B-32K%5D(%2Ftogethercomputer%2FLLaMA-2-7B-32K)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",13
111
+ flytech/togetherchat-dev-7b,"---
112
+ license: llama2
113
+ base_model: togethercomputer/LLaMA-2-7B-32K
114
+ tags:
115
+ - generated_from_trainer
116
+ model-index:
117
+ - name: togetherchat-dev-7b
118
+ results: []
119
+ ---
120
+
121
+ # togetherchat-dev-7b
122
+
123
+ This model is a fine-tuned version of [togethercomputer/LLaMA-2-7B-32K](https://huggingface.co/togethercomputer/LLaMA-2-7B-32K) using 5000 examples and 3 datasets:
124
+
125
+ platypus_dataset = load_dataset(""garage-bAInd/Open-Platypus"")
126
+ codealpaca_dataset = load_dataset(""sahil2801/CodeAlpaca-20k"")
127
+ evol_codealpaca_dataset = load_dataset(""theblackcat102/evol-codealpaca-v1"")
128
+
129
+
130
+ ## Model description
131
+
132
+ Step Training Loss
133
+ ---------------------
134
+ 60 1.293000
135
+ 120 0.673600
136
+ 180 0.633200
137
+ 240 0.611600
138
+ 300 0.633000
139
+ 360 0.589500
140
+ 480 0.587600
141
+ 540 0.569000
142
+ 600 0.548700
143
+ 660 0.553100
144
+ 720 0.531500
145
+ 780 0.506400
146
+ 840 0.512500
147
+
148
+
149
+ ## Intended uses & limitations
150
+
151
+ More information needed
152
+
153
+ ## Training and evaluation data
154
+
155
+ More information needed
156
+
157
+ ## Training procedure
158
+
159
+ ### Training hyperparameters
160
+
161
+ The following hyperparameters were used during training:
162
+ - learning_rate: 0.0002
163
+ - train_batch_size: 8
164
+ - eval_batch_size: 8
165
+ - seed: 42
166
+ - gradient_accumulation_steps: 2
167
+ - total_train_batch_size: 16
168
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
169
+ - lr_scheduler_type: constant
170
+ - lr_scheduler_warmup_ratio: 0.1
171
+ - num_epochs: 3
172
+
173
+ ### Training results
174
+
175
+
176
+
177
+ ### Framework versions
178
+
179
+ - Transformers 4.33.1
180
+ - Pytorch 2.0.1+cu118
181
+ - Datasets 2.14.5
182
+ - Tokenizers 0.13.3
183
+ ","{""id"": ""flytech/togetherchat-dev-7b"", ""author"": ""flytech"", ""sha"": ""4d8b59990e1aa88a5e772b3d2190839055f57f06"", ""last_modified"": ""2023-09-09 19:46:21+00:00"", ""created_at"": ""2023-09-08 03:36:00+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""llama"", ""text-generation"", ""generated_from_trainer"", ""custom_code"", ""base_model:togethercomputer/LLaMA-2-7B-32K"", ""base_model:finetune:togethercomputer/LLaMA-2-7B-32K"", ""license:llama2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nlicense: llama2\ntags:\n- generated_from_trainer\nmodel-index:\n- name: togetherchat-dev-7b\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""togetherchat-dev-7b"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""auto_map"": {""AutoModelForCausalLM"": ""togethercomputer/LLaMA-2-7B-32K--modeling_flash_llama.LlamaForCausalLM""}, ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, 
""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694221912.f3a14596601b.285.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694221912.f3a14596601b.285.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694283830.ef7f542c87ff.844564.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694283830.ef7f542c87ff.844564.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694284111.ef7f542c87ff.846663.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694284111.ef7f542c87ff.846663.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694284157.ef7f542c87ff.846663.2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694284157.ef7f542c87ff.846663.3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694284425.ef7f542c87ff.848482.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694284425.ef7f542c87ff.848482.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='gpt_llama_7btestv2.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/optimizer.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/rng_state.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/scheduler.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-09 19:46:21+00:00"", ""cardData"": ""base_model: 
togethercomputer/LLaMA-2-7B-32K\nlicense: llama2\ntags:\n- generated_from_trainer\nmodel-index:\n- name: togetherchat-dev-7b\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""64fa96a014636d417a882ecc"", ""modelId"": ""flytech/togetherchat-dev-7b"", ""usedStorage"": 5338127438}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=flytech/togetherchat-dev-7b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bflytech%2Ftogetherchat-dev-7b%5D(%2Fflytech%2Ftogetherchat-dev-7b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
184
+ flytech/togetherchat-dev-7b-v2,"---
185
+ license: llama2
186
+ base_model: togethercomputer/LLaMA-2-7B-32K
187
+ tags:
188
+ - generated_from_trainer
189
+ model-index:
190
+ - name: togetherchat-dev-7b-v2
191
+ results: []
192
+ ---
193
+
194
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
195
+ should probably proofread and complete it, then remove this comment. -->
196
+
197
+ # togetherchat-dev-7b-v2
198
+
199
+ This model is a fine-tuned version of [togethercomputer/LLaMA-2-7B-32K](https://huggingface.co/togethercomputer/LLaMA-2-7B-32K) on 25000 entries for 3 epochs.
200
+
201
+ ## Model description
202
+
203
+ Model can be used for text-to-code generation and for further fine-tuning,
204
+ Colab notebook example (on free T4 GPU) soon!
205
+
206
+ ## Datasets used:
207
+
208
+ - evol-codealpaca-80k - 10000 entries
209
+ - codealpaca-20k - 10000 entries
210
+ - open-platypus - 5000 entries
211
+
212
+ ## Intended uses & limitations
213
+
214
+ Please remember that model may (and will) produce inaccurate informations,
215
+ you need to fine-tune it for your specific task.
216
+
217
+ ## Training and evaluation data
218
+
219
+ See 'Metrics'
220
+
221
+ ### Training hyperparameters
222
+
223
+ The following hyperparameters were used during training:
224
+ - learning_rate: 0.0001
225
+ - train_batch_size: 10
226
+ - eval_batch_size: 8
227
+ - seed: 42
228
+ - gradient_accumulation_steps: 4
229
+ - total_train_batch_size: 40
230
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
231
+ - lr_scheduler_type: linear
232
+ - lr_scheduler_warmup_ratio: 0.1
233
+ - num_epochs: 3
234
+
235
+ ### Training results
236
+
237
+
238
+
239
+ ### Framework versions
240
+
241
+ - Transformers 4.33.1
242
+ - Pytorch 2.0.1+cu118
243
+ - Datasets 2.14.5
244
+ - Tokenizers 0.13.3
245
+ ","{""id"": ""flytech/togetherchat-dev-7b-v2"", ""author"": ""flytech"", ""sha"": ""10cd9fc5f49862396d9b77387427e2ca0b6511e2"", ""last_modified"": ""2023-10-09 23:45:45+00:00"", ""created_at"": ""2023-09-09 19:52:16+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 6, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""tensorboard"", ""safetensors"", ""llama"", ""text-generation"", ""generated_from_trainer"", ""custom_code"", ""base_model:togethercomputer/LLaMA-2-7B-32K"", ""base_model:finetune:togethercomputer/LLaMA-2-7B-32K"", ""license:llama2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nlicense: llama2\ntags:\n- generated_from_trainer\nmodel-index:\n- name: togetherchat-dev-7b-v2\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""togetherchat-dev-7b-v2"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""auto_map"": {""AutoModelForCausalLM"": ""togethercomputer/LLaMA-2-7B-32K--modeling_flash_llama.LlamaForCausalLM""}, ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", 
""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694289418.ef7f542c87ff.868235.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694289418.ef7f542c87ff.868235.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694289735.ef7f542c87ff.870591.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694289735.ef7f542c87ff.870591.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694290286.ef7f542c87ff.872907.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694290286.ef7f542c87ff.872907.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694290449.ef7f542c87ff.874126.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694290449.ef7f542c87ff.874126.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694290692.ef7f542c87ff.875299.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694290692.ef7f542c87ff.875299.1', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='events.out.tfevents.1694305483.ef7f542c87ff.875299.2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694305483.ef7f542c87ff.875299.3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694305654.ef7f542c87ff.936609.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694305654.ef7f542c87ff.936609.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694309350.ef7f542c87ff.951499.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694309350.ef7f542c87ff.951499.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694360236.ef7f542c87ff.1166609.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1694360236.ef7f542c87ff.1166609.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/optimizer.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/rng_state.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/scheduler.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/tokenizer_config.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-10-09 23:45:45+00:00"", ""cardData"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nlicense: llama2\ntags:\n- generated_from_trainer\nmodel-index:\n- name: togetherchat-dev-7b-v2\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""64fcccf0e0dc35986bce291f"", ""modelId"": ""flytech/togetherchat-dev-7b-v2"", ""usedStorage"": 2098042799}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=flytech/togetherchat-dev-7b-v2&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bflytech%2Ftogetherchat-dev-7b-v2%5D(%2Fflytech%2Ftogetherchat-dev-7b-v2)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
246
+ flytech/Ruckus-7b-ALPHA,"---
247
+ license: llama2
248
+ base_model: togethercomputer/LLaMA-2-7B-32K
249
+ tags:
250
+ - generated_from_trainer
251
+ model-index:
252
+ - name: Ruckus-7b-ALPHA
253
+ results: []
254
+ ---
255
+
256
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
257
+ should probably proofread and complete it, then remove this comment. -->
258
+
259
+ # Ruckus-7b-ALPHA
260
+
261
+ This model is a fine-tuned version of [togethercomputer/LLaMA-2-7B-32K](https://huggingface.co/togethercomputer/LLaMA-2-7B-32K) on an unknown dataset.
262
+
263
+ ## Model description
264
+
265
+ More information needed
266
+
267
+ ## Intended uses & limitations
268
+
269
+ More information needed
270
+
271
+ ## Training and evaluation data
272
+
273
+ More information needed
274
+
275
+ ## Training procedure
276
+
277
+ ### Training hyperparameters
278
+
279
+ The following hyperparameters were used during training:
280
+ - learning_rate: 0.0002
281
+ - train_batch_size: 8
282
+ - eval_batch_size: 8
283
+ - seed: 42
284
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
285
+ - lr_scheduler_type: constant
286
+ - num_epochs: 8
287
+
288
+ ### Training results
289
+
290
+
291
+
292
+ ### Framework versions
293
+
294
+ - Transformers 4.33.1
295
+ - Pytorch 2.0.1+cu118
296
+ - Datasets 2.14.5
297
+ - Tokenizers 0.13.3
298
+ ","{""id"": ""flytech/Ruckus-7b-ALPHA"", ""author"": ""flytech"", ""sha"": ""799472379c6cacd416ec34ad1a82e71baeaa6f60"", ""last_modified"": ""2023-09-19 03:34:09+00:00"", ""created_at"": ""2023-09-18 22:55:06+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""tensorboard"", ""safetensors"", ""llama"", ""text-generation"", ""generated_from_trainer"", ""custom_code"", ""base_model:togethercomputer/LLaMA-2-7B-32K"", ""base_model:finetune:togethercomputer/LLaMA-2-7B-32K"", ""license:llama2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nlicense: llama2\ntags:\n- generated_from_trainer\nmodel-index:\n- name: Ruckus-7b-ALPHA\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""Ruckus-7b-ALPHA"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""auto_map"": {""AutoModelForCausalLM"": ""togethercomputer/LLaMA-2-7B-32K--modeling_flash_llama.LlamaForCausalLM""}, ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, 
""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1695070720.ef7f542c87ff.675570.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1695070720.ef7f542c87ff.675570.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1695071467.ef7f542c87ff.679613.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1695071467.ef7f542c87ff.679613.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1695077887.ef7f542c87ff.702853.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1695077887.ef7f542c87ff.702853.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/optimizer.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/rng_state.pth', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='last-checkpoint/scheduler.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-19 03:34:09+00:00"", ""cardData"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nlicense: llama2\ntags:\n- generated_from_trainer\nmodel-index:\n- name: Ruckus-7b-ALPHA\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6508d54a69739cd31007d15c"", ""modelId"": ""flytech/Ruckus-7b-ALPHA"", ""usedStorage"": 
2048840176}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=flytech/Ruckus-7b-ALPHA&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bflytech%2FRuckus-7b-ALPHA%5D(%2Fflytech%2FRuckus-7b-ALPHA)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
299
+ flytech/Ruckus-7b-v17,"---
300
+ license: llama2
301
+ base_model: togethercomputer/LLaMA-2-7B-32K
302
+ tags:
303
+ - generated_from_trainer
304
+ model-index:
305
+ - name: Ruckus-7b-v17
306
+ results: []
307
+ ---
308
+
309
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
310
+ should probably proofread and complete it, then remove this comment. -->
311
+
312
+ # Ruckus-7b-v17
313
+
314
+ This model is a fine-tuned version of [togethercomputer/LLaMA-2-7B-32K](https://huggingface.co/togethercomputer/LLaMA-2-7B-32K) on an unknown dataset.
315
+
316
+ ## Model description
317
+
318
+ More information needed
319
+
320
+ ## Intended uses & limitations
321
+
322
+ More information needed
323
+
324
+ ## Training and evaluation data
325
+
326
+ More information needed
327
+
328
+ ## Training procedure
329
+
330
+ ### Training hyperparameters
331
+
332
+ The following hyperparameters were used during training:
333
+ - learning_rate: 0.0002
334
+ - train_batch_size: 8
335
+ - eval_batch_size: 8
336
+ - seed: 42
337
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
338
+ - lr_scheduler_type: constant
339
+ - num_epochs: 11
340
+
341
+ ### Training results
342
+
343
+
344
+
345
+ ### Framework versions
346
+
347
+ - Transformers 4.33.1
348
+ - Pytorch 2.0.1+cu118
349
+ - Datasets 2.14.5
350
+ - Tokenizers 0.13.3
351
+ ","{""id"": ""flytech/Ruckus-7b-v17"", ""author"": ""flytech"", ""sha"": ""ba22ff8aa9f5d0840597d446357b7ef6696ee212"", ""last_modified"": ""2023-09-19 21:27:02+00:00"", ""created_at"": ""2023-09-19 14:15:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 8, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""tensorboard"", ""safetensors"", ""llama"", ""text-generation"", ""generated_from_trainer"", ""custom_code"", ""base_model:togethercomputer/LLaMA-2-7B-32K"", ""base_model:finetune:togethercomputer/LLaMA-2-7B-32K"", ""license:llama2"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nlicense: llama2\ntags:\n- generated_from_trainer\nmodel-index:\n- name: Ruckus-7b-v17\n results: []"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": [{""name"": ""Ruckus-7b-v17"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""auto_map"": {""AutoModelForCausalLM"": ""togethercomputer/LLaMA-2-7B-32K--modeling_flash_llama.LlamaForCausalLM""}, ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, 
""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1695132941.ef7f542c87ff.925612.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='events.out.tfevents.1695132941.ef7f542c87ff.925612.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/optimizer.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/rng_state.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/scheduler.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/tokenizer.model', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='last-checkpoint/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='last-checkpoint/training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-19 21:27:02+00:00"", ""cardData"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nlicense: llama2\ntags:\n- generated_from_trainer\nmodel-index:\n- name: Ruckus-7b-v17\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6509ad011aece923f2f2e2f3"", ""modelId"": ""flytech/Ruckus-7b-v17"", ""usedStorage"": 5158776983}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=flytech/Ruckus-7b-v17&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bflytech%2FRuckus-7b-v17%5D(%2Fflytech%2FRuckus-7b-v17)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
352
+ PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-2bit-smashed,"---
353
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
354
+ base_model: togethercomputer/LLaMA-2-7B-32K
355
+ metrics:
356
+ - memory_disk
357
+ - memory_inference
358
+ - inference_latency
359
+ - inference_throughput
360
+ - inference_CO2_emissions
361
+ - inference_energy_consumption
362
+ tags:
363
+ - pruna-ai
364
+ ---
365
+ <!-- header start -->
366
+ <!-- 200823 -->
367
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
368
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
369
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
370
+ </a>
371
+ </div>
372
+ <!-- header end -->
373
+
374
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
375
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
376
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
377
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
378
+
379
+ # Simply make AI models cheaper, smaller, faster, and greener!
380
+
381
+ - Give a thumbs up if you like this model!
382
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
383
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
384
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
385
+ - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
386
+
387
+ ## Results
388
+
389
+ ![image info](./plots.png)
390
+
391
+ **Frequently Asked Questions**
392
+ - ***How does the compression work?*** The model is compressed with hqq.
393
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
394
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
395
+ - ***What is the model format?*** We use safetensors.
396
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
397
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
398
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
399
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
400
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
401
+
402
+ ## Setup
403
+
404
+ You can run the smashed model with these steps:
405
+
406
+ 0. Check requirements from the original repo togethercomputer/LLaMA-2-7B-32K installed. In particular, check python, cuda, and transformers versions.
407
+ 1. Make sure that you have installed quantization related packages.
408
+ ```bash
409
+ pip install hqq
410
+ ```
411
+ 2. Load & run the model.
412
+ ```python
413
+ from transformers import AutoModelForCausalLM, AutoTokenizer
414
+ from hqq.engine.hf import HQQModelForCausalLM
415
+ from hqq.models.hf.base import AutoHQQHFModel
416
+
417
+ try:
418
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-2bit-smashed"", device_map='auto')
419
+ except:
420
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-2bit-smashed"")
421
+ tokenizer = AutoTokenizer.from_pretrained(""togethercomputer/LLaMA-2-7B-32K"")
422
+
423
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
424
+
425
+ outputs = model.generate(input_ids, max_new_tokens=216)
426
+ tokenizer.decode(outputs[0])
427
+ ```
428
+
429
+ ## Configurations
430
+
431
+ The configuration info are in `smash_config.json`.
432
+
433
+ ## Credits & License
434
+
435
+ The license of the smashed model follows the license of the original model. Please check the license of the original model togethercomputer/LLaMA-2-7B-32K before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
436
+
437
+ ## Want to compress other models?
438
+
439
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
440
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-2bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""ce1aab9cf5984e5a71562b30fcfd2e9f54ce52e3"", ""last_modified"": ""2024-08-02 16:17:58+00:00"", ""created_at"": ""2024-06-24 11:32:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""text-generation"", ""pruna-ai"", ""base_model:togethercomputer/LLaMA-2-7B-32K"", ""base_model:finetune:togethercomputer/LLaMA-2-7B-32K"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-02 16:17:58+00:00"", ""cardData"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6679595f2c09cbf312b8c442"", ""modelId"": ""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-2bit-smashed"", ""usedStorage"": 
2296481611}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-2bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Ftogethercomputer-LLaMA-2-7B-32K-HQQ-2bit-smashed%5D(%2FPrunaAI%2Ftogethercomputer-LLaMA-2-7B-32K-HQQ-2bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
441
+ PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-1bit-smashed,"---
442
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
443
+ base_model: togethercomputer/LLaMA-2-7B-32K
444
+ metrics:
445
+ - memory_disk
446
+ - memory_inference
447
+ - inference_latency
448
+ - inference_throughput
449
+ - inference_CO2_emissions
450
+ - inference_energy_consumption
451
+ tags:
452
+ - pruna-ai
453
+ ---
454
+ <!-- header start -->
455
+ <!-- 200823 -->
456
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
457
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
458
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
459
+ </a>
460
+ </div>
461
+ <!-- header end -->
462
+
463
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
464
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
465
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
466
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
467
+
468
+ # Simply make AI models cheaper, smaller, faster, and greener!
469
+
470
+ - Give a thumbs up if you like this model!
471
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
472
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
473
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
474
+ - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
475
+
476
+ ## Results
477
+
478
+ ![image info](./plots.png)
479
+
480
+ **Frequently Asked Questions**
481
+ - ***How does the compression work?*** The model is compressed with hqq.
482
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
483
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
484
+ - ***What is the model format?*** We use safetensors.
485
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
486
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
487
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
488
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
489
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
490
+
491
+ ## Setup
492
+
493
+ You can run the smashed model with these steps:
494
+
495
+ 0. Check requirements from the original repo togethercomputer/LLaMA-2-7B-32K installed. In particular, check python, cuda, and transformers versions.
496
+ 1. Make sure that you have installed quantization related packages.
497
+ ```bash
498
+ pip install hqq
499
+ ```
500
+ 2. Load & run the model.
501
+ ```python
502
+ from transformers import AutoModelForCausalLM, AutoTokenizer
503
+ from hqq.engine.hf import HQQModelForCausalLM
504
+ from hqq.models.hf.base import AutoHQQHFModel
505
+
506
+ try:
507
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-1bit-smashed"", device_map='auto')
508
+ except:
509
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-1bit-smashed"")
510
+ tokenizer = AutoTokenizer.from_pretrained(""togethercomputer/LLaMA-2-7B-32K"")
511
+
512
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
513
+
514
+ outputs = model.generate(input_ids, max_new_tokens=216)
515
+ tokenizer.decode(outputs[0])
516
+ ```
517
+
518
+ ## Configurations
519
+
520
+ The configuration info are in `smash_config.json`.
521
+
522
+ ## Credits & License
523
+
524
+ The license of the smashed model follows the license of the original model. Please check the license of the original model togethercomputer/LLaMA-2-7B-32K before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
525
+
526
+ ## Want to compress other models?
527
+
528
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
529
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-1bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""105cd84804933a225910655cd35bfe27745dc13e"", ""last_modified"": ""2024-08-02 16:18:00+00:00"", ""created_at"": ""2024-06-24 11:33:42+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""text-generation"", ""pruna-ai"", ""base_model:togethercomputer/LLaMA-2-7B-32K"", ""base_model:finetune:togethercomputer/LLaMA-2-7B-32K"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-02 16:18:00+00:00"", ""cardData"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66795996335cad277cd4219d"", ""modelId"": ""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-1bit-smashed"", ""usedStorage"": 
1486980939}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-1bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Ftogethercomputer-LLaMA-2-7B-32K-HQQ-1bit-smashed%5D(%2FPrunaAI%2Ftogethercomputer-LLaMA-2-7B-32K-HQQ-1bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
530
+ PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-4bit-smashed,"---
531
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
532
+ base_model: togethercomputer/LLaMA-2-7B-32K
533
+ metrics:
534
+ - memory_disk
535
+ - memory_inference
536
+ - inference_latency
537
+ - inference_throughput
538
+ - inference_CO2_emissions
539
+ - inference_energy_consumption
540
+ tags:
541
+ - pruna-ai
542
+ ---
543
+ <!-- header start -->
544
+ <!-- 200823 -->
545
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
546
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
547
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
548
+ </a>
549
+ </div>
550
+ <!-- header end -->
551
+
552
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
553
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
554
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
555
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
556
+
557
+ # Simply make AI models cheaper, smaller, faster, and greener!
558
+
559
+ - Give a thumbs up if you like this model!
560
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
561
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
562
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
563
+ - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
564
+
565
+ ## Results
566
+
567
+ ![image info](./plots.png)
568
+
569
+ **Frequently Asked Questions**
570
+ - ***How does the compression work?*** The model is compressed with hqq.
571
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
572
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
573
+ - ***What is the model format?*** We use safetensors.
574
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
575
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
576
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
577
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
578
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
579
+
580
+ ## Setup
581
+
582
+ You can run the smashed model with these steps:
583
+
584
+ 0. Check requirements from the original repo togethercomputer/LLaMA-2-7B-32K installed. In particular, check python, cuda, and transformers versions.
585
+ 1. Make sure that you have installed quantization related packages.
586
+ ```bash
587
+ pip install hqq
588
+ ```
589
+ 2. Load & run the model.
590
+ ```python
591
+ from transformers import AutoModelForCausalLM, AutoTokenizer
592
+ from hqq.engine.hf import HQQModelForCausalLM
593
+ from hqq.models.hf.base import AutoHQQHFModel
594
+
595
+ try:
596
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-4bit-smashed"", device_map='auto')
597
+ except:
598
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-4bit-smashed"")
599
+ tokenizer = AutoTokenizer.from_pretrained(""togethercomputer/LLaMA-2-7B-32K"")
600
+
601
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
602
+
603
+ outputs = model.generate(input_ids, max_new_tokens=216)
604
+ tokenizer.decode(outputs[0])
605
+ ```
606
+
607
+ ## Configurations
608
+
609
+ The configuration info are in `smash_config.json`.
610
+
611
+ ## Credits & License
612
+
613
+ The license of the smashed model follows the license of the original model. Please check the license of the original model togethercomputer/LLaMA-2-7B-32K before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
614
+
615
+ ## Want to compress other models?
616
+
617
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
618
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-4bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""fee3782a86c9903bd549f76488546e2c9a68201a"", ""last_modified"": ""2024-08-02 16:18:01+00:00"", ""created_at"": ""2024-06-24 11:33:45+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""text-generation"", ""pruna-ai"", ""base_model:togethercomputer/LLaMA-2-7B-32K"", ""base_model:finetune:togethercomputer/LLaMA-2-7B-32K"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-02 16:18:01+00:00"", ""cardData"": ""base_model: togethercomputer/LLaMA-2-7B-32K\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66795999892c0a171612dfe1"", ""modelId"": ""PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-4bit-smashed"", ""usedStorage"": 
3915482955}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/togethercomputer-LLaMA-2-7B-32K-HQQ-4bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Ftogethercomputer-LLaMA-2-7B-32K-HQQ-4bit-smashed%5D(%2FPrunaAI%2Ftogethercomputer-LLaMA-2-7B-32K-HQQ-4bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Llama-2-70B-Chat-GGML_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ TheBloke/Llama-2-70B-Chat-GGML,"---
3
+ language:
4
+ - en
5
+ license: other
6
+ tags:
7
+ - facebook
8
+ - meta
9
+ - pytorch
10
+ - llama
11
+ - llama-2
12
+ model_name: Llama 2 70B Chat
13
+ inference: false
14
+ model_creator: Meta Llama 2
15
+ model_link: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
16
+ model_type: llama
17
+ pipeline_tag: text-generation
18
+ quantized_by: TheBloke
19
+ base_model: meta-llama/Llama-2-70b-chat-hf
20
+ ---
21
+
22
+ <!-- header start -->
23
+ <!-- 200823 -->
24
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
25
+ <img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
26
+ </div>
27
+ <div style=""display: flex; justify-content: space-between; width: 100%;"">
28
+ <div style=""display: flex; flex-direction: column; align-items: flex-start;"">
29
+ <p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://discord.gg/theblokeai"">Chat & support: TheBloke's Discord server</a></p>
30
+ </div>
31
+ <div style=""display: flex; flex-direction: column; align-items: flex-end;"">
32
+ <p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
33
+ </div>
34
+ </div>
35
+ <div style=""text-align:center; margin-top: 0em; margin-bottom: 0em""><p style=""margin-top: 0.25em; margin-bottom: 0em;"">TheBloke's LLM work is generously supported by a grant from <a href=""https://a16z.com"">andreessen horowitz (a16z)</a></p></div>
36
+ <hr style=""margin-top: 1.0em; margin-bottom: 1.0em;"">
37
+ <!-- header end -->
38
+
39
+ # Llama 2 70B Chat - GGML
40
+ - Model creator: [Meta Llama 2](https://huggingface.co/meta-llama)
41
+ - Original model: [Llama 2 70B Chat](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)
42
+
43
+ ## Description
44
+
45
+ This repo contains GGML format model files for [Meta Llama 2's Llama 2 70B Chat](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf).
46
+
47
+ ### Important note regarding GGML files.
48
+
49
+ The GGML format has now been superseded by GGUF. As of August 21st 2023, [llama.cpp](https://github.com/ggerganov/llama.cpp) no longer supports GGML models. Third party clients and libraries are expected to still support it for a time, but many may also drop support.
50
+
51
+ Please use the GGUF models instead.
52
+
53
+ ### About GGML
54
+
55
+ GPU acceleration is now available for Llama 2 70B GGML files, with both CUDA (NVidia) and Metal (macOS). The following clients/libraries are known to work with these files, including with GPU acceleration:
56
+ * [llama.cpp](https://github.com/ggerganov/llama.cpp), commit `e76d630` and later.
57
+ * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI.
58
+ * [KoboldCpp](https://github.com/LostRuins/koboldcpp), version 1.37 and later. A powerful GGML web UI, especially good for story telling.
59
+ * [LM Studio](https://lmstudio.ai/), a fully featured local GUI with GPU acceleration for both Windows and macOS. Use 0.1.11 or later for macOS GPU acceleration with 70B models.
60
+ * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), version 0.1.77 and later. A Python library with LangChain support, and OpenAI-compatible API server.
61
+ * [ctransformers](https://github.com/marella/ctransformers), version 0.2.15 and later. A Python library with LangChain support, and OpenAI-compatible API server.
62
+
63
+ ## Repositories available
64
+
65
+ * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Llama-2-70B-chat-GPTQ)
66
+ * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Llama-2-70B-chat-GGUF)
67
+ * [2, 3, 4, 5, 6 and 8-bit GGML models for CPU+GPU inference (deprecated)](https://huggingface.co/TheBloke/Llama-2-70B-chat-GGML)
68
+ * [Meta Llama 2's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)
69
+
70
+ ## Prompt template: Llama-2-Chat
71
+
72
+ ```
73
+ [INST] <<SYS>>
74
+ You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
75
+ <</SYS>>
76
+ {prompt}[/INST]
77
+
78
+ ```
79
+
80
+ <!-- compatibility_ggml start -->
81
+ ## Compatibility
82
+
83
+ ### Works with llama.cpp [commit `e76d630`](https://github.com/ggerganov/llama.cpp/commit/e76d630df17e235e6b9ef416c45996765d2e36fb) until August 21st, 2023
84
+
85
+ Will not work with `llama.cpp` after commit [dadbed99e65252d79f81101a392d0d6497b86caa](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa).
86
+
87
+ For compatibility with latest llama.cpp, please use GGUF files instead.
88
+
89
+ Or one of the other tools and libraries listed above.
90
+
91
+ To use in llama.cpp, you must add `-gqa 8` argument.
92
+
93
+ For other UIs and libraries, please check the docs.
94
+
95
+ ## Explanation of the new k-quant methods
96
+ <details>
97
+ <summary>Click to see details</summary>
98
+
99
+ The new methods available are:
100
+ * GGML_TYPE_Q2_K - ""type-1"" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)
101
+ * GGML_TYPE_Q3_K - ""type-0"" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This end up using 3.4375 bpw.
102
+ * GGML_TYPE_Q4_K - ""type-1"" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
103
+ * GGML_TYPE_Q5_K - ""type-1"" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw
104
+ * GGML_TYPE_Q6_K - ""type-0"" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw
105
+ * GGML_TYPE_Q8_K - ""type-0"" 8-bit quantization. Only used for quantizing intermediate results. The difference to the existing Q8_0 is that the block size is 256. All 2-6 bit dot products are implemented for this quantization type.
106
+
107
+ Refer to the Provided Files table below to see what files use which methods, and how.
108
+ </details>
109
+ <!-- compatibility_ggml end -->
110
+
111
+ ## Provided files
112
+
113
+ | Name | Quant method | Bits | Size | Max RAM required | Use case |
114
+ | ---- | ---- | ---- | ---- | ---- | ----- |
115
+ | llama-2-70b-chat.ggmlv3.q2_K.bin | q2_K | 2 | 28.59 GB| 31.09 GB | New k-quant method. Uses GGML_TYPE_Q4_K for the attention.vw and feed_forward.w2 tensors, GGML_TYPE_Q2_K for the other tensors. |
116
+ | llama-2-70b-chat.ggmlv3.q3_K_S.bin | q3_K_S | 3 | 29.75 GB| 32.25 GB | New k-quant method. Uses GGML_TYPE_Q3_K for all tensors |
117
+ | llama-2-70b-chat.ggmlv3.q3_K_M.bin | q3_K_M | 3 | 33.04 GB| 35.54 GB | New k-quant method. Uses GGML_TYPE_Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else GGML_TYPE_Q3_K |
118
+ | llama-2-70b-chat.ggmlv3.q3_K_L.bin | q3_K_L | 3 | 36.15 GB| 38.65 GB | New k-quant method. Uses GGML_TYPE_Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else GGML_TYPE_Q3_K |
119
+ | llama-2-70b-chat.ggmlv3.q4_0.bin | q4_0 | 4 | 38.87 GB| 41.37 GB | Original quant method, 4-bit. |
120
+ | llama-2-70b-chat.ggmlv3.q4_K_S.bin | q4_K_S | 4 | 38.87 GB| 41.37 GB | New k-quant method. Uses GGML_TYPE_Q4_K for all tensors |
121
+ | llama-2-70b-chat.ggmlv3.q4_K_M.bin | q4_K_M | 4 | 41.38 GB| 43.88 GB | New k-quant method. Uses GGML_TYPE_Q6_K for half of the attention.wv and feed_forward.w2 tensors, else GGML_TYPE_Q4_K |
122
+ | llama-2-70b-chat.ggmlv3.q4_1.bin | q4_1 | 4 | 43.17 GB| 45.67 GB | Original quant method, 4-bit. Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models. |
123
+ | llama-2-70b-chat.ggmlv3.q5_0.bin | q5_0 | 5 | 47.46 GB| 49.96 GB | Original quant method, 5-bit. Higher accuracy, higher resource usage and slower inference. |
124
+ | llama-2-70b-chat.ggmlv3.q5_K_S.bin | q5_K_S | 5 | 47.46 GB| 49.96 GB | New k-quant method. Uses GGML_TYPE_Q5_K for all tensors |
125
+ | llama-2-70b-chat.ggmlv3.q5_K_M.bin | q5_K_M | 5 | 48.75 GB| 51.25 GB | New k-quant method. Uses GGML_TYPE_Q6_K for half of the attention.wv and feed_forward.w2 tensors, else GGML_TYPE_Q5_K |
126
+
127
+ **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead.
128
+
129
+ ## How to run in `llama.cpp`
130
+
131
+ Make sure you are using `llama.cpp` from commit [dadbed99e65252d79f81101a392d0d6497b86caa](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa) or earlier.
132
+
133
+ For compatibility with latest llama.cpp, please use GGUF files instead.
134
+
135
+ I use the following command line; adjust for your tastes and needs:
136
+
137
+ ```
138
+ ./main -t 10 -ngl 40 -gqa 8 -m llama-2-70b-chat.ggmlv3.q4_K_M.bin --color -c 4096 --temp 0.7 --repeat_penalty 1.1 -n -1 -p ""[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>\nWrite a story about llamas[/INST]""
139
+ ```
140
+ Change `-t 10` to the number of physical CPU cores you have. For example if your system has 8 cores/16 threads, use `-t 8`. If you are fully offloading the model to GPU, use `-t 1`
141
+
142
+ Change `-ngl 40` to the number of GPU layers you have VRAM for. Use `-ngl 100` to offload all layers to VRAM - if you have a 48GB card, or 2 x 24GB, or similar. Otherwise you can partially offload as many as you have VRAM for, on one or more GPUs.
143
+
144
+ If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`
145
+
146
+ Remember the `-gqa 8` argument, required for Llama 70B models.
147
+
148
+ Change `-c 4096` to the desired sequence length for this model. For models that use RoPE, add `--rope-freq-base 10000 --rope-freq-scale 0.5` for doubled context, or `--rope-freq-base 10000 --rope-freq-scale 0.25` for 4x context.
149
+
150
+ For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)
151
+
152
+ ## How to run in `text-generation-webui`
153
+
154
+ Further instructions here: [text-generation-webui/docs/llama.cpp-models.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp-models.md).
155
+
156
+ <!-- footer start -->
157
+ <!-- 200823 -->
158
+ ## Discord
159
+
160
+ For further support, and discussions on these models and AI in general, join us at:
161
+
162
+ [TheBloke AI's Discord server](https://discord.gg/theblokeai)
163
+
164
+ ## Thanks, and how to contribute.
165
+
166
+ Thanks to the [chirper.ai](https://chirper.ai) team!
167
+
168
+ I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
169
+
170
+ If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
171
+
172
+ Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
173
+
174
+ * Patreon: https://patreon.com/TheBlokeAI
175
+ * Ko-Fi: https://ko-fi.com/TheBlokeAI
176
+
177
+ **Special thanks to**: Aemon Algiz.
178
+
179
+ **Patreon special mentions**: Russ Johnson, J, alfie_i, Alex, NimbleBox.ai, Chadd, Mandus, Nikolai Manek, Ken Nordquist, ya boyyy, Illia Dulskyi, Viktor Bowallius, vamX, Iucharbius, zynix, Magnesian, Clay Pascal, Pierre Kircher, Enrico Ros, Tony Hughes, Elle, Andrey, knownsqashed, Deep Realms, Jerry Meng, Lone Striker, Derek Yates, Pyrater, Mesiah Bishop, James Bentley, Femi Adebogun, Brandon Frisco, SuperWojo, Alps Aficionado, Michael Dempsey, Vitor Caleffi, Will Dee, Edmond Seymore, usrbinkat, LangChain4j, Kacper Wikieł, Luke Pendergrass, John Detwiler, theTransient, Nathan LeClaire, Tiffany J. Kim, biorpg, Eugene Pentland, Stanislav Ovsiannikov, Fred von Graf, terasurfer, Kalila, Dan Guido, Nitin Borwankar, 阿明, Ai Maven, John Villwock, Gabriel Puliatti, Stephen Murray, Asp the Wyvern, danny, Chris Smitley, ReadyPlayerEmma, S_X, Daniel P. Andersen, Olakabola, Jeffrey Morgan, Imad Khwaja, Caitlyn Gatomon, webtim, Alicia Loh, Trenton Dambrowitz, Swaroop Kallakuri, Erik Bjäreholt, Leonard Tan, Spiking Neurons AB, Luke @flexchar, Ajan Kanaga, Thomas Belote, Deo Leter, RoA, Willem Michiel, transmissions 11, subjectnull, Matthew Berman, Joseph William Delisle, David Ziegler, Michael Davis, Johann-Peter Hartmann, Talal Aujan, senxiiz, Artur Olbinski, Rainer Wilmers, Spencer Kim, Fen Risland, Cap'n Zoog, Rishabh Srivastava, Michael Levine, Geoffrey Montalvo, Sean Connelly, Alexandros Triantafyllidis, Pieter, Gabriel Tamborski, Sam, Subspace Studios, Junyu Yang, Pedro Madruga, Vadim, Cory Kujawski, K, Raven Klaugh, Randy H, Mano Prime, Sebastain Graf, Space Cruiser
180
+
181
+
182
+ Thank you to all my generous patrons and donaters!
183
+
184
+ And thank you again to a16z for their generous grant.
185
+
186
+ <!-- footer end -->
187
+
188
+ # Original model card: Meta Llama 2's Llama 2 70B Chat
189
+
190
+ # **Llama 2**
191
+ Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 70B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.
192
+
193
+ ## Model Details
194
+ *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.*
195
+
196
+ Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.
197
+
198
+ **Model Developers** Meta
199
+
200
+ **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.
201
+
202
+ **Input** Models input text only.
203
+
204
+ **Output** Models generate text only.
205
+
206
+ **Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.
207
+
208
+
209
+ ||Training Data|Params|Content Length|GQA|Tokens|LR|
210
+ |---|---|---|---|---|---|---|
211
+ |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>|
212
+ |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>|
213
+ |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>|
214
+
215
+ *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.
216
+
217
+ **Model Dates** Llama 2 was trained between January 2023 and July 2023.
218
+
219
+ **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.
220
+
221
+ **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)
222
+
223
+ **Research Paper** [""Llama-2: Open Foundation and Fine-tuned Chat Models""](arxiv.org/abs/2307.09288)
224
+
225
+ ## Intended Use
226
+ **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.
227
+
228
+ **Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.
229
+
230
+ ## Hardware and Software
231
+ **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.
232
+
233
+ **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.
234
+
235
+ ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)|
236
+ |---|---|---|---|
237
+ |Llama 2 7B|184320|400|31.22|
238
+ |Llama 2 13B|368640|400|62.44|
239
+ |Llama 2 70B|1720320|400|291.42|
240
+ |Total|3311616||539.00|
241
+
242
+ **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.
243
+
244
+ ## Training Data
245
+ **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.
246
+
247
+ **Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.
248
+
249
+ ## Evaluation Results
250
+
251
+ In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.
252
+
253
+ |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval|
254
+ |---|---|---|---|---|---|---|---|---|---|
255
+ |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9|
256
+ |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9|
257
+ |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7|
258
+ |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6|
259
+ |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3|
260
+ |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1|
261
+ |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**|
262
+
263
+ **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.
264
+
265
+ |||TruthfulQA|Toxigen|
266
+ |---|---|---|---|
267
+ |Llama 1|7B|27.42|23.00|
268
+ |Llama 1|13B|41.74|23.08|
269
+ |Llama 1|33B|44.19|22.57|
270
+ |Llama 1|65B|48.71|21.77|
271
+ |Llama 2|7B|33.29|**21.25**|
272
+ |Llama 2|13B|41.86|26.10|
273
+ |Llama 2|70B|**50.18**|24.60|
274
+
275
+ **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).
276
+
277
+
278
+ |||TruthfulQA|Toxigen|
279
+ |---|---|---|---|
280
+ |Llama-2-Chat|7B|57.04|**0.00**|
281
+ |Llama-2-Chat|13B|62.18|**0.00**|
282
+ |Llama-2-Chat|70B|**64.14**|0.01|
283
+
284
+ **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above.
285
+
286
+ ## Ethical Considerations and Limitations
287
+ Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.
288
+
289
+ Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide)
290
+
291
+ ## Reporting Issues
292
+ Please report any software “bug,” or other problems with the models through one of the following means:
293
+ - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
294
+ - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
295
+ - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
296
+
297
+ ## Llama Model Index
298
+ |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf|
299
+ |---|---|---|---|---|
300
+ |7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)|
301
+ |13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf)|
302
+ |70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)|
303
+ ","{""id"": ""TheBloke/Llama-2-70B-Chat-GGML"", ""author"": ""TheBloke"", ""sha"": ""d59cdcb92017737098edc4b293e4f0724021a475"", ""last_modified"": ""2023-09-27 13:00:24+00:00"", ""created_at"": ""2023-07-23 13:16:43+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 14, ""downloads_all_time"": null, ""likes"": 161, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""facebook"", ""meta"", ""pytorch"", ""llama-2"", ""text-generation"", ""en"", ""arxiv:2307.09288"", ""base_model:meta-llama/Llama-2-70b-chat-hf"", ""base_model:finetune:meta-llama/Llama-2-70b-chat-hf"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: meta-llama/Llama-2-70b-chat-hf\nlanguage:\n- en\nlicense: other\nmodel_name: Llama 2 70B Chat\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-2\ninference: false\nmodel_creator: Meta Llama 2\nmodel_link: https://huggingface.co/meta-llama/Llama-2-70b-chat-hf\nmodel_type: llama\nquantized_by: TheBloke"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""model_type"": ""llama""}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='USE_POLICY.md', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q2_K.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q3_K_L.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q3_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q3_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q4_0.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q4_1.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q4_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q4_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q5_0.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q5_K_M.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q5_K_S.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q6_K.z01', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q6_K.zip', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q8_0.z01', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='llama-2-70b-chat.ggmlv3.q8_0.zip', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-27 13:00:24+00:00"", ""cardData"": ""base_model: meta-llama/Llama-2-70b-chat-hf\nlanguage:\n- en\nlicense: other\nmodel_name: Llama 2 70B Chat\npipeline_tag: text-generation\ntags:\n- facebook\n- meta\n- pytorch\n- llama\n- llama-2\ninference: false\nmodel_creator: Meta Llama 2\nmodel_link: 
https://huggingface.co/meta-llama/Llama-2-70b-chat-hf\nmodel_type: llama\nquantized_by: TheBloke"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""64bd283bae436c88130e6b5e"", ""modelId"": ""TheBloke/Llama-2-70B-Chat-GGML"", ""usedStorage"": 718454942698}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=TheBloke/Llama-2-70B-Chat-GGML&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheBloke%2FLlama-2-70B-Chat-GGML%5D(%2FTheBloke%2FLlama-2-70B-Chat-GGML)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Llama-3-Refueled_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ refuelai/Llama-3-Refueled,"---
3
+ license: cc-by-nc-4.0
4
+ language:
5
+ - en
6
+ library_name: transformers
7
+ tags:
8
+ - data labeling
9
+ ---
10
+ <div style=""width: auto; margin-left: auto; margin-right: auto; background-color:black"">
11
+ <img src=""https://assets-global.website-files.com/6423879a8f63c1bb18d74bfa/648818d56d04c3bdf36d71ab_Refuel_rev8-01_ts-p-1600.png"" alt=""Refuel.ai"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
12
+ </div>
13
+
14
+ ## Model Details
15
+
16
+ RefuelLLM-2-small, aka Llama-3-Refueled, is a Llama3-8B base model instruction tuned on a corpus of 2750+ datasets, spanning tasks such as classification, reading comprehension, structured attribute extraction and entity resolution. We're excited to open-source the model for the community to build on top of.
17
+
18
+ * More details about [RefuelLLM-2 family of models](https://www.refuel.ai/blog-posts/announcing-refuel-llm-2)
19
+ * You can also try out the models in our [LLM playground](https://labs.refuel.ai/playground)
20
+
21
+ **Model developers** - Refuel AI
22
+
23
+ **Input** - Text only.
24
+
25
+ **Output** - Text only.
26
+
27
+ **Architecture** - Llama-3-Refueled is built on top of Llama-3-8B-instruct which is an auto-regressive language model that uses an optimized transformer architecture.
28
+
29
+ **Release Date** - May 8, 2024.
30
+
31
+ **License** - [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/deed.en)
32
+
33
+ ## How to use
34
+
35
+ This repository contains weights for Llama-3-Refueled that are compatible for use with HuggingFace. See the snippet below for usage with Transformers:
36
+
37
+ ```python
38
+ >>> import torch
39
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
40
+
41
+ >>> model_id = ""refuelai/Llama-3-Refueled""
42
+ >>> tokenizer = AutoTokenizer.from_pretrained(model_id)
43
+ >>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map=""auto"")
44
+
45
+ >>> messages = [{""role"": ""user"", ""content"": ""Is this comment toxic or non-toxic: RefuelLLM is the new way to label text data!""}]
46
+
47
+ >>> inputs = tokenizer.apply_chat_template(messages, return_tensors=""pt"", add_generation_prompt=True).to(""cuda"")
48
+
49
+ >>> outputs = model.generate(inputs, max_new_tokens=20)
50
+ >>> print(tokenizer.decode(outputs[0]))
51
+ ```
52
+
53
+ ## Training Data
54
+
55
+ The model was both trained on over 4 Billion tokens, spanning 2750+ NLP tasks. Our training collection consists majorly of:
56
+ 1. Human annotated datasets like Flan, Task Source, and the Aya collection
57
+ 2. Synthetic datasets like OpenOrca, OpenHermes and WizardLM
58
+ 3. Proprietary datasets developed or licensed by Refuel AI
59
+
60
+ ## Benchmarks
61
+
62
+ In this section, we report the results for Refuel models on our benchmark of labeling tasks. For details on the methodology see [here](https://refuel.ai/blog-posts/announcing-refuel-llm-2).
63
+
64
+ <table>
65
+ <tr></tr>
66
+ <tr><th>Provider</th><th>Model</th><th colspan=""4"" style=""text-align: center"">LLM Output Quality (by task type)</tr>
67
+ <tr><td></td><td></td><td>Overall</td><td>Classification</td><td>Reading Comprehension</td><td>Structure Extraction</td><td>Entity Matching</td><td></td></tr>
68
+ <tr><td>Refuel</td><td>RefuelLLM-2</td><td>83.82%</td><td>84.94%</td><td>76.03%</td><td>88.16%</td><td>92.00%</td><td></td></tr>
69
+ <tr><td>OpenAI</td><td>GPT-4-Turbo</td><td>80.88%</td><td>81.77%</td><td>72.08%</td><td>84.79%</td><td>97.20%</td><td></td></tr>
70
+ <tr><td>Refuel</td><td>RefuelLLM-2-small (Llama-3-Refueled)</td><td>79.67%</td><td>81.72%</td><td>70.04%</td><td>84.28%</td><td>92.00%</td><td></td></tr>
71
+ <tr><td>Anthropic</td><td>Claude-3-Opus</td><td>79.19%</td><td>82.49%</td><td>67.30%</td><td>88.25%</td><td>94.96%</td><td></td></tr>
72
+ <tr><td>Meta</td><td>Llama3-70B-Instruct</td><td>78.20%</td><td>79.38%</td><td>66.03%</td><td>85.96%</td><td>94.13%</td><td></td></tr>
73
+ <tr><td>Google</td><td>Gemini-1.5-Pro</td><td>74.59%</td><td>73.52%</td><td>60.67%</td><td>84.27%</td><td>98.48%</td><td></td></tr>
74
+ <tr><td>Mistral</td><td>Mixtral-8x7B-Instruct</td><td>62.87%</td><td>79.11%</td><td>45.56%</td><td>47.08%</td><td>86.52%</td><td></td></tr>
75
+ <tr><td>Anthropic</td><td>Claude-3-Sonnet</td><td>70.99%</td><td>79.91%</td><td>45.44%</td><td>78.10%</td><td>96.34%</td><td></td></tr>
76
+ <tr><td>Anthropic</td><td>Claude-3-Haiku</td><td>69.23%</td><td>77.27%</td><td>50.19%</td><td>84.97%</td><td>54.08%</td><td></td></tr>
77
+ <tr><td>OpenAI</td><td>GPT-3.5-Turbo</td><td>68.13%</td><td>74.39%</td><td>53.21%</td><td>69.40%</td><td>80.41%</td><td></td></tr>
78
+ <tr><td>Meta</td><td>Llama3-8B-Instruct</td><td>62.30%</td><td>68.52%</td><td>49.16%</td><td>65.09%</td><td>63.61%</td><td></td></tr>
79
+ </table>
80
+
81
+
82
+ ## Limitations
83
+
84
+ The Llama-3-Refueled does not have any moderation mechanisms. We're looking forward to engaging with the community
85
+ on ways to make the model finely respect guardrails, allowing for deployment in environments requiring moderated outputs.","{""id"": ""refuelai/Llama-3-Refueled"", ""author"": ""refuelai"", ""sha"": ""ff6d1c3ba37b31d4af421951c2300f2256fb3691"", ""last_modified"": ""2024-05-09 20:42:29+00:00"", ""created_at"": ""2024-05-03 05:16:26+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 317, ""downloads_all_time"": null, ""likes"": 190, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""data labeling"", ""conversational"", ""en"", ""license:cc-by-nc-4.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\ntags:\n- data labeling"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% else %}{{ eos_token }}{% endif %}"", ""eos_token"": ""<|end_of_text|>""}}, 
""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='latest', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_0.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_1.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_2.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_3.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_4.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_5.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_6.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='rng_state_7.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='zero_to_fp32.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""featherless-ai/try-this-model"", ""Darok/Featherless-Feud"", ""emekaboris/try-this-model"", ""SC999/NV_Nemotron"", ""JackHoltone/try-this-model"", ""k11112/try-this-model""], ""safetensors"": {""parameters"": {""BF16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-09 20:42:29+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\ntags:\n- data labeling"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6634732a7d1c6fadb09dc36b"", ""modelId"": ""refuelai/Llama-3-Refueled"", ""usedStorage"": 16060692664}",0,https://huggingface.co/QuietImpostor/Llama-3-Refueled-Pruned,1,,0,"https://huggingface.co/solidrust/Llama-3-Refueled-AWQ, https://huggingface.co/mradermacher/Llama-3-Refueled-GGUF, https://huggingface.co/mradermacher/Llama-3-Refueled-i1-GGUF, https://huggingface.co/tensorblock/Llama-3-Refueled-GGUF",4,"https://huggingface.co/sethuiyer/Medichat-V2-Llama3-8B, https://huggingface.co/DreadPoor/Spei_Meridiem-8B-model_stock, https://huggingface.co/PJMixers-Archive/LLaMa-3-CursedStock-v2.0-8B, https://huggingface.co/Casual-Autopsy/L3-Deluxe-Scrambled-Eggs-On-Toast-8B, https://huggingface.co/ehristoforu/testllama, https://huggingface.co/PJMixers-Archive/LLaMa-3-CursedStock-v1.6-8B, https://huggingface.co/PJMixers-Archive/LLaMa-3-CursedStock-v1.8-8B, https://huggingface.co/DreadPoor/ONeil-model_stock-8B, https://huggingface.co/QuantFactory/L3-Deluxe-Scrambled-Eggs-On-Toast-8B-GGUF, 
https://huggingface.co/sagnik-p/medical_llm, https://huggingface.co/bunnycore/Llama-3.1-8B-OmniMatrix-v2, https://huggingface.co/QuantFactory/Llama-3.1-8B-OmniMatrix-v2-GGUF, https://huggingface.co/bunnycore/Llama-3.1-8B-TitanFusion, https://huggingface.co/catrinbaze/llama-refueled-merge, https://huggingface.co/catrinbaze/refueled-hermes-2-pro-slerp, https://huggingface.co/catrinbaze/refueled-slerp, https://huggingface.co/DreadPoor/LemonP-8B-Model_Stock, https://huggingface.co/DreadPoor/Spring_Dusk-8B-SCE, https://huggingface.co/jaspionjader/bh-1, https://huggingface.co/mergekit-community/mergekit-model_stock-adqzxpt, https://huggingface.co/mergekit-community/mergekit-model_stock-dotdour",21,"Darok/Featherless-Feud, JackHoltone/try-this-model, SC999/NV_Nemotron, emekaboris/try-this-model, featherless-ai/try-this-model, huggingface/InferenceSupport/discussions/new?title=refuelai/Llama-3-Refueled&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Brefuelai%2FLlama-3-Refueled%5D(%2Frefuelai%2FLlama-3-Refueled)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, k11112/try-this-model",7
86
+ QuietImpostor/Llama-3-Refueled-Pruned,"---
87
+ base_model:
88
+ - refuelai/Llama-3-Refueled
89
+ library_name: transformers
90
+ tags:
91
+ - mergekit
92
+ - merge
93
+ license: llama3
94
+ datasets:
95
+ - yahma/alpaca-cleaned
96
+ language:
97
+ - en
98
+ ---
99
+ ### Pruning Details
100
+
101
+ This is a prune of [Llama 3 Refueled](https://www.huggingface.co/refuelai/llama-3-refueled) using [mergekit](https://github.com/cg123/mergekit) and [PruneMe](https://www.github.com/arcee-ai/PruneMe)
102
+ The model is semi-tested, but still needs some debugging, namely with converting to GGUF, though I am working on that.
103
+
104
+ Note: the [dataset](https://www.huggingface.co/yahma/alpaca-cleaned) was used for evaluating what layers should be pruned. This model was **NOT** finetuned.
105
+
106
+ ### Performance
107
+ After only 1 test because of lack of compute and for stupid long inference times on my 3060ti (8GB), it does show some interesting results.
108
+ Here's the response after being prompted ""Hi!"" using the [example from Meta](https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3).
109
+
110
+ ```model_response
111
+ vel tips and recommendations.user
112
+ Hi!assistant
113
+ Hi! I can help you find the best travel tips and recommendations for your next trip. Where you most interested to travel and what kind of activities you most to to the 9e sure, we can start and letiing 10e 11e 12e 13e 14e 15e 16e 17e 18e 19e 20e 21e 23e 24e 5e 6e 7e 8e 9e 10e 11e 12e 13e 14e 15e
114
+ ```
115
+
116
+ Even without finetuning, the model still exhibits some extent of instruction following.
117
+ And fine-tuning is a WIP and I will update this when it's ready.
118
+ Finetuning is no longer in progress due to issues with unsloth. However, I am working on a project that will hopefully make pruning models easier.
119
+
120
+ ### Configuration
121
+
122
+ The following YAML configuration was used to produce this model:
123
+
124
+ ```yaml
125
+ slices:
126
+ - sources:
127
+ - model: refuelai/Llama-3-Refueled
128
+ layer_range: [0, 19]
129
+ - sources:
130
+ - model: refuelai/Llama-3-Refueled
131
+ layer_range: [29, 32]
132
+
133
+ merge_method: passthrough
134
+ dtype: bfloat16
135
+ ```","{""id"": ""QuietImpostor/Llama-3-Refueled-Pruned"", ""author"": ""QuietImpostor"", ""sha"": ""997186e4683d6648413cc6f5b664d9e30c558627"", ""last_modified"": ""2024-05-28 17:31:08+00:00"", ""created_at"": ""2024-05-21 19:26:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 9, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""mergekit"", ""merge"", ""conversational"", ""en"", ""dataset:yahma/alpaca-cleaned"", ""base_model:refuelai/Llama-3-Refueled"", ""base_model:finetune:refuelai/Llama-3-Refueled"", ""license:llama3"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- refuelai/Llama-3-Refueled\ndatasets:\n- yahma/alpaca-cleaned\nlanguage:\n- en\nlibrary_name: transformers\nlicense: llama3\ntags:\n- mergekit\n- merge"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ 
'<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% else %}{{ eos_token }}{% endif %}"", ""eos_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mergekit_config.yml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 5849141248}, ""total"": 5849141248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-28 17:31:08+00:00"", ""cardData"": ""base_model:\n- refuelai/Llama-3-Refueled\ndatasets:\n- yahma/alpaca-cleaned\nlanguage:\n- en\nlibrary_name: transformers\nlicense: llama3\ntags:\n- mergekit\n- merge"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""664cf560567cb471efe736e0"", ""modelId"": ""QuietImpostor/Llama-3-Refueled-Pruned"", ""usedStorage"": 
11698305888}",1,,0,,0,https://huggingface.co/mradermacher/Llama-3-Refueled-Pruned-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=QuietImpostor/Llama-3-Refueled-Pruned&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BQuietImpostor%2FLlama-3-Refueled-Pruned%5D(%2FQuietImpostor%2FLlama-3-Refueled-Pruned)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Llama-3_1-Nemotron-51B-Instruct_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ nvidia/Llama-3_1-Nemotron-51B-Instruct,"---
3
+ library_name: transformers
4
+ pipeline_tag: text-generation
5
+ language:
6
+ - en
7
+ tags:
8
+ - nvidia
9
+ - llama-3
10
+ - pytorch
11
+ license: other
12
+ license_name: nvidia-open-model-license
13
+ license_link: >-
14
+ https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf
15
+ ---
16
+
17
+ # Llama-3_1-Nemotron-51B-instruct
18
+
19
+
20
+
21
+ ## Model Overview
22
+ Llama-3_1-Nemotron-51B-instruct is a model which offers a great tradeoff between model accuracy and efficiency. Efficiency (throughput) directly translates to price, providing great ‘quality-per-dollar’. Using a novel Neural Architecture Search (NAS) approach we greatly reduce the model’s memory footprint, enabling larger workloads, as well as fitting the model on a single GPU at high workloads (H100-80GB). This NAS approach enables the selection of a desired point in the accuracy-efficiency tradeoff. This model is ready for commercial use.
23
+
24
+
25
+ ## License
26
+ Your use of this model is governed by the [NVIDIA Open Model License](https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf).
27
+ Additional Information: [Llama 3.1 Community License Agreement](https://www.llama.com/llama3_1/license/). Built with Llama.
28
+
29
+ ## How was the model developed
30
+
31
+ Llama-3_1-Nemotron-51B-instruct is a large language model (LLM) which is a derivative of Llama-3.1-70B-instruct (AKA the reference model). We utilize a block-wise distillation of the reference model, where for each block we create multiple variants providing different tradeoffs of quality vs. computational complexity. We then search over the blocks to create a model which meets the required throughput and memory (optimized for a single H100-80GB GPU) while minimizing the quality degradation. The model then undergoes knowledge distillation (KD), with a focus on English single and multi-turn chat use-cases.
32
+ The KD step included 40 billion tokens consisting of a mixture of 3 datasets - FineWeb, Buzz-V1.2 and Dolma.
33
+
34
+ Links to [NIM](https://build.nvidia.com/nvidia/llama-3_1-nemotron-51b-instruct), [blog](https://developer.nvidia.com/blog/advancing-the-accuracy-efficiency-frontier-with-llama-3-1-nemotron-51b/) and [huggingface](https://huggingface.co/nvidia/Llama-3_1-Nemotron-51B-Instruct)
35
+
36
+
37
+
38
+ This results in a final model that is aligned for human chat preferences.
39
+
40
+ **Model Developers:** NVIDIA
41
+
42
+ **Model Input:** Text only
43
+
44
+ **Model Output:** Text only
45
+
46
+ **Model Dates:** Llama-3_1-Nemotron-51B-instruct was trained between August and September 2024
47
+
48
+ **Data Freshness:** The pretraining data has a cutoff of 2023
49
+
50
+ **Sequence Length Used During Distillation:** 8192
51
+
52
+
53
+ ## Quick Start
54
+ Our code requires the `transformers` package version to be 4.44.2 or higher
55
+
56
+ See the snippet below for usage with transformers:
57
+ ```python
58
+ import torch
59
+ import transformers
60
+
61
+ model_id = ""nvidia/Llama-3_1-Nemotron-51B-Instruct""
62
+ model_kwargs = {""torch_dtype"": torch.bfloat16, ""trust_remote_code"": True, ""device_map"": ""auto""}
63
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
64
+ tokenizer.pad_token_id = tokenizer.eos_token_id
65
+
66
+ pipeline = transformers.pipeline(
67
+ ""text-generation"",
68
+ model=model_id,
69
+ tokenizer=tokenizer,
70
+ max_new_tokens=20,
71
+ **model_kwargs
72
+ )
73
+ print(pipeline([{""role"": ""user"", ""content"": ""Hey how are you?""}]))
74
+ ```
75
+
76
+
77
+
78
+ ## Required Hardware
79
+
80
+ FP8 Inference (recommended):
81
+ - 1x H100-80GB GPU
82
+
83
+ BF16 Inference:
84
+ - 2x H100-80GB GPUs
85
+ - 2x A100-80GB GPUs
86
+
87
+
88
+ ## Model Architecture
89
+ The model is a derivative of Llama-3.1-70B, using Neural Architecture Search (NAS). The NAS algorithm results in non-standard and non-repetitive blocks. This includes the following:
90
+ * Variable Grouped Query Attention (VGQA) - each block can have a different number of KV (keys and values) heads, ranging from 1 to Llama’s typical 8.
91
+ * Skip attention - in some blocks the attention is skipped entirely, or replaced with a single linear layer.
92
+ * Variable FFN - the expansion/compression ratio in the FFN layer is different between blocks.
93
+
94
+
95
+ **Architecture Type:** Transformer Decoder (auto-regressive language model)
96
+
97
+ ## Software Integration
98
+ **Runtime Engine(s):**
99
+ * NeMo 24.05 <br>
100
+
101
+
102
+ **Supported Hardware Architecture Compatibility:** NVIDIA H100, A100 80GB (BF16 quantization).
103
+
104
+ **[Preferred/Supported] Operating System(s):** <br>
105
+ * Linux <br>
106
+
107
+ ## Intended use
108
+
109
+ Llama-3_1-Nemotron-51B-Instruct is a general purpose chat model intended to be used in English and coding languages. Other non-English languages are also supported.
110
+
111
+ ## Evaluation Results
112
+
113
+ **Data Collection Method by dataset** <br>
114
+ * Automated <br>
115
+
116
+
117
+ ### MT-Bench
118
+
119
+ Evaluated using select datasets from the [Judging LLM-as-a-Judge with MT-Bench and Chatbot Arena](https://arxiv.org/pdf/2306.05685v4)
120
+ MT-bench - 8.99
121
+
122
+
123
+ ### MMLU
124
+
125
+ Evaluated using the Multi-task Language Understanding benchmarks as introduced in [Measuring Massive Multitask Language Understanding](https://arxiv.org/pdf/2009.03300)
126
+
127
+ |MMLU (5-shot) |
128
+ | :----------------- |
129
+ | 80.2% |
130
+
131
+ ### GSM8K
132
+
133
+ Evaluated using the Grade School Math 8K (GSM8K) benchmark as introduced in [Training Verifiers to Solve Math Word Problems](https://arxiv.org/pdf/2110.14168v2)
134
+
135
+ |GSM8K (5-shot) |
136
+ | :----------------- |
137
+ | 91.43% |
138
+
139
+ ### Winogrande
140
+
141
+ |Winogrande (5-shot) |
142
+ | :----------------- |
143
+ | 84.53% |
144
+
145
+ ### Arc-C
146
+
147
+ |Arc challenge (25-shot) |
148
+ | :----------------- |
149
+ | 69.20% |
150
+
151
+ ### Hellaswag
152
+
153
+ |Hellaswag (10-shot) |
154
+ | :----------------- |
155
+ | 85.58% |
156
+
157
+ ### Truthful QA
158
+
159
+ |TruthfulQA (0-shot) |
160
+ | :----------------- |
161
+ | 58.63% |
162
+
163
+ ## Limitations
164
+
165
+ The model was trained on data that contains toxic language, unsafe content, and societal biases originally crawled from the internet. Therefore, the model may amplify those biases and return toxic responses especially when prompted with toxic prompts. The model may generate answers that may be inaccurate, omit key information, or include irrelevant or redundant text producing socially unacceptable or undesirable text, even if the prompt itself does not include anything explicitly offensive.
166
+
167
+ The model demonstrates weakness to alignment-breaking attacks. Users are advised to deploy language model guardrails alongside this model to prevent potentially harmful outputs.
168
+
169
+ ## Adversarial Testing and Red Teaming Efforts
170
+
171
+ The Llama-3_1-Nemotron-51B-instruct model underwent extensive safety evaluation including adversarial testing via three distinct methods:
172
+ * [Garak](https://docs.garak.ai/garak), is an automated LLM vulnerability scanner that probes for common weaknesses, including prompt injection and data leakage.
173
+ * [AEGIS](https://arxiv.org/pdf/2404.05993), is a content safety evaluation dataset and LLM based content safety classifier model, that adheres to a broad taxonomy of 13 categories of critical risks in human-LLM interactions.
174
+ * Human Content Red Teaming leveraging human interaction and evaluation of the models' responses.
175
+
176
+
177
+ ## Inference
178
+ **Engine:** TensorRT <br>
179
+ **Test Hardware:** H100-80GB <br>
180
+
181
+
182
+ ## Ethical Considerations
183
+ NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse.
184
+
185
+ Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).","{""id"": ""nvidia/Llama-3_1-Nemotron-51B-Instruct"", ""author"": ""nvidia"", ""sha"": ""2e9bdbfa8e1ab73ce98f5960143cf927192d59e7"", ""last_modified"": ""2025-04-08 17:43:57+00:00"", ""created_at"": ""2024-09-22 07:24:59+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 107666, ""downloads_all_time"": null, ""likes"": 206, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""nemotron-nas"", ""text-generation"", ""nvidia"", ""llama-3"", ""pytorch"", ""conversational"", ""custom_code"", ""en"", ""arxiv:2306.05685"", ""arxiv:2009.03300"", ""arxiv:2110.14168"", ""arxiv:2404.05993"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: nvidia-open-model-license\nlicense_link: https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf\npipeline_tag: text-generation\ntags:\n- nvidia\n- llama-3\n- pytorch"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""DeciLMForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_decilm.DeciLMConfig"", ""AutoModelForCausalLM"": ""modeling_decilm.DeciLMForCausalLM""}, ""model_type"": ""nemotron-nas"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not 
tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \""26 Jul 2024\"" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \""\"" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \""<|start_header_id|>system<|end_header_id|>\\n\\n\"" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \""Environment: ipython\\n\"" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \""Tools: \"" + builtin_tools | reject('equalto', 'code_interpreter') | join(\"", \"") + \""\\n\\n\""}}\n{%- endif %}\n{{- \""Cutting Knowledge Date: December 2023\\n\"" }}\n{{- \""Today Date: \"" + date_string + \""\\n\\n\"" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \""You have access to the following functions. To call a function, please respond with JSON for a function call.\"" }}\n {{- 'Respond in the format {\""name\"": function name, \""parameters\"": dictionary of argument name and its value}.' 
}}\n {{- \""Do not use variables.\\n\\n\"" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \""\\n\\n\"" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \""<|eot_id|>\"" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\""Cannot put tools in the first user message when there's no first user message!\"") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \""Given the following functions, please respond with a JSON for a function call \"" }}\n {{- \""with its proper arguments that best answers the given prompt.\\n\\n\"" }}\n {{- 'Respond in the format {\""name\"": function name, \""parameters\"": dictionary of argument name and its value}.' 
}}\n {{- \""Do not use variables.\\n\\n\"" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \""\\n\\n\"" }}\n {%- endfor %}\n {{- first_user_message + \""<|eot_id|>\""}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\""This model only supports single tool-calls at once!\"") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \""<|python_tag|>\"" + tool_call.name + \"".call(\"" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\""' + arg_val + '\""' }}\n {%- if not loop.last %}\n {{- \"", \"" }}\n {%- endif %}\n {%- endfor %}\n {{- \"")\"" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\""name\"": \""' + tool_call.name + '\"", ' }}\n {{- '\""parameters\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \""}\"" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \""<|eom_id|>\"" }}\n {%- else %}\n {{- \""<|eot_id|>\"" }}\n {%- endif %}\n {%- elif message.role == \""tool\"" or message.role == \""ipython\"" %}\n {{- \""<|start_header_id|>ipython<|end_header_id|>\\n\\n\"" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \""<|eot_id|>\"" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|eot_id|>""}}, 
""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_decilm.DeciLMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='__init__.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_decilm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00014-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_decilm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_chat_template.jinja', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__activations.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__cache_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__configuration_llama.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__modeling_attn_mask_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__modeling_flash_attention_utils_backward_compat.py', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='transformers_4_44_2__modeling_outputs.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__modeling_rope_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__pytorch_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='variable_cache.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""ruslanmv/convert_to_gguf"", ""Mikhil-jivus/EndpointTesting"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""BF16"": 51501015040}, ""total"": 51501015040}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-08 17:43:57+00:00"", ""cardData"": ""language:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: nvidia-open-model-license\nlicense_link: https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf\npipeline_tag: text-generation\ntags:\n- nvidia\n- llama-3\n- pytorch"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_decilm.DeciLMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""66efc64bfdbf4425c8de549c"", ""modelId"": ""nvidia/Llama-3_1-Nemotron-51B-Instruct"", ""usedStorage"": 103002103896}",0,https://huggingface.co/huihui-ai/Llama-3_1-Nemotron-51B-Instruct-abliterated,1,,0,"https://huggingface.co/lmstudio-community/Llama-3_1-Nemotron-51B-Instruct-GGUF, https://huggingface.co/DevQuasar/nvidia.Llama-3_1-Nemotron-51B-Instruct-GGUF, https://huggingface.co/mradermacher/Llama-3_1-Nemotron-51B-Instruct-GGUF, https://huggingface.co/mradermacher/Llama-3_1-Nemotron-51B-Instruct-i1-GGUF, https://huggingface.co/bartowski/Llama-3_1-Nemotron-51B-Instruct-GGUF, https://huggingface.co/second-state/Llama-3_1-Nemotron-51B-Instruct-GGUF, 
https://huggingface.co/gaianet/Llama-3_1-Nemotron-51B-Instruct-GGUF, https://huggingface.co/tensorblock/Llama-3_1-Nemotron-51B-Instruct-GGUF",8,,0,"FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, Mikhil-jivus/EndpointTesting, bhaskartripathi/LLM_Quantization, huggingface/InferenceSupport/discussions/new?title=nvidia/Llama-3_1-Nemotron-51B-Instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnvidia%2FLlama-3_1-Nemotron-51B-Instruct%5D(%2Fnvidia%2FLlama-3_1-Nemotron-51B-Instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ruslanmv/convert_to_gguf, totolook/Quant",8
186
+ huihui-ai/Llama-3_1-Nemotron-51B-Instruct-abliterated,"---
187
+ base_model:
188
+ - nvidia/Llama-3_1-Nemotron-51B-Instruct
189
+ library_name: transformers
190
+ pipeline_tag: text-generation
191
+ language:
192
+ - en
193
+ tags:
194
+ - nvidia
195
+ - llama-3
196
+ - pytorch
197
+ - abliterated
198
+ - uncensored
199
+ license: other
200
+ license_name: nvidia-open-model-license
201
+ license_link: >-
202
+ https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf
203
+ ---
204
+
205
+ # huihui-ai/Llama-3_1-Nemotron-51B-Instruct-abliterated
206
+
207
+ This is an uncensored version of [nvidia/Llama-3_1-Nemotron-51B-Instruct](https://huggingface.co/nvidia/Llama-3_1-Nemotron-51B-Instruct) created with abliteration (see [remove-refusals-with-transformers](https://github.com/Sumandora/remove-refusals-with-transformers) to know more about it).
208
+ This is a crude, proof-of-concept implementation to remove refusals from an LLM model without using TransformerLens.
209
+
210
+ ### Donation
211
+ ##### Your donation helps us continue our further development and improvement, a cup of coffee can do it.
212
+ - bitcoin:
213
+ ```
214
+ bc1qqnkhuchxw0zqjh2ku3lu4hq45hc6gy84uk70ge
215
+ ```
216
+ ","{""id"": ""huihui-ai/Llama-3_1-Nemotron-51B-Instruct-abliterated"", ""author"": ""huihui-ai"", ""sha"": ""a684254b721363f03eb280fd71b9e6905a0c1653"", ""last_modified"": ""2025-02-12 20:05:59+00:00"", ""created_at"": ""2025-02-11 16:11:27+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 21, ""downloads_all_time"": null, ""likes"": 4, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""nemotron-nas"", ""text-generation"", ""nvidia"", ""llama-3"", ""pytorch"", ""abliterated"", ""uncensored"", ""conversational"", ""custom_code"", ""en"", ""base_model:nvidia/Llama-3_1-Nemotron-51B-Instruct"", ""base_model:finetune:nvidia/Llama-3_1-Nemotron-51B-Instruct"", ""license:other"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- nvidia/Llama-3_1-Nemotron-51B-Instruct\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: nvidia-open-model-license\nlicense_link: https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf\npipeline_tag: text-generation\ntags:\n- nvidia\n- llama-3\n- pytorch\n- abliterated\n- uncensored"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""DeciLMForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_decilm.DeciLMConfig"", ""AutoModelForCausalLM"": ""modeling_decilm.DeciLMForCausalLM""}, ""model_type"": ""nemotron-nas"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif 
%}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \""26 Jul 2024\"" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \""\"" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \""<|start_header_id|>system<|end_header_id|>\\n\\n\"" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \""Environment: ipython\\n\"" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \""Tools: \"" + builtin_tools | reject('equalto', 'code_interpreter') | join(\"", \"") + \""\\n\\n\""}}\n{%- endif %}\n{{- \""Cutting Knowledge Date: December 2023\\n\"" }}\n{{- \""Today Date: \"" + date_string + \""\\n\\n\"" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \""You have access to the following functions. To call a function, please respond with JSON for a function call.\"" }}\n {{- 'Respond in the format {\""name\"": function name, \""parameters\"": dictionary of argument name and its value}.' 
}}\n {{- \""Do not use variables.\\n\\n\"" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \""\\n\\n\"" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \""<|eot_id|>\"" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\""Cannot put tools in the first user message when there's no first user message!\"") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \""Given the following functions, please respond with a JSON for a function call \"" }}\n {{- \""with its proper arguments that best answers the given prompt.\\n\\n\"" }}\n {{- 'Respond in the format {\""name\"": function name, \""parameters\"": dictionary of argument name and its value}.' 
}}\n {{- \""Do not use variables.\\n\\n\"" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \""\\n\\n\"" }}\n {%- endfor %}\n {{- first_user_message + \""<|eot_id|>\""}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\""This model only supports single tool-calls at once!\"") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \""<|python_tag|>\"" + tool_call.name + \"".call(\"" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\""' + arg_val + '\""' }}\n {%- if not loop.last %}\n {{- \"", \"" }}\n {%- endif %}\n {%- endfor %}\n {{- \"")\"" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\""name\"": \""' + tool_call.name + '\"", ' }}\n {{- '\""parameters\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \""}\"" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \""<|eom_id|>\"" }}\n {%- else %}\n {{- \""<|eot_id|>\"" }}\n {%- endif %}\n {%- elif message.role == \""tool\"" or message.role == \""ipython\"" %}\n {{- \""<|start_header_id|>ipython<|end_header_id|>\\n\\n\"" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \""<|eot_id|>\"" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|eot_id|>""}}, 
""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_decilm.DeciLMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_decilm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00014-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00022.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_decilm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_chat_template.jinja', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__activations.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__cache_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__configuration_llama.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__modeling_attn_mask_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__modeling_flash_attention_utils_backward_compat.py', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='transformers_4_44_2__modeling_outputs.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__modeling_rope_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformers_4_44_2__pytorch_utils.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='variable_cache.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 51501015040}, ""total"": 51501015040}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-12 20:05:59+00:00"", ""cardData"": ""base_model:\n- nvidia/Llama-3_1-Nemotron-51B-Instruct\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: nvidia-open-model-license\nlicense_link: https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf\npipeline_tag: text-generation\ntags:\n- nvidia\n- llama-3\n- pytorch\n- abliterated\n- uncensored"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_decilm.DeciLMForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""67ab76af410f8d34819c3614"", ""modelId"": ""huihui-ai/Llama-3_1-Nemotron-51B-Instruct-abliterated"", ""usedStorage"": 103019313817}",1,,0,,0,"https://huggingface.co/DevQuasar/huihui-ai.Llama-3_1-Nemotron-51B-Instruct-abliterated-GGUF, https://huggingface.co/mradermacher/Llama-3_1-Nemotron-51B-Instruct-abliterated-GGUF, 
https://huggingface.co/mradermacher/Llama-3_1-Nemotron-51B-Instruct-abliterated-i1-GGUF",3,,0,huggingface/InferenceSupport/discussions/new?title=huihui-ai/Llama-3_1-Nemotron-51B-Instruct-abliterated&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhuihui-ai%2FLlama-3_1-Nemotron-51B-Instruct-abliterated%5D(%2Fhuihui-ai%2FLlama-3_1-Nemotron-51B-Instruct-abliterated)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
MiniMax-VL-01_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ MiniMaxAI/MiniMax-VL-01,N/A,N/A,0,,0,,0,,0,,0,"FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, TIGER-Lab/MEGA-Bench, awacke1/Leaderboard-Deepseek-Gemini-Grok-GPT-Qwen, bhaskartripathi/LLM_Quantization, huggingface/InferenceSupport/discussions/new?title=MiniMaxAI/MiniMax-VL-01&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BMiniMaxAI%2FMiniMax-VL-01%5D(%2FMiniMaxAI%2FMiniMax-VL-01)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ruslanmv/convert_to_gguf, totolook/Quant",9
Molmo-72B-0924_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ allenai/Molmo-72B-0924,"---
3
+ license: apache-2.0
4
+ language:
5
+ - en
6
+ base_model:
7
+ - openai/clip-vit-large-patch14-336
8
+ - Qwen/Qwen2-72B
9
+ pipeline_tag: image-text-to-text
10
+ tags:
11
+ - multimodal
12
+ - olmo
13
+ - molmo
14
+ - pixmo
15
+ library_name: transformers
16
+ ---
17
+
18
+ <img src=""molmo_logo.png"" alt=""Logo for the Molmo Project"" style=""width: auto; height: 50px;"">
19
+
20
+ # Molmo 72B
21
+
22
+ Molmo is a family of open vision-language models developed by the Allen Institute for AI. Molmo models are trained on PixMo, a dataset of 1 million, highly-curated image-text pairs. It has state-of-the-art performance among multimodal models with a similar size while being fully open-source. You can find all models in the Molmo family [here](https://huggingface.co/collections/allenai/molmo-66f379e6fe3b8ef090a8ca19).
23
+ **Learn more** about the Molmo family [in our announcement blog post](https://molmo.allenai.org/blog) or the [paper](https://huggingface.co/papers/2409.17146).
24
+
25
+ Molmo 72B is based on [Qwen2-72B](https://huggingface.co/Qwen/Qwen2-72B) and uses [OpenAI CLIP](https://huggingface.co/openai/clip-vit-large-patch14-336) as vision backbone.
26
+ Molmo-72B achieves the highest academic benchmark score and ranks second on human evaluation, just slightly behind GPT-4o.
27
+
28
+ This checkpoint is a **preview** of the Molmo release. All artifacts used in creating Molmo (PixMo dataset, training code, evaluations, intermediate checkpoints) will be made available at a later date, furthering our commitment to open-source AI development and reproducibility.
29
+
30
+ [**Sign up here**](https://docs.google.com/forms/d/e/1FAIpQLSdML1MhNNBDsCHpgWG65Oydg2SjZzVasyqlP08nBrWjZp_c7A/viewform) to be the first to know when artifacts are released.
31
+
32
+ Quick links:
33
+ - 💬 [Demo](https://molmo.allenai.org/)
34
+ - 📂 [All Models](https://huggingface.co/collections/allenai/molmo-66f379e6fe3b8ef090a8ca19)
35
+ - 📃 [Paper](https://molmo.allenai.org/paper.pdf)
36
+ - 🎥 [Blog with Videos](https://molmo.allenai.org/blog)
37
+
38
+ ## Quick Start
39
+
40
+ To run Molmo, first install dependencies:
41
+
42
+ ```bash
43
+ pip install einops torchvision
44
+ ```
45
+
46
+ Then, follow these steps:
47
+
48
+ ```python
49
+ from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
50
+ from PIL import Image
51
+ import requests
52
+ import torch
53
+
54
+ # load the processor
55
+ processor = AutoProcessor.from_pretrained(
56
+ 'allenai/Molmo-72B-0924',
57
+ trust_remote_code=True,
58
+ torch_dtype='auto',
59
+ device_map='auto'
60
+ )
61
+
62
+ # load the model
63
+ model = AutoModelForCausalLM.from_pretrained(
64
+ 'allenai/Molmo-72B-0924',
65
+ trust_remote_code=True,
66
+ torch_dtype='auto',
67
+ device_map='auto'
68
+ )
69
+
70
+ # process the image and text
71
+ inputs = processor.process(
72
+ images=[Image.open(requests.get(""https://picsum.photos/id/237/536/354"", stream=True).raw)],
73
+ text=""Describe this image.""
74
+ )
75
+
76
+ # move inputs to the correct device and make a batch of size 1
77
+ inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}
78
+
79
+ # generate output; maximum 200 new tokens; stop generation when <|endoftext|> is generated
80
+ output = model.generate_from_batch(
81
+ inputs,
82
+ GenerationConfig(max_new_tokens=200, stop_strings=""<|endoftext|>""),
83
+ tokenizer=processor.tokenizer
84
+ )
85
+
86
+ # only get generated tokens; decode them to text
87
+ generated_tokens = output[0,inputs['input_ids'].size(1):]
88
+ generated_text = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)
89
+
90
+ # print the generated text
91
+ print(generated_text)
92
+
93
+ # >>> This image features an adorable black Labrador puppy sitting on a wooden deck.
94
+ # The puppy is positioned in the center of the frame, looking up at the camera...
95
+ ```
96
+
97
+ To make inference more efficient, run with autocast:
98
+
99
+
100
+ ```python
101
+ with torch.autocast(device_type=""cuda"", enabled=True, dtype=torch.bfloat16):
102
+ output = model.generate_from_batch(
103
+ inputs,
104
+ GenerationConfig(max_new_tokens=200, stop_strings=""<|endoftext|>""),
105
+ tokenizer=processor.tokenizer
106
+ )
107
+ ```
108
+
109
+ We did most of our evaluation in this setting (autocast on, but float32 weights)
110
+
111
+ To even further reduce the memory requirements, the model can be run with bfloat16 weights:
112
+
113
+ ```
114
+ model.to(dtype=torch.bfloat16)
115
+ inputs[""images""] = inputs[""images""].to(torch.bfloat16)
116
+ output = model.generate_from_batch(
117
+ inputs,
118
+ GenerationConfig(max_new_tokens=200, stop_strings=""<|endoftext|>""),
119
+ tokenizer=processor.tokenizer
120
+ )
121
+ ```
122
+ Note that we have observed that this can change the output of the model compared to running with float32 weights.
123
+
124
+ ## Evaluations
125
+
126
+ | Model | Average Score on 11 Academic Benchmarks | Human Preference Elo Rating |
127
+ |-----------------------------|-----------------------------------------|-----------------------------|
128
+ | **Molmo 72B (this model)** | **81.2** | **1077** |
129
+ | Molmo 7B-D | 77.3 | 1056 |
130
+ | Molmo 7B-O | 74.6 | 1051 |
131
+ | MolmoE 1B | 68.6 | 1032 |
132
+ | GPT-4o | 78.5 | 1079 |
133
+ | GPT-4V | 71.1 | 1041 |
134
+ | Gemini 1.5 Pro | 78.3 | 1074 |
135
+ | Gemini 1.5 Flash | 75.1 | 1054 |
136
+ | Claude 3.5 Sonnet | 76.7 | 1069 |
137
+ | Claude 3 Opus | 66.4 | 971 |
138
+ | Claude 3 Haiku | 65.3 | 999 |
139
+ | Qwen VL2 72B | 79.4 | 1037 |
140
+ | Qwen VL2 7B | 73.7 | 1025 |
141
+ | Intern VL2 LLAMA 76B | 77.1 | 1018 |
142
+ | Intern VL2 8B | 69.4 | 953 |
143
+ | Pixtral 12B | 69.5 | 1016 |
144
+ | Phi3.5-Vision 4B | 59.7 | 982 |
145
+ | PaliGemma 3B | 50.0 | 937 |
146
+ | LLAVA OneVision 72B | 76.6 | 1051 |
147
+ | LLAVA OneVision 7B | 72.0 | 1024 |
148
+ | Cambrian-1 34B | 66.8 | 953 |
149
+ | Cambrian-1 8B | 63.4 | 952 |
150
+ | xGen - MM - Interleave 4B | 59.5 | 979 |
151
+ | LLAVA-1.5 13B | 43.9 | 960 |
152
+ | LLAVA-1.5 7B | 40.7 | 951 |
153
+
154
+ *Benchmarks: AI2D test, ChartQA test, VQA v2.0 test, DocQA test, InfographicVQA test, TextVQA val, RealWorldQA, MMMU val, MathVista testmini, CountBenchQA, Flickr Count (we collected this new dataset that is significantly harder than CountBenchQA).*
155
+
156
+ ## FAQs
157
+
158
+ ### I'm getting an error a broadcast error when processing images!
159
+
160
+ Your image might not be in RGB format. You can convert it using the following code snippet:
161
+
162
+ ```python
163
+ from PIL import Image
164
+
165
+ image = Image.open(...)
166
+
167
+ if image.mode != ""RGB"":
168
+ image = image.convert(""RGB"")
169
+ ```
170
+
171
+ ### Molmo doesn't work great with transparent images!
172
+
173
+ We received reports that Molmo models might struggle with transparent images.
174
+ For the time being, we recommend adding a white or dark background to your images before passing them to the model. The code snippet below shows how to do this using the Python Imaging Library (PIL):
175
+
176
+ ```python
177
+
178
+ # Load the image
179
+ url = ""...""
180
+ image = Image.open(requests.get(url, stream=True).raw)
181
+
182
+ # Convert the image to grayscale to calculate brightness
183
+ gray_image = image.convert('L') # Convert to grayscale
184
+
185
+ # Calculate the average brightness
186
+ stat = ImageStat.Stat(gray_image)
187
+ average_brightness = stat.mean[0] # Get the average value
188
+
189
+ # Define background color based on brightness (threshold can be adjusted)
190
+ bg_color = (0, 0, 0) if average_brightness > 127 else (255, 255, 255)
191
+
192
+ # Create a new image with the same size as the original, filled with the background color
193
+ new_image = Image.new('RGB', image.size, bg_color)
194
+
195
+ # Paste the original image on top of the background (use image as a mask if needed)
196
+ new_image.paste(image, (0, 0), image if image.mode == 'RGBA' else None)
197
+
198
+ # Now you can pass the new_image to Molmo
199
+ processor = AutoProcessor.from_pretrained(
200
+ 'allenai/Molmo-7B-D-0924',
201
+ trust_remote_code=True,
202
+ torch_dtype='auto',
203
+ device_map='auto'
204
+ )
205
+ ```
206
+
207
+ ## License and Use
208
+
209
+ This model is licensed under Apache 2.0. It is intended for research and educational use.
210
+ For more information, please see our [Responsible Use Guidelines](https://allenai.org/responsible-use).
211
+ The base model used is Qwen2-72B, whose license (the Tongyi Qianwen license) you can find [here](https://huggingface.co/Qwen/Qwen2-72B/blob/main/LICENSE).","{""id"": ""allenai/Molmo-72B-0924"", ""author"": ""allenai"", ""sha"": ""2ca845922396b7a5f7086bfda3fca6b8ecd1c8f3"", ""last_modified"": ""2024-10-10 23:19:15+00:00"", ""created_at"": ""2024-09-25 06:23:32+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1573, ""downloads_all_time"": null, ""likes"": 284, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""molmo"", ""text-generation"", ""multimodal"", ""olmo"", ""pixmo"", ""image-text-to-text"", ""conversational"", ""custom_code"", ""en"", ""arxiv:2409.17146"", ""base_model:Qwen/Qwen2-72B"", ""base_model:finetune:Qwen/Qwen2-72B"", ""license:apache-2.0"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- openai/clip-vit-large-patch14-336\n- Qwen/Qwen2-72B\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- multimodal\n- olmo\n- molmo\n- pixmo"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MolmoForCausalLM""], ""auto_map"": {""AutoConfig"": ""config_molmo.MolmoConfig"", ""AutoModelForCausalLM"": ""modeling_molmo.MolmoForCausalLM""}, ""model_type"": ""molmo"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% for message in messages -%}\n {%- if (loop.index % 2 == 1 and message['role'] != 'user') or \n (loop.index % 2 == 0 and message['role'].lower() != 'assistant') -%}\n {{ 
raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif -%}\n {{ message['role'].capitalize() + ': ' + message['content'] }}\n {%- if not loop.last -%}\n {{ ' ' }}\n {%- endif %}\n {%- endfor -%}\n {%- if add_generation_prompt -%}\n {{ ' Assistant:' }}\n {%- endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_molmo.MolmoForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_preprocessing_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00083.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00028-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00039-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00040-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00045-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00047-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00048-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00049-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00050-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00051-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00052-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00053-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00054-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00055-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00056-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00057-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00058-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00059-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00060-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00061-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00062-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00063-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00064-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00065-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00066-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00067-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00068-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00069-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00070-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00071-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00072-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00073-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00074-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00075-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00076-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00077-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00078-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00079-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00080-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00081-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00082-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00083-of-00083.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='molmo_logo.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessing_molmo.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='processor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""TIGER-Lab/MEGA-Bench"", ""Tigasturned/Tigas"", ""Tigasturned/Multimodal"", ""awacke1/Leaderboard-Deepseek-Gemini-Grok-GPT-Qwen""], ""safetensors"": {""parameters"": {""F32"": 73308285952}, ""total"": 73308285952}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-10 23:19:15+00:00"", ""cardData"": ""base_model:\n- openai/clip-vit-large-patch14-336\n- Qwen/Qwen2-72B\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- multimodal\n- olmo\n- molmo\n- pixmo"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_molmo.MolmoForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""66f3ac64ad897b1a451bf66e"", ""modelId"": ""allenai/Molmo-72B-0924"", ""usedStorage"": 293233270552}",0,,0,,0,"https://huggingface.co/SeanScripts/Molmo-72B-0924-nf4, https://huggingface.co/OPEA/Molmo-72B-0924-int4-sym-inc, https://huggingface.co/OPEA/Molmo-72B-0924-int4-sym-gptq-inc",3,,0,"TIGER-Lab/MEGA-Bench, Tigasturned/Multimodal, Tigasturned/Tigas, awacke1/Leaderboard-Deepseek-Gemini-Grok-GPT-Qwen, huggingface/InferenceSupport/discussions/new?title=allenai/Molmo-72B-0924&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Ballenai%2FMolmo-72B-0924%5D(%2Fallenai%2FMolmo-72B-0924)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",5
MythoMax-L2-13B-GPTQ_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv ADDED
@@ -0,0 +1,315 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ TheBloke/MythoMax-L2-13B-GPTQ,"---
3
+ language:
4
+ - en
5
+ license: other
6
+ model_name: MythoMax L2 13B
7
+ base_model: Gryphe/MythoMax-L2-13b
8
+ inference: false
9
+ model_creator: Gryphe
10
+ model_type: llama
11
+ prompt_template: '```
12
+
13
+ {system_message}
14
+
15
+
16
+ ### Instruction:
17
+
18
+ {prompt}
19
+
20
+ (For roleplay purposes, I suggest the following - Write <CHAR NAME>''s next reply
21
+ in a chat between <YOUR NAME> and <CHAR NAME>. Write a single reply only.)
22
+
23
+
24
+ ### Response:
25
+
26
+
27
+ ```
28
+
29
+ '
30
+ quantized_by: TheBloke
31
+ ---
32
+
33
+ <!-- header start -->
34
+ <!-- 200823 -->
35
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
36
+ <img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
37
+ </div>
38
+ <div style=""display: flex; justify-content: space-between; width: 100%;"">
39
+ <div style=""display: flex; flex-direction: column; align-items: flex-start;"">
40
+ <p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://discord.gg/theblokeai"">Chat & support: TheBloke's Discord server</a></p>
41
+ </div>
42
+ <div style=""display: flex; flex-direction: column; align-items: flex-end;"">
43
+ <p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
44
+ </div>
45
+ </div>
46
+ <div style=""text-align:center; margin-top: 0em; margin-bottom: 0em""><p style=""margin-top: 0.25em; margin-bottom: 0em;"">TheBloke's LLM work is generously supported by a grant from <a href=""https://a16z.com"">andreessen horowitz (a16z)</a></p></div>
47
+ <hr style=""margin-top: 1.0em; margin-bottom: 1.0em;"">
48
+ <!-- header end -->
49
+
50
+ # MythoMax L2 13B - GPTQ
51
+ - Model creator: [Gryphe](https://huggingface.co/Gryphe)
52
+ - Original model: [MythoMax L2 13B](https://huggingface.co/Gryphe/MythoMax-L2-13b)
53
+
54
+ <!-- description start -->
55
+ ## Description
56
+
57
+ This repo contains GPTQ model files for [Gryphe's MythoMax L2 13B](https://huggingface.co/Gryphe/MythoMax-L2-13b).
58
+
59
+ Multiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them.
60
+
61
+ <!-- description end -->
62
+ <!-- repositories-available start -->
63
+ ## Repositories available
64
+
65
+ * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/MythoMax-L2-13B-AWQ)
66
+ * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ)
67
+ * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF)
68
+ * [Gryphe's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/Gryphe/MythoMax-L2-13b)
69
+ <!-- repositories-available end -->
70
+
71
+ <!-- prompt-template start -->
72
+ ## Prompt template: Custom
73
+
74
+ ```
75
+ {system_message}
76
+
77
+ ### Instruction:
78
+ {prompt}
79
+ (For roleplay purposes, I suggest the following - Write <CHAR NAME>'s next reply in a chat between <YOUR NAME> and <CHAR NAME>. Write a single reply only.)
80
+
81
+ ### Response:
82
+
83
+ ```
84
+
85
+
86
+ <!-- prompt-template end -->
87
+ <!-- licensing start -->
88
+ ## Licensing
89
+
90
+ The creator of the source model has listed its license as `other`, and this quantization has therefore used that same license.
91
+
92
+ As this model is based on Llama 2, it is also subject to the Meta Llama 2 license terms, and the license files for that are additionally included. It should therefore be considered as being claimed to be licensed under both licenses. I contacted Hugging Face for clarification on dual licensing but they do not yet have an official position. Should this change, or should Meta provide any feedback on this situation, I will update this section accordingly.
93
+
94
+ In the meantime, any questions regarding licensing, and in particular how these two licenses might interact, should be directed to the original model repository: [Gryphe's MythoMax L2 13B](https://huggingface.co/Gryphe/MythoMax-L2-13b).
95
+ <!-- licensing end -->
96
+ <!-- README_GPTQ.md-provided-files start -->
97
+ ## Provided files and GPTQ parameters
98
+
99
+ Multiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements.
100
+
101
+ Each separate quant is in a different branch. See below for instructions on fetching from different branches.
102
+
103
+ All recent GPTQ files are made with AutoGPTQ, and all files in non-main branches are made with AutoGPTQ. Files in the `main` branch which were uploaded before August 2023 were made with GPTQ-for-LLaMa.
104
+
105
+ <details>
106
+ <summary>Explanation of GPTQ parameters</summary>
107
+
108
+ - Bits: The bit size of the quantised model.
109
+ - GS: GPTQ group size. Higher numbers use less VRAM, but have lower quantisation accuracy. ""None"" is the lowest possible value.
110
+ - Act Order: True or False. Also known as `desc_act`. True results in better quantisation accuracy. Some GPTQ clients have had issues with models that use Act Order plus Group Size, but this is generally resolved now.
111
+ - Damp %: A GPTQ parameter that affects how samples are processed for quantisation. 0.01 is default, but 0.1 results in slightly better accuracy.
112
+ - GPTQ dataset: The dataset used for quantisation. Using a dataset more appropriate to the model's training can improve quantisation accuracy. Note that the GPTQ dataset is not the same as the dataset used to train the model - please refer to the original model repo for details of the training dataset(s).
113
+ - Sequence Length: The length of the dataset sequences used for quantisation. Ideally this is the same as the model sequence length. For some very long sequence models (16+K), a lower sequence length may have to be used. Note that a lower sequence length does not limit the sequence length of the quantised model. It only impacts the quantisation accuracy on longer inference sequences.
114
+ - ExLlama Compatibility: Whether this file can be loaded with ExLlama, which currently only supports Llama models in 4-bit.
115
+
116
+ </details>
117
+
118
+ | Branch | Bits | GS | Act Order | Damp % | GPTQ Dataset | Seq Len | Size | ExLlama | Desc |
119
+ | ------ | ---- | -- | --------- | ------ | ------------ | ------- | ---- | ------- | ---- |
120
+ | [main](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ/tree/main) | 4 | 128 | No | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 7.26 GB | Yes | 4-bit, without Act Order and group size 128g. |
121
+ | [gptq-4bit-32g-actorder_True](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ/tree/gptq-4bit-32g-actorder_True) | 4 | 32 | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 8.00 GB | Yes | 4-bit, with Act Order and group size 32g. Gives highest possible inference quality, with maximum VRAM usage. |
122
+ | [gptq-4bit-64g-actorder_True](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ/tree/gptq-4bit-64g-actorder_True) | 4 | 64 | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 7.51 GB | Yes | 4-bit, with Act Order and group size 64g. Uses less VRAM than 32g, but with slightly lower accuracy. |
123
+ | [gptq-4bit-128g-actorder_True](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ/tree/gptq-4bit-128g-actorder_True) | 4 | 128 | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 7.26 GB | Yes | 4-bit, with Act Order and group size 128g. Uses even less VRAM than 64g, but with slightly lower accuracy. |
124
+ | [gptq-8bit--1g-actorder_True](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ/tree/gptq-8bit--1g-actorder_True) | 8 | None | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 13.36 GB | No | 8-bit, with Act Order. No group size, to lower VRAM requirements. |
125
+ | [gptq-8bit-128g-actorder_True](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ/tree/gptq-8bit-128g-actorder_True) | 8 | 128 | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 13.65 GB | No | 8-bit, with group size 128g for higher inference quality and with Act Order for even higher accuracy. |
126
+
127
+ <!-- README_GPTQ.md-provided-files end -->
128
+
129
+ <!-- README_GPTQ.md-download-from-branches start -->
130
+ ## How to download from branches
131
+
132
+ - In text-generation-webui, you can add `:branch` to the end of the download name, eg `TheBloke/MythoMax-L2-13B-GPTQ:main`
133
+ - With Git, you can clone a branch with:
134
+ ```
135
+ git clone --single-branch --branch main https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ
136
+ ```
137
+ - In Python Transformers code, the branch is the `revision` parameter; see below.
138
+ <!-- README_GPTQ.md-download-from-branches end -->
139
+ <!-- README_GPTQ.md-text-generation-webui start -->
140
+ ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui).
141
+
142
+ Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui).
143
+
144
+ It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install.
145
+
146
+ 1. Click the **Model tab**.
147
+ 2. Under **Download custom model or LoRA**, enter `TheBloke/MythoMax-L2-13B-GPTQ`.
148
+ - To download from a specific branch, enter for example `TheBloke/MythoMax-L2-13B-GPTQ:main`
149
+ - see Provided Files above for the list of branches for each option.
150
+ 3. Click **Download**.
151
+ 4. The model will start downloading. Once it's finished it will say ""Done"".
152
+ 5. In the top left, click the refresh icon next to **Model**.
153
+ 6. In the **Model** dropdown, choose the model you just downloaded: `MythoMax-L2-13B-GPTQ`
154
+ 7. The model will automatically load, and is now ready for use!
155
+ 8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right.
156
+ * Note that you do not need to and should not set manual GPTQ parameters any more. These are set automatically from the file `quantize_config.json`.
157
+ 9. Once you're ready, click the **Text Generation tab** and enter a prompt to get started!
158
+ <!-- README_GPTQ.md-text-generation-webui end -->
159
+
160
+ <!-- README_GPTQ.md-use-from-python start -->
161
+ ## How to use this GPTQ model from Python code
162
+
163
+ ### Install the necessary packages
164
+
165
+ Requires: Transformers 4.32.0 or later, Optimum 1.12.0 or later, and AutoGPTQ 0.4.2 or later.
166
+
167
+ ```shell
168
+ pip3 install transformers>=4.32.0 optimum>=1.12.0
169
+ pip3 install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/ # Use cu117 if on CUDA 11.7
170
+ ```
171
+
172
+ If you have problems installing AutoGPTQ using the pre-built wheels, install it from source instead:
173
+
174
+ ```shell
175
+ pip3 uninstall -y auto-gptq
176
+ git clone https://github.com/PanQiWei/AutoGPTQ
177
+ cd AutoGPTQ
178
+ pip3 install .
179
+ ```
180
+
181
+ ### For CodeLlama models only: you must use Transformers 4.33.0 or later.
182
+
183
+ If 4.33.0 is not yet released when you read this, you will need to install Transformers from source:
184
+ ```shell
185
+ pip3 uninstall -y transformers
186
+ pip3 install git+https://github.com/huggingface/transformers.git
187
+ ```
188
+
189
+ ### You can then use the following code
190
+
191
+ ```python
192
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
193
+
194
+ model_name_or_path = ""TheBloke/MythoMax-L2-13B-GPTQ""
195
+ # To use a different branch, change revision
196
+ # For example: revision=""main""
197
+ model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
198
+ device_map=""auto"",
199
+ trust_remote_code=False,
200
+ revision=""main"")
201
+
202
+ tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
203
+
204
+ prompt = ""Tell me about AI""
205
+ prompt_template=f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.
206
+
207
+ ### Instruction:
208
+ {prompt}
209
+
210
+ ### Response:
211
+
212
+ '''
213
+
214
+ print(""\n\n*** Generate:"")
215
+
216
+ input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
217
+ output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)
218
+ print(tokenizer.decode(output[0]))
219
+
220
+ # Inference can also be done using transformers' pipeline
221
+
222
+ print(""*** Pipeline:"")
223
+ pipe = pipeline(
224
+ ""text-generation"",
225
+ model=model,
226
+ tokenizer=tokenizer,
227
+ max_new_tokens=512,
228
+ do_sample=True,
229
+ temperature=0.7,
230
+ top_p=0.95,
231
+ top_k=40,
232
+ repetition_penalty=1.1
233
+ )
234
+
235
+ print(pipe(prompt_template)[0]['generated_text'])
236
+ ```
237
+ <!-- README_GPTQ.md-use-from-python end -->
238
+
239
+ <!-- README_GPTQ.md-compatibility start -->
240
+ ## Compatibility
241
+
242
+ The files provided are tested to work with AutoGPTQ, both via Transformers and using AutoGPTQ directly. They should also work with [Occ4m's GPTQ-for-LLaMa fork](https://github.com/0cc4m/KoboldAI).
243
+
244
+ [ExLlama](https://github.com/turboderp/exllama) is compatible with Llama models in 4-bit. Please see the Provided Files table above for per-file compatibility.
245
+
246
+ [Huggingface Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) is compatible with all GPTQ models.
247
+ <!-- README_GPTQ.md-compatibility end -->
248
+
249
+ <!-- footer start -->
250
+ <!-- 200823 -->
251
+ ## Discord
252
+
253
+ For further support, and discussions on these models and AI in general, join us at:
254
+
255
+ [TheBloke AI's Discord server](https://discord.gg/theblokeai)
256
+
257
+ ## Thanks, and how to contribute
258
+
259
+ Thanks to the [chirper.ai](https://chirper.ai) team!
260
+
261
+ Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!
262
+
263
+ I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
264
+
265
+ If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
266
+
267
+ Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
268
+
269
+ * Patreon: https://patreon.com/TheBlokeAI
270
+ * Ko-Fi: https://ko-fi.com/TheBlokeAI
271
+
272
+ **Special thanks to**: Aemon Algiz.
273
+
274
+ **Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J. Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov
275
+
276
+
277
+ Thank you to all my generous patrons and donaters!
278
+
279
+ And thank you again to a16z for their generous grant.
280
+
281
+ <!-- footer end -->
282
+
283
+ # Original model card: Gryphe's MythoMax L2 13B
284
+
285
+ An improved, potentially even perfected variant of MythoMix, my [MythoLogic-L2](https://huggingface.co/Gryphe/MythoLogic-L2-13b) and [Huginn](https://huggingface.co/The-Face-Of-Goonery/Huginn-13b-FP16) merge using a highly experimental tensor type merge technique. The main difference with MythoMix is that I allowed more of Huginn to intermingle with the single tensors located at the front and end of a model, resulting in increased coherency across the entire structure.
286
+
287
+ The script and the accompanying templates I used to produce both can [be found here](https://github.com/Gryphe/BlockMerge_Gradient/tree/main/YAML).
288
+
289
+ This model is proficient at both roleplaying and storywriting due to its unique nature.
290
+
291
+ Quantized models are available from TheBloke: [GGML](https://huggingface.co/TheBloke/MythoMax-L2-13B-GGML) - [GPTQ](https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ) (You're the best!)
292
+
293
+ ## Model details
294
+
295
+ The idea behind this merge is that each layer is composed of several tensors, which are in turn responsible for specific functions. Using MythoLogic-L2's robust understanding as its input and Huginn's extensive writing capability as its output seems to have resulted in a model that exceeds at both, confirming my theory. (More details to be released at a later time)
296
+
297
+ This type of merge is incapable of being illustrated, as each of its 363 tensors had an unique ratio applied to it. As with my prior merges, gradients were part of these ratios to further finetune its behaviour.
298
+
299
+ ## Prompt Format
300
+
301
+ This model primarily uses Alpaca formatting, so for optimal model performance, use:
302
+ ```
303
+ <System prompt/Character Card>
304
+
305
+ ### Instruction:
306
+ Your instruction or question here.
307
+ For roleplay purposes, I suggest the following - Write <CHAR NAME>'s next reply in a chat between <YOUR NAME> and <CHAR NAME>. Write a single reply only.
308
+
309
+ ### Response:
310
+ ```
311
+
312
+ ---
313
+ license: other
314
+ ---
315
+ ","{""id"": ""TheBloke/MythoMax-L2-13B-GPTQ"", ""author"": ""TheBloke"", ""sha"": ""2a6a8d47826271a065b7589f001c359022b4950d"", ""last_modified"": ""2023-09-27 12:45:40+00:00"", ""created_at"": ""2023-08-11 07:27:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5227, ""downloads_all_time"": null, ""likes"": 204, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""en"", ""base_model:Gryphe/MythoMax-L2-13b"", ""base_model:quantized:Gryphe/MythoMax-L2-13b"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""4-bit"", ""gptq"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Gryphe/MythoMax-L2-13b\nlanguage:\n- en\nlicense: other\nmodel_name: MythoMax L2 13B\ninference: false\nmodel_creator: Gryphe\nmodel_type: llama\nprompt_template: '```\n\n {system_message}\n\n\n ### Instruction:\n\n {prompt}\n\n (For roleplay purposes, I suggest the following - Write <CHAR NAME>''s next reply\n in a chat between <YOUR NAME> and <CHAR NAME>. 
Write a single reply only.)\n\n\n ### Response:\n\n\n ```\n\n '\nquantized_by: TheBloke"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""quantization_config"": {""bits"": 4, ""quant_method"": ""gptq""}, ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""</s>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<unk>"", ""lstrip"": false, ""normalized"": false, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='USE_POLICY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantize_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""ruslanmv/convert_to_gguf"", ""ming512/llm"", ""DGameHF/MSH-AI"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""I32"": 1600143360, ""F16"": 429347840}, ""total"": 2029491200}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-27 12:45:40+00:00"", ""cardData"": ""base_model: Gryphe/MythoMax-L2-13b\nlanguage:\n- en\nlicense: other\nmodel_name: MythoMax L2 13B\ninference: false\nmodel_creator: Gryphe\nmodel_type: llama\nprompt_template: '```\n\n {system_message}\n\n\n ### Instruction:\n\n {prompt}\n\n (For roleplay purposes, I suggest the following - Write <CHAR NAME>''s next reply\n in a chat between <YOUR NAME> and <CHAR NAME>. 
Write a single reply only.)\n\n\n ### Response:\n\n\n ```\n\n '\nquantized_by: TheBloke"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""64d5e2dcabf475a808a24d42"", ""modelId"": ""TheBloke/MythoMax-L2-13B-GPTQ"", ""usedStorage"": 57044260131}",0,,0,"https://huggingface.co/paulrouge/test-finetune, https://huggingface.co/paulrouge/test-lora-3",2,,0,,0,"DGameHF/MSH-AI, FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, bhaskartripathi/LLM_Quantization, huggingface/InferenceSupport/discussions/new?title=TheBloke/MythoMax-L2-13B-GPTQ&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheBloke%2FMythoMax-L2-13B-GPTQ%5D(%2FTheBloke%2FMythoMax-L2-13B-GPTQ)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ming512/llm, ruslanmv/convert_to_gguf, totolook/Quant",9
NeuralBeagle14-7B_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
The diff for this file is too large to render. See raw diff
 
Nous-Capybara-34B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ NousResearch/Nous-Capybara-34B,"---
3
+ language:
4
+ - eng
5
+ tags:
6
+ - sft
7
+ - Yi-34B-200K
8
+ license:
9
+ - mit
10
+ datasets:
11
+ - LDJnr/Capybara
12
+ - LDJnr/LessWrong-Amplify-Instruct
13
+ - LDJnr/Pure-Dove
14
+ - LDJnr/Verified-Camel
15
+ ---
16
+
17
+ ## **Nous-Capybara-34B V1.9**
18
+
19
+ **This is trained on the Yi-34B model with 200K context length, for 3 epochs on the Capybara dataset!**
20
+
21
+ **First 34B Nous model and first 200K context length Nous model!**
22
+
23
+ The Capybara series is the first Nous collection of models made by fine-tuning mostly on data created by Nous in-house.
24
+
25
+ We leverage our novel data synthesis technique called Amplify-instruct (Paper coming soon), the seed distribution and synthesis method are comprised of a synergistic combination of top performing existing data synthesis techniques and distributions used for SOTA models such as Airoboros, Evol-Instruct(WizardLM), Orca, Vicuna, Know_Logic, Lamini, FLASK and others, all into one lean holistically formed methodology for the dataset and model. The seed instructions used for the start of synthesized conversations are largely based on highly regarded datasets like Airoboros, Know logic, EverythingLM, GPTeacher and even entirely new seed instructions derived from posts on the website LessWrong, as well as being supplemented with certain in-house multi-turn datasets like Dove(A successor to Puffin).
26
+
27
+ While performing great in its current state, the current dataset used for fine-tuning is entirely contained within 20K training examples, this is 10 times smaller than many similar performing current models, this is significant when it comes to scaling implications for our next generation of models once we scale our novel synthesis methods to significantly more examples.
28
+
29
+ ## Process of creation and special thank yous!
30
+
31
+ This model was fine-tuned by Nous Research as part of the Capybara/Amplify-Instruct project led by Luigi D.(LDJ) (Paper coming soon), as well as significant dataset formation contributions by J-Supha and general compute and experimentation management by Jeffrey Q. during ablations.
32
+
33
+ Special thank you to **A16Z** for sponsoring our training, as well as **Yield Protocol** for their support in financially sponsoring resources during the R&D of this project.
34
+
35
+ ## Thank you to those of you that have indirectly contributed!
36
+
37
+ While most of the tokens within Capybara are newly synthesized and part of datasets like Puffin/Dove, we would like to credit the single-turn datasets we leveraged as seeds that are used to generate the multi-turn data as part of the Amplify-Instruct synthesis.
38
+
39
+ The datasets shown in green below are datasets that we sampled from to curate seeds that are used during Amplify-Instruct synthesis for this project.
40
+
41
+ Datasets in Blue are in-house curations that previously existed prior to Capybara.
42
+
43
+ ![Capybara](https://i.imgur.com/yB58OoD.jpeg)
44
+
45
+
46
+ ## Prompt Format
47
+
48
+ The recommended model usage is:
49
+
50
+
51
+ Prefix: ``USER:``
52
+
53
+ Suffix: ``ASSISTANT:``
54
+
55
+ Stop token: ``</s>``
56
+
57
+
58
+ ## Multi-Modality!
59
+
60
+ - We currently have a Multi-modal model based on Capybara V1.9!
61
+ https://huggingface.co/NousResearch/Obsidian-3B-V0.5
62
+ it is currently only available as a 3B sized model but larger versions coming!
63
+
64
+
65
+ ## Notable Features:
66
+
67
+ - Uses Yi-34B model as the base which is trained for 200K context length!
68
+
69
+ - Over 60% of the dataset is comprised of multi-turn conversations.(Most models are still only trained for single-turn conversations and no back and forths!)
70
+
71
+ - Over 1,000 tokens average per conversation example! (Most models are trained on conversation data that is less than 300 tokens per example.)
72
+
73
+ - Able to effectively do complex summaries of advanced topics and studies. (trained on hundreds of advanced difficult summary tasks developed in-house)
74
+
75
+ - Ability to recall information up to late 2022 without internet.
76
+
77
+ - Includes a portion of conversational data synthesized from less wrong posts, discussing very in-depth details and philosophies about the nature of reality, reasoning, rationality, self-improvement and related concepts.
78
+
79
+ ## Example Outputs from Capybara V1.9 7B version! (examples from 34B coming soon):
80
+
81
+ ![Capybara](https://img001.prntscr.com/file/img001/T9yYxR1xQSaK_UGdy3t2Cw.png)
82
+
83
+ ![Capybara](https://img001.prntscr.com/file/img001/DQXqmKbsQQOIcgny1eoGNA.png)
84
+
85
+ ![Capybara](https://img001.prntscr.com/file/img001/85X3L9ZxTsOKo3fUQ7GRVA.png)
86
+
87
+ ## Benchmarks! (Coming soon!)
88
+
89
+
90
+ ## Future model sizes
91
+
92
+ Capybara V1.9 now currently has a 3B, 7B and 34B size, and we plan to eventually have a 13B and 70B version in the future, as well as a potential 1B version based on phi-1.5 or Tiny Llama.
93
+
94
+ ## How you can help!
95
+
96
+ In the near future we plan on leveraging the help of domain specific expert volunteers to eliminate any mathematically/verifiably incorrect answers from our training curations.
97
+
98
+ If you have at-least a bachelors in mathematics, physics, biology or chemistry and would like to volunteer even just 30 minutes of your expertise time, please contact LDJ on discord!
99
+
100
+ ## Dataset contamination.
101
+
102
+ We have checked the Capybara dataset for contamination for several of the most popular datasets and can confirm that there is no contamination found.
103
+
104
+ We leveraged minhash to check for 100%, 99%, 98% and 97% similarity matches between our data and the questions and answers in benchmarks, we found no exact matches, nor did we find any matches down to the 97% similarity level.
105
+
106
+ The following are benchmarks we checked for contamination against our dataset:
107
+
108
+ - HumanEval
109
+
110
+ - AGIEval
111
+
112
+ - TruthfulQA
113
+
114
+ - MMLU
115
+
116
+ - GPT4All
117
+
118
+ ```
119
+ @article{daniele2023amplify-instruct,
120
+ title={Amplify-Instruct: Synthetically Generated Diverse Multi-turn Conversations for Effecient LLM Training.},
121
+ author={Daniele, Luigi and Suphavadeeprasit},
122
+ journal={arXiv preprint arXiv:(comming soon)},
123
+ year={2023}
124
+ }
125
+ ```","{""id"": ""NousResearch/Nous-Capybara-34B"", ""author"": ""NousResearch"", ""sha"": ""6beb706364038ba43350d85734975377236e9546"", ""last_modified"": ""2023-12-28 12:56:35+00:00"", ""created_at"": ""2023-11-13 05:01:41+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 566, ""downloads_all_time"": null, ""likes"": 259, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""sft"", ""Yi-34B-200K"", ""eng"", ""dataset:LDJnr/Capybara"", ""dataset:LDJnr/LessWrong-Amplify-Instruct"", ""dataset:LDJnr/Pure-Dove"", ""dataset:LDJnr/Verified-Camel"", ""license:mit"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- LDJnr/Capybara\n- LDJnr/LessWrong-Amplify-Instruct\n- LDJnr/Pure-Dove\n- LDJnr/Verified-Camel\nlanguage:\n- eng\nlicense:\n- mit\ntags:\n- sft\n- Yi-34B-200K"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|startoftext|>"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00007.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='pytorch_model-00002-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_yi.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""Twent/NousResearch-Nous-Capybara-34B"", ""DomArruda/NousResearch-Nous-Capybara-34B"", ""zksneil/NousResearch-Nous-Capybara-34B"", ""roppwer/NousResearch-Nous-Capybara-34B"", ""Adeohluwa/NousResearch-Nous-Capybara-34B"", ""iblfe/test""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-12-28 12:56:35+00:00"", ""cardData"": ""datasets:\n- LDJnr/Capybara\n- LDJnr/LessWrong-Amplify-Instruct\n- LDJnr/Pure-Dove\n- LDJnr/Verified-Camel\nlanguage:\n- eng\nlicense:\n- mit\ntags:\n- sft\n- Yi-34B-200K"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6551adb57490049d625a19fc"", ""modelId"": ""NousResearch/Nous-Capybara-34B"", ""usedStorage"": 137556951568}",0,,0,,0,"https://huggingface.co/TheBloke/Nous-Capybara-34B-GGUF, 
https://huggingface.co/mradermacher/Nous-Capybara-34B-GGUF, https://huggingface.co/TheBloke/Nous-Capybara-34B-AWQ, https://huggingface.co/TheBloke/Nous-Capybara-34B-GPTQ, https://huggingface.co/mradermacher/Nous-Capybara-34B-i1-GGUF",5,,0,"Adeohluwa/NousResearch-Nous-Capybara-34B, DomArruda/NousResearch-Nous-Capybara-34B, Twent/NousResearch-Nous-Capybara-34B, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=NousResearch/Nous-Capybara-34B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BNousResearch%2FNous-Capybara-34B%5D(%2FNousResearch%2FNous-Capybara-34B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, iblfe/test, roppwer/NousResearch-Nous-Capybara-34B, zksneil/NousResearch-Nous-Capybara-34B",8
Nous-Hermes-2-Vision-Alpha_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ NousResearch/Nous-Hermes-2-Vision-Alpha,"---
3
+ base_model: mistralai/Mistral-7B-v0.1
4
+ tags:
5
+ - mistral
6
+ - instruct
7
+ - finetune
8
+ - chatml
9
+ - gpt4
10
+ - synthetic data
11
+ - distillation
12
+ - multimodal
13
+ - llava
14
+ model-index:
15
+ - name: Nous-Hermes-2-Vision
16
+ results: []
17
+ license: apache-2.0
18
+ language:
19
+ - en
20
+ ---
21
+
22
+ # Nous-Hermes-2-Vision - Mistral 7B
23
+
24
+
25
+ ![image/png](https://camo.githubusercontent.com/b09dc35a93b4b70748fa4e2f307b011cd3d548369dd926ec9a2d3a51f7b3721e/68747470733a2f2f66696c65732e6f616975736572636f6e74656e742e636f6d2f66696c652d6b4437565358734f5649576472624b3042353662686644363f73653d323032332d31322d3033543137253341333425334135385a2673703d722673763d323032312d30382d30362673723d6226727363633d6d61782d6167652533443331353336303030253243253230696d6d757461626c6526727363643d6174746163686d656e7425334225323066696c656e616d6525334439643530333039622d356236342d343964302d623832362d6165316638366132396661382e77656270267369673d50396973694b4679654a54435a47424b526d45494b3043586e6e55676c6334704a583071312532425478666a34253344)
26
+
27
+ *In the tapestry of Greek mythology, Hermes reigns as the eloquent Messenger of the Gods, a deity who deftly bridges the realms through the art of communication. It is in homage to this divine mediator that I name this advanced LLM ""Hermes,"" a system crafted to navigate the complex intricacies of human discourse with celestial finesse.*
28
+
29
+ ## Model description
30
+
31
+ Nous-Hermes-2-Vision stands as a pioneering Vision-Language Model, leveraging advancements from the renowned **OpenHermes-2.5-Mistral-7B** by teknium. This model incorporates two pivotal enhancements, setting it apart as a cutting-edge solution:
32
+
33
+ - **SigLIP-400M Integration**: Diverging from traditional approaches that rely on substantial 3B vision encoders, Nous-Hermes-2-Vision harnesses the formidable SigLIP-400M. This strategic choice not only streamlines the model's architecture, making it more lightweight, but also capitalizes on SigLIP's remarkable capabilities. The result? A remarkable boost in performance that defies conventional expectations.
34
+
35
+ - **Custom Dataset Enriched with Function Calling**: Our model's training data includes a unique feature – function calling. This distinctive addition transforms Nous-Hermes-2-Vision into a **Vision-Language Action Model**. Developers now have a versatile tool at their disposal, primed for crafting a myriad of ingenious automations.
36
+
37
+ This project is led by [qnguyen3](https://twitter.com/stablequan) and [teknium](https://twitter.com/Teknium1).
38
+ ## Training
39
+ ### Dataset
40
+ - 220K from **LVIS-INSTRUCT4V**
41
+ - 60K from **ShareGPT4V**
42
+ - 150K Private **Function Calling Data**
43
+ - 50K conversations from teknium's **OpenHermes-2.5**
44
+
45
+ ## Usage
46
+ ### Prompt Format
47
+ - Like other LLaVA's variants, this model uses Vicuna-V1 as its prompt template. Please refer to `conv_llava_v1` in [this file](https://github.com/qnguyen3/hermes-llava/blob/main/llava/conversation.py)
48
+ - For Gradio UI, please visit this [GitHub Repo](https://github.com/qnguyen3/hermes-llava)
49
+
50
+ ### Function Calling
51
+ - For function calling, the message should start with a `<fn_call>` tag. Here is an example:
52
+
53
+ ```json
54
+ <fn_call>{
55
+ ""type"": ""object"",
56
+ ""properties"": {
57
+ ""bus_colors"": {
58
+ ""type"": ""array"",
59
+ ""description"": ""The colors of the bus in the image."",
60
+ ""items"": {
61
+ ""type"": ""string"",
62
+ ""enum"": [""red"", ""blue"", ""green"", ""white""]
63
+ }
64
+ },
65
+ ""bus_features"": {
66
+ ""type"": ""string"",
67
+ ""description"": ""The features seen on the back of the bus.""
68
+ },
69
+ ""bus_location"": {
70
+ ""type"": ""string"",
71
+ ""description"": ""The location of the bus (driving or pulled off to the side)."",
72
+ ""enum"": [""driving"", ""pulled off to the side""]
73
+ }
74
+ }
75
+ }
76
+ ```
77
+
78
+ Output:
79
+ ```json
80
+ {
81
+ ""bus_colors"": [""red"", ""white""],
82
+ ""bus_features"": ""An advertisement"",
83
+ ""bus_location"": ""driving""
84
+ }
85
+ ```
86
+
87
+ ## Example
88
+
89
+ ### Chat
90
+ ![image/png](https://i.ibb.co/tMg8h2t/Screenshot-from-2023-12-04-00-13-59.png)
91
+
92
+ ### Function Calling
93
+ Input image:
94
+
95
+ ![image/png](https://www.slcmenu.com/wp-content/uploads/2017/11/In-N-Out-Burger-menu-2020-982x1024.jpg)
96
+
97
+ Input message:
98
+ ```json
99
+ <fn_call>{
100
+ ""type"": ""object"",
101
+ ""properties"": {
102
+ ""food_list"": {
103
+ ""type"": ""array"",
104
+ ""description"": ""List of all the food"",
105
+ ""items"": {
106
+ ""type"": ""string"",
107
+ }
108
+ },
109
+ }
110
+ }
111
+ ```
112
+
113
+ Output:
114
+ ```json
115
+ {
116
+ ""food_list"": [
117
+ ""Double Burger"",
118
+ ""Cheeseburger"",
119
+ ""French Fries"",
120
+ ""Shakes"",
121
+ ""Coffee""
122
+ ]
123
+ }
124
+ ```
125
+ ","{""id"": ""NousResearch/Nous-Hermes-2-Vision-Alpha"", ""author"": ""NousResearch"", ""sha"": ""cb1e43865b0a23d3eb0bafcb0828e9a4db379ac8"", ""last_modified"": ""2023-12-03 17:25:41+00:00"", ""created_at"": ""2023-11-28 23:18:55+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 19, ""downloads_all_time"": null, ""likes"": 301, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llava_mistral"", ""text-generation"", ""mistral"", ""instruct"", ""finetune"", ""chatml"", ""gpt4"", ""synthetic data"", ""distillation"", ""multimodal"", ""llava"", ""conversational"", ""en"", ""base_model:mistralai/Mistral-7B-v0.1"", ""base_model:finetune:mistralai/Mistral-7B-v0.1"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: mistralai/Mistral-7B-v0.1\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- mistral\n- instruct\n- finetune\n- chatml\n- gpt4\n- synthetic data\n- distillation\n- multimodal\n- llava\nmodel-index:\n- name: Nous-Hermes-2-Vision\n results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""Nous-Hermes-2-Vision"", ""results"": []}], ""config"": {""architectures"": [""LlavaMistralForCausalLM""], ""model_type"": ""llava_mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<unk>"", 
""unk_token"": ""<unk>"", ""use_default_system_prompt"": true}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mm_projector.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00002.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ContentStreamSteve/NousResearch-Nous-Hermes-2-Vision"", ""michaelfarina/NousResearch-Nous-Hermes-2-Vision-Alpha"", ""renamalvs/NousResearch-Nous-Hermes-2-Vision-Alpha"", ""IshanExtreme/NousResearch-Nous-Hermes-2-Vision-Alpha""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-12-03 17:25:41+00:00"", ""cardData"": ""base_model: mistralai/Mistral-7B-v0.1\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- mistral\n- instruct\n- finetune\n- chatml\n- gpt4\n- synthetic data\n- distillation\n- 
multimodal\n- llava\nmodel-index:\n- name: Nous-Hermes-2-Vision\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""6566755f5ed6ca5f18e0cf79"", ""modelId"": ""NousResearch/Nous-Hermes-2-Vision-Alpha"", ""usedStorage"": 15396264241}",0,,0,,0,,0,,0,"ContentStreamSteve/NousResearch-Nous-Hermes-2-Vision, IshanExtreme/NousResearch-Nous-Hermes-2-Vision-Alpha, huggingface/InferenceSupport/discussions/new?title=NousResearch/Nous-Hermes-2-Vision-Alpha&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BNousResearch%2FNous-Hermes-2-Vision-Alpha%5D(%2FNousResearch%2FNous-Hermes-2-Vision-Alpha)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, michaelfarina/NousResearch-Nous-Hermes-2-Vision-Alpha, renamalvs/NousResearch-Nous-Hermes-2-Vision-Alpha",5
Nous-Hermes-2-Yi-34B_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ NousResearch/Nous-Hermes-2-Yi-34B,"---
3
+ base_model: 01-ai/Yi-34B
4
+ tags:
5
+ - yi
6
+ - instruct
7
+ - finetune
8
+ - chatml
9
+ - gpt4
10
+ - synthetic data
11
+ - distillation
12
+ model-index:
13
+ - name: Nous-Hermes-2-Yi-34B
14
+ results: []
15
+ license: apache-2.0
16
+ language:
17
+ - en
18
+ datasets:
19
+ - teknium/OpenHermes-2.5
20
+ ---
21
+
22
+ # Nous Hermes 2 - Yi-34B
23
+
24
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oOqrUeAQejuQOra7fNlzG.png)
25
+
26
+ ## Model description
27
+
28
+ Nous Hermes 2 - Yi-34B is a state of the art Yi Fine-tune.
29
+
30
+ Nous Hermes 2 Yi 34B was trained on 1,000,000 entries of primarily GPT-4 generated data, as well as other high quality data from open datasets across the AI landscape.
31
+
32
+ # Table of Contents
33
+ 1. [Example Outputs](#example-outputs)
34
+ - Discussing the Laws of Gravity
35
+ - Create a Flask based FTP Server
36
+ 2. [Benchmark Results](#benchmark-results)
37
+ - GPT4All
38
+ - AGIEval
39
+ - BigBench
40
+ - Averages Compared
41
+ 3. [Prompt Format](#prompt-format)
42
+ 4. [Quantized Models](#quantized-models)
43
+
44
+
45
+ ## Example Outputs
46
+
47
+ ### Discussions about the Law of Gravity:
48
+
49
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/J6Rmdj1VOVN7ry_uGL1PK.png)
50
+
51
+ ### Create an FTP Server in FLASK:
52
+
53
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/B5eu8OvQlg8rINBJGxbB7.png)
54
+
55
+ ## Benchmark Results
56
+
57
+ Nous-Hermes 2 on Yi 34B outperforms all Nous-Hermes & Open-Hermes models of the past, achieving new heights in all benchmarks for a Nous Research LLM as well as surpassing many popular finetunes.
58
+
59
+ # Benchmarks Compared
60
+
61
+ ### GPT4All:
62
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/91onORUcUrAqTb3b9mG5e.png)
63
+
64
+ ### AGIEval:
65
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/hqDpMlKpINfDf4PmB31uW.png)
66
+
67
+ ### BigBench:
68
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/uh8mZZg_wZinFysxcfLSF.png)
69
+
70
+ ### TruthfulQA:
71
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/N_cX6YAWjJsvClotuoPdH.png)
72
+
73
+
74
+
75
+ ## GPT4All
76
+ GPT-4All Benchmark Set
77
+ ```
78
+ | Task |Version| Metric |Value | |Stderr|
79
+ |-------------|------:|--------|-----:|---|-----:|
80
+ |arc_challenge| 0|acc |0.6067|_ |0.0143|
81
+ | | |acc_norm|0.6416|_ |0.0140|
82
+ |arc_easy | 0|acc |0.8594|_ |0.0071|
83
+ | | |acc_norm|0.8569|_ |0.0072|
84
+ |boolq | 1|acc |0.8859|_ |0.0056|
85
+ |hellaswag | 0|acc |0.6407|_ |0.0048|
86
+ | | |acc_norm|0.8388|_ |0.0037|
87
+ |openbookqa | 0|acc |0.3520|_ |0.0214|
88
+ | | |acc_norm|0.4760|_ |0.0224|
89
+ |piqa | 0|acc |0.8215|_ |0.0089|
90
+ | | |acc_norm|0.8303|_ |0.0088|
91
+ |winogrande | 0|acc |0.7908|_ |0.0114|
92
+ Average: 76.00%
93
+ ```
94
+
95
+ AGI-Eval
96
+ ```
97
+ | Task |Version| Metric |Value | |Stderr|
98
+ |------------------------------|------:|--------|-----:|---|-----:|
99
+ |agieval_aqua_rat | 0|acc |0.3189|_ |0.0293|
100
+ | | |acc_norm|0.2953|_ |0.0287|
101
+ |agieval_logiqa_en | 0|acc |0.5438|_ |0.0195|
102
+ | | |acc_norm|0.4977|_ |0.0196|
103
+ |agieval_lsat_ar | 0|acc |0.2696|_ |0.0293|
104
+ | | |acc_norm|0.2087|_ |0.0269|
105
+ |agieval_lsat_lr | 0|acc |0.7078|_ |0.0202|
106
+ | | |acc_norm|0.6255|_ |0.0215|
107
+ |agieval_lsat_rc | 0|acc |0.7807|_ |0.0253|
108
+ | | |acc_norm|0.7063|_ |0.0278|
109
+ |agieval_sat_en | 0|acc |0.8689|_ |0.0236|
110
+ | | |acc_norm|0.8447|_ |0.0253|
111
+ |agieval_sat_en_without_passage| 0|acc |0.5194|_ |0.0349|
112
+ | | |acc_norm|0.4612|_ |0.0348|
113
+ |agieval_sat_math | 0|acc |0.4409|_ |0.0336|
114
+ | | |acc_norm|0.3818|_ |0.0328|
115
+ Average: 50.27%
116
+ ```
117
+
118
+ BigBench Reasoning Test
119
+ ```
120
+ | Task |Version| Metric |Value | |Stderr|
121
+ |------------------------------------------------|------:|---------------------|-----:|---|-----:|
122
+ |bigbench_causal_judgement | 0|multiple_choice_grade|0.5737|_ |0.0360|
123
+ |bigbench_date_understanding | 0|multiple_choice_grade|0.7263|_ |0.0232|
124
+ |bigbench_disambiguation_qa | 0|multiple_choice_grade|0.3953|_ |0.0305|
125
+ |bigbench_geometric_shapes | 0|multiple_choice_grade|0.4457|_ |0.0263|
126
+ | | |exact_str_match |0.0000|_ |0.0000|
127
+ |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.2820|_ |0.0201|
128
+ |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.2186|_ |0.0156|
129
+ |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|0.4733|_ |0.0289|
130
+ |bigbench_movie_recommendation | 0|multiple_choice_grade|0.5200|_ |0.0224|
131
+ |bigbench_navigate | 0|multiple_choice_grade|0.4910|_ |0.0158|
132
+ |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.7495|_ |0.0097|
133
+ |bigbench_ruin_names | 0|multiple_choice_grade|0.5938|_ |0.0232|
134
+ |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.3808|_ |0.0154|
135
+ |bigbench_snarks | 0|multiple_choice_grade|0.8066|_ |0.0294|
136
+ |bigbench_sports_understanding | 0|multiple_choice_grade|0.5101|_ |0.0159|
137
+ |bigbench_temporal_sequences | 0|multiple_choice_grade|0.3850|_ |0.0154|
138
+ |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.2160|_ |0.0116|
139
+ |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1634|_ |0.0088|
140
+ |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.4733|_ |0.0289|
141
+ Average: 46.69%
142
+ ```
143
+
144
+ TruthfulQA:
145
+ ```
146
+ | Task |Version|Metric|Value | |Stderr|
147
+ |-------------|------:|------|-----:|---|-----:|
148
+ |truthfulqa_mc| 1|mc1 |0.4333|_ |0.0173|
149
+ | | |mc2 |0.6034|_ |0.0149|
150
+ ```
151
+
152
+ Average Score Comparison between OpenHermes-1 Llama-2 13B and OpenHermes-2 Mistral 7B against OpenHermes-2.5 on Mistral-7B:
153
+ ```
154
+ | Bench | OpenHermes-2.5 Mistral 7B | Nous-Hermes-2-Yi-34B | Change/OpenHermes2 |
155
+ |---------------|---------------------------|----------------------|--------------------|
156
+ |GPT4All | 73.12| 76.00| +2.88|
157
+ |---------------------------------------------------------------------------------------|
158
+ |BigBench | 40.96| 46.69| +5.73|
159
+ |---------------------------------------------------------------------------------------|
160
+ |AGI Eval | 43.07| 50.27| +7.20|
161
+ |---------------------------------------------------------------------------------------|
162
+ |TruthfulQA | 53.04| 60.34| +7.30|
163
+ |---------------------------------------------------------------------------------------|
164
+ |Total Score | 210.19| 233.30| +23.11|
165
+ |---------------------------------------------------------------------------------------|
166
+ |Average Total | 52.38| 58.33| +5.95|
167
+ ```
168
+
169
+ # Prompt Format
170
+
171
+ Nous Hermes 2 uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue.
172
+
173
+ System prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model.
174
+
175
+ This is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns.
176
+
177
+ This format enables OpenAI endpoint compatibility, and people familiar with ChatGPT API will be familiar with the format, as it is the same used by OpenAI.
178
+
179
+ Prompt with system instruction (Use whatever system prompt you like, this is just an example!):
180
+ ```
181
+ <|im_start|>system
182
+ You are ""Hermes 2"", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|>
183
+ <|im_start|>user
184
+ Hello, who are you?<|im_end|>
185
+ <|im_start|>assistant
186
+ Hi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|>
187
+ ```
188
+
189
+ This prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the
190
+ `tokenizer.apply_chat_template()` method:
191
+
192
+ ```python
193
+ messages = [
194
+ {""role"": ""system"", ""content"": ""You are Hermes 2.""},
195
+ {""role"": ""user"", ""content"": ""Hello, who are you?""}
196
+ ]
197
+ gen_input = tokenizer.apply_chat_template(messages, return_tensors=""pt"")
198
+ model.generate(**gen_input)
199
+ ```
200
+
201
+ When tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\n` to your prompt, to ensure
202
+ that the model continues with an assistant response.
203
+
204
+ To utilize the prompt format without a system prompt, simply leave the line out.
205
+
206
+ When quantized versions of the model are released, I recommend using LM Studio for chatting with Nous Hermes 2. It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box.
207
+ In LM-Studio, simply select the ChatML Prefix on the settings side pane:
208
+
209
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png)
210
+
211
+ # Quantized Models:
212
+
213
+ GGUF: https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B-GGUF
214
+
215
+ [<img src=""https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png"" alt=""Built with Axolotl"" width=""200"" height=""32""/>](https://github.com/OpenAccess-AI-Collective/axolotl)
216
+ ","{""id"": ""NousResearch/Nous-Hermes-2-Yi-34B"", ""author"": ""NousResearch"", ""sha"": ""fcb0a8847e76aea14aba9aa44009d4418ad7c18f"", ""last_modified"": ""2024-02-20 09:17:20+00:00"", ""created_at"": ""2023-12-23 19:47:48+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 179, ""downloads_all_time"": null, ""likes"": 249, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""yi"", ""instruct"", ""finetune"", ""chatml"", ""gpt4"", ""synthetic data"", ""distillation"", ""conversational"", ""en"", ""dataset:teknium/OpenHermes-2.5"", ""base_model:01-ai/Yi-34B"", ""base_model:finetune:01-ai/Yi-34B"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: 01-ai/Yi-34B\ndatasets:\n- teknium/OpenHermes-2.5\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- yi\n- instruct\n- finetune\n- chatml\n- gpt4\n- synthetic data\n- distillation\nmodel-index:\n- name: Nous-Hermes-2-Yi-34B\n results: []"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""Nous-Hermes-2-Yi-34B"", ""results"": []}], ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|startoftext|>"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<unk>"", ""chat_template"": ""{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""unk_token"": 
""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00015.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00015.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""KBaba7/Quant"", ""prometheus-eval/BiGGen-Bench-Leaderboard"", ""bhaskartripathi/LLM_Quantization"", ""JohnPorkEater/TherapyGPT"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""bardsai/performance-llm-board"", ""ogegadavis254/Raxder"", ""Taranosaurus/Tokenizaminer"", ""ogegadavis254/Raxder_AI"", ""ruslanmv/convert_to_gguf"", ""prashantstha17/NousResearch-Nous-Hermes-2-Yi-34B"", ""trueequals/NousResearch-Nous-Hermes-2-Yi-34B"", ""Ondra18cz/NousResearch-Nous-Hermes-2-Yi-34B"", ""Ondra18cz/NousResearch-Nous-Hermes-2-Yi-34B-1.01"", ""JerroldLee/NousResearch-Nous-Hermes-2-Yi-34B"", ""ogegadavis254/Raxder_Coding_Model"", ""tangjianping/NousResearch-Nous-Hermes-2-Yi-34B"", ""ogegadavis254/Bible_Learner_AI"", ""joaopaulopresa/workshop_llm_ufg_chatbot"", ""ogegadavis254/Raxder_Official-Model"", ""ogegadavis254/Coder"", ""ogegadavis254/api"", ""raxder-ai/Raxder"", ""raxder-ai/main"", ""raxder-ai/teresa"", ""ogegadavis254/2311"", ""raxder-ai/model0"", ""ogegadavis254/trial"", ""ogegadavis254/pdfchat"", ""raxder-ai/terry"", ""Xhaheen/AI_safety_testing"", ""Xhaheen/phoeniks_redteamers"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""BF16"": 34388917248}, ""total"": 34388917248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-20 09:17:20+00:00"", ""cardData"": ""base_model: 01-ai/Yi-34B\ndatasets:\n- teknium/OpenHermes-2.5\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- yi\n- instruct\n- finetune\n- chatml\n- gpt4\n- synthetic data\n- distillation\nmodel-index:\n- name: 
Nous-Hermes-2-Yi-34B\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""658739644bb41498f76f20ce"", ""modelId"": ""NousResearch/Nous-Hermes-2-Yi-34B"", ""usedStorage"": 68778931137}",0,"https://huggingface.co/gbstox/agronomYi-hermes-34B, https://huggingface.co/RedHatAI/Nous-Hermes-2-Yi-34B-pruned2.4, https://huggingface.co/RedHatAI/Nous-Hermes-2-Yi-34B-pruned50",3,,0,"https://huggingface.co/TheBloke/Nous-Hermes-2-Yi-34B-GGUF, https://huggingface.co/TheBloke/Nous-Hermes-2-Yi-34B-AWQ, https://huggingface.co/TheBloke/Nous-Hermes-2-Yi-34B-GPTQ, https://huggingface.co/gbstox/agronomYi-hermes-34B-GGUF, https://huggingface.co/RedHatAI/Nous-Hermes-2-Yi-34B-marlin, https://huggingface.co/mradermacher/Nous-Hermes-2-Yi-34B-i1-GGUF, https://huggingface.co/qwp4w3hyb/Nous-Hermes-2-Yi-34B-iMat-GGUF, https://huggingface.co/tensorblock/Nous-Hermes-2-Yi-34B-GGUF, https://huggingface.co/itlwas/Nous-Hermes-2-Yi-34B-Q4_K_M-GGUF",9,"https://huggingface.co/OpenGVLab/InternVL2-40B, https://huggingface.co/dfurman/HermesBagel-34B-v0.1, https://huggingface.co/gotchu/merge-34b-1, https://huggingface.co/gotchu/merge-34b-2, https://huggingface.co/Bytes512/Queen, https://huggingface.co/OpenGVLab/InternVL-Chat-V1-2, https://huggingface.co/OpenGVLab/InternVL-Chat-V1-2-Plus, https://huggingface.co/wassemgtk/merge-Nous-Hermes-2-Yi-34B-Llama-3-8B-Instruct-12B, https://huggingface.co/wassemgtk/merge-Meta-Llama-3-8B-Instruct-Nous-Hermes-2-Yi-34B, https://huggingface.co/radna/XLA-InternVL2-40B",10,"FallnAI/Quantize-HF-Models, JohnPorkEater/TherapyGPT, K00B404/LLM_Quantization, KBaba7/Quant, Taranosaurus/Tokenizaminer, bardsai/performance-llm-board, bhaskartripathi/LLM_Quantization, eduagarcia/open_pt_llm_leaderboard, 
huggingface/InferenceSupport/discussions/new?title=NousResearch/Nous-Hermes-2-Yi-34B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BNousResearch%2FNous-Hermes-2-Yi-34B%5D(%2FNousResearch%2FNous-Hermes-2-Yi-34B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ogegadavis254/Raxder, ogegadavis254/Raxder_AI, prometheus-eval/BiGGen-Bench-Leaderboard, totolook/Quant",13
217
+ gbstox/agronomYi-hermes-34B,"---
218
+ base_model: NousResearch/Nous-Hermes-2-Yi-34B
219
+ datasets:
220
+ - gbstox/agronomy-resources
221
+ tags:
222
+ - Yi-34B
223
+ - instruct
224
+ - finetune
225
+ - agriculture
226
+ language:
227
+ - en
228
+ ---
229
+
230
+ # AgronomYi-hermes-34B
231
+
232
+ <img src=""https://cdn-uploads.huggingface.co/production/uploads/63042a3d7373aacccd896484/TwXNxFw8zSLuWjiYL41Bj.jpeg"" width=""500"" >
233
+
234
+ # About
235
+ AgronomYi is a fine tune of [Nous-Hermes-2-Yi-34B](https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B), which uses Yi-34B as the base model.
236
+ I fine tuned this with agronomy data (exclusively textbooks & university extension guides), full training data set [here](https://huggingface.co/datasets/gbstox/agronomy-resources).
237
+ AgronomYi outperforms all models on the benchmark except for gpt-4, and consistently beats the base model by 7-9% and the hermes fine tune by 3-5%. I take this to mean that even better results can be achieved with additional fine tuning, and larger models tend to perform better in general.
238
+
239
+ # Benchmark comparison
240
+ [benchmark info here](https://github.com/gbstox/agronomy_llm_benchmarking)
241
+
242
+ | Model Name | Score | Date Tested |
243
+ |------------|-------|-------------|
244
+ | gpt-4 | 85.71% | 2024-01-15 |
245
+ | agronomYi-hermes-34b | 79.05% | 2024-01-15 |
246
+ | mistral-medium | 77.14% | 2024-01-15 |
247
+ | nous-hermes-yi-34b | 76.19% | 2024-01-15 |
248
+ | mixtral-8x7b-instruct | 72.38% | 2024-01-15 |
249
+ | claude-2 | 72.38% | 2024-01-15 |
250
+ | yi-34b-chat | 71.43% | 2024-01-15 |
251
+ | norm | 69.52% | 2024-01-17 |
252
+ | openhermes-2.5-mistral-7b | 69.52% | 2024-01-15 |
253
+ | gpt-3.5-turbo | 67.62% | 2024-01-15 |
254
+ | mistral-7b-instruct | 61.9% | 2024-01-15 |
255
+ ","{""id"": ""gbstox/agronomYi-hermes-34B"", ""author"": ""gbstox"", ""sha"": ""6d7cb9cf177130c5a62e794a3e8d31d10e442545"", ""last_modified"": ""2024-01-18 03:19:53+00:00"", ""created_at"": ""2024-01-10 12:41:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 13, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""Yi-34B"", ""instruct"", ""finetune"", ""agriculture"", ""conversational"", ""en"", ""dataset:gbstox/agronomy-resources"", ""base_model:NousResearch/Nous-Hermes-2-Yi-34B"", ""base_model:finetune:NousResearch/Nous-Hermes-2-Yi-34B"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NousResearch/Nous-Hermes-2-Yi-34B\ndatasets:\n- gbstox/agronomy-resources\nlanguage:\n- en\ntags:\n- Yi-34B\n- instruct\n- finetune\n- agriculture"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|startoftext|>"", ""chat_template"": ""{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": 
""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00008-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00009-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00010-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00011-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00012-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00013-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00014-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00015-of-00015.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-01-18 03:19:53+00:00"", ""cardData"": ""base_model: NousResearch/Nous-Hermes-2-Yi-34B\ndatasets:\n- gbstox/agronomy-resources\nlanguage:\n- en\ntags:\n- Yi-34B\n- instruct\n- finetune\n- agriculture"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""659e908ab81bfb45fb0fd638"", ""modelId"": ""gbstox/agronomYi-hermes-34B"", ""usedStorage"": 137557081577}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=gbstox/agronomYi-hermes-34B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bgbstox%2FagronomYi-hermes-34B%5D(%2Fgbstox%2FagronomYi-hermes-34B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
256
+ https://huggingface.co/RedHatAI/Nous-Hermes-2-Yi-34B-pruned2.4,N/A,N/A,1,,0,,0,,0,,0,,0
257
+ RedHatAI/Nous-Hermes-2-Yi-34B-pruned50,"---
258
+ base_model: NousResearch/Nous-Hermes-2-Yi-34B
259
+ inference: true
260
+ model_type: llama
261
+ quantized_by: mgoin
262
+ tags:
263
+ - nm-vllm
264
+ - sparse
265
+ ---
266
+
267
+ ## Nous-Hermes-2-Yi-34B-pruned50
268
+ This repo contains model files for [Nous Hermes 2 - Yi-34B](https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B) optimized for [NM-vLLM](https://github.com/neuralmagic/nm-vllm), a high-throughput serving engine for compressed LLMs.
269
+
270
+ This model was pruned with [SparseGPT](https://arxiv.org/abs/2301.00774), using [SparseML](https://github.com/neuralmagic/sparseml).
271
+
272
+ ## Inference
273
+ Install [NM-vLLM](https://github.com/neuralmagic/nm-vllm) for fast inference and low memory-usage:
274
+ ```bash
275
+ pip install nm-vllm[sparse]
276
+ ```
277
+ Run in a Python pipeline for local inference:
278
+ ```python
279
+ from vllm import LLM, SamplingParams
280
+
281
+ model = LLM(""nm-testing/Nous-Hermes-2-Yi-34B-pruned50"", sparsity=""sparse_w16a16"")
282
+ prompt = ""How to make banana bread?""
283
+ formatted_prompt = f""<|im_start|>User:{prompt}\n<|im_start|>assistant:\n""
284
+
285
+ sampling_params = SamplingParams(max_tokens=100, temperature=0)
286
+ outputs = model.generate(formatted_prompt, sampling_params=sampling_params)
287
+ print(outputs[0].outputs[0].text)
288
+ """"""
289
+ To make banana bread, you will need the following ingredients:
290
+
291
+ Ingredients:
292
+ - 2 ripe bananas
293
+ - 1 cup all-purpose flour
294
+ - 1/2 cup sugar
295
+ - 1/2 cup butter
296
+ - 1 teaspoon baking soda
297
+ - 1 teaspoon baking powder
298
+ - 1/2 teaspoon salt
299
+ - 1/2 cup milk
300
+ - 1 teaspoon vanilla extract
301
+
302
+ Instructions:
303
+ 1. Preheat the oven to 3
304
+ """"""
305
+ ```
306
+
307
+ ## Prompt template
308
+
309
+ ```
310
+ <|im_start|>User:{prompt}
311
+ <|im_start|>assistant:
312
+
313
+ ```
314
+
315
+ ## Sparsification
316
+ For details on how this model was sparsified, see the `recipe.yaml` in this repo and follow the instructions below.
317
+
318
+ Install [SparseML](https://github.com/neuralmagic/sparseml):
319
+ ```bash
320
+ git clone https://github.com/neuralmagic/sparseml
321
+ pip install -e ""sparseml[transformers]""
322
+ ```
323
+
324
+ Replace the recipe as you like and run this one-shot compression script to apply SparseGPT:
325
+ ```python
326
+ import sparseml.transformers
327
+
328
+ original_model_name = ""NousResearch/Nous-Hermes-2-Yi-34B""
329
+ calibration_dataset = ""open_platypus""
330
+ output_directory = ""output/""
331
+
332
+ recipe = """"""
333
+ test_stage:
334
+ obcq_modifiers:
335
+ SparseGPTModifier:
336
+ sparsity: 0.5
337
+ sequential_update: true
338
+ mask_structure: 0:0
339
+ targets: ['re:model.layers.\d*$']
340
+ """"""
341
+
342
+ # Apply SparseGPT to the model
343
+ sparseml.transformers.oneshot(
344
+ model=original_model_name,
345
+ dataset=calibration_dataset,
346
+ recipe=recipe,
347
+ output_dir=output_directory,
348
+ )
349
+ ```
350
+
351
+ ## Slack
352
+
353
+ For further support, and discussions on these models and AI in general, join [Neural Magic's Slack Community](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ)","{""id"": ""RedHatAI/Nous-Hermes-2-Yi-34B-pruned50"", ""author"": ""RedHatAI"", ""sha"": ""c0b5f32528b91b355dd0eaaefb7f32de9a2ecca6"", ""last_modified"": ""2024-03-05 14:03:16+00:00"", ""created_at"": ""2024-01-31 09:43:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 11, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""llama"", ""text-generation"", ""nm-vllm"", ""sparse"", ""conversational"", ""arxiv:2301.00774"", ""base_model:NousResearch/Nous-Hermes-2-Yi-34B"", ""base_model:finetune:NousResearch/Nous-Hermes-2-Yi-34B"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: NousResearch/Nous-Hermes-2-Yi-34B\ntags:\n- nm-vllm\n- sparse\ninference: true\nmodel_type: llama\nquantized_by: mgoin"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|startoftext|>"", ""chat_template"": ""{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<unk>"", ""unk_token"": ""<unk>"", ""use_default_system_prompt"": 
false}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='recipe.yaml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-05 14:03:16+00:00"", ""cardData"": ""base_model: 
NousResearch/Nous-Hermes-2-Yi-34B\ntags:\n- nm-vllm\n- sparse\ninference: true\nmodel_type: llama\nquantized_by: mgoin"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65ba163caefe7aac1ac24bbe"", ""modelId"": ""RedHatAI/Nous-Hermes-2-Yi-34B-pruned50"", ""usedStorage"": 137556962657}",1,,0,,0,https://huggingface.co/mradermacher/Nous-Hermes-2-Yi-34B-pruned50-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=RedHatAI/Nous-Hermes-2-Yi-34B-pruned50&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BRedHatAI%2FNous-Hermes-2-Yi-34B-pruned50%5D(%2FRedHatAI%2FNous-Hermes-2-Yi-34B-pruned50)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Open-Sora_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ hpcai-tech/Open-Sora,"---
3
+ license: apache-2.0
4
+ ---
5
+
6
+ We recommend using these weights with the [Open-Sora codebase](https://github.com/hpcaitech/Open-Sora).
7
+
8
+ ## 📰 News
9
+ - **[2024.06.17]** 🔥 We released **Open-Sora 1.2**, which includes **3D-VAE**, **rectified flow**, and **score condition**.
10
+ [[blog]](https://hpc-ai.com/blog/open-sora-from-hpc-ai-tech-team-continues-open-source-generate-any-16-second-720p-hd-video-with-one-click-model-weights-ready-to-use)
11
+ * **[2024.03.18]** 🔥 We release **Open-Sora 1.0**, a fully open-source project for video generation.
12
+ Open-Sora 1.0 supports a full pipeline of video data preprocessing, training with
13
+ [ColossalAI](https://github.com/hpcaitech/ColossalAI) acceleration,
14
+ inference, and more. Our provided checkpoints can produce 2s 512x512 videos with only 3 days training.
15
+ [[blog]](https://hpc-ai.com/blog/open-sora-v1.0)
16
+ * **[2024.03.04]** Open-Sora provides training with 46% cost reduction.
17
+ [[blog]](https://hpc-ai.com/blog/open-sora)
18
+
19
+
20
+ ## Open-Sora: Democratizing Efficient Video Production for All
21
+ We present [Open-Sora](https://github.com/hpcaitech/Open-Sora), an initiative dedicated to **efficiently** producing high-quality video and making the model,
22
+ tools and contents accessible to all. By embracing **open-source** principles,
23
+ Open-Sora not only democratizes access to advanced video generation techniques, but also offers a
24
+ streamlined and user-friendly platform that simplifies the complexities of video production.
25
+ With Open-Sora, we aim to inspire innovation, creativity, and inclusivity in the realm of content creation.
26
+
27
+ <h4>Open-Sora is still at an early stage and under active development.</h4>
28
+
29
+ More details can be found at [Open-Sora GitHub](https://github.com/hpcaitech/Open-Sora).
30
+
31
+
32
+ ","{""id"": ""hpcai-tech/Open-Sora"", ""author"": ""hpcai-tech"", ""sha"": ""b7e034189c6ac2d868bc8c672b7cdbd9c6b7ba96"", ""last_modified"": ""2024-06-21 07:57:53+00:00"", ""created_at"": ""2024-03-17 14:04:12+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 185, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='OpenSora-v1-16x256x256.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='OpenSora-v1-HQ-16x256x256.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='OpenSora-v1-HQ-16x512x512.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [""kadirnar/Open-Sora"", ""sandeshrajx/Open-Sora"", ""Taf2023/Open-Sora"", ""denizalpman/Open-Sora""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-06-21 07:57:53+00:00"", ""cardData"": ""license: apache-2.0"", ""transformersInfo"": null, ""_id"": ""65f6f85cd64a62e375871e2f"", ""modelId"": ""hpcai-tech/Open-Sora"", ""usedStorage"": 9129178554}",0,,0,,0,,0,,0,"Taf2023/Open-Sora, denizalpman/Open-Sora, 
huggingface/InferenceSupport/discussions/new?title=hpcai-tech/Open-Sora&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bhpcai-tech%2FOpen-Sora%5D(%2Fhpcai-tech%2FOpen-Sora)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kadirnar/Open-Sora, sandeshrajx/Open-Sora",5
Phi-3-mini-4k-instruct_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
The diff for this file is too large to render. See raw diff
 
Phi-3-small-128k-instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,715 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ microsoft/Phi-3-small-128k-instruct,"---
3
+ license: mit
4
+ license_link: https://huggingface.co/microsoft/Phi-3-small-128k-instruct/resolve/main/LICENSE
5
+
6
+ language:
7
+ - multilingual
8
+ pipeline_tag: text-generation
9
+ tags:
10
+ - nlp
11
+ - code
12
+ inference:
13
+ parameters:
14
+ temperature: 0.7
15
+ widget:
16
+ - messages:
17
+ - role: user
18
+ content: Can you provide ways to eat combinations of bananas and dragonfruits?
19
+ ---
20
+ 🎉 **Phi-3.5**: [[mini-instruct]](https://huggingface.co/microsoft/Phi-3.5-mini-instruct); [[MoE-instruct]](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct) ; [[vision-instruct]](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)
21
+
22
+ ## Model Summary
23
+
24
+ The Phi-3-Small-128K-Instruct is a 7B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties.
25
+ The model belongs to the Phi-3 family with the Small version in two variants [8K](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-small-128k-instruct) which is the context length (in tokens) that it can support.
26
+
27
+ The model has underwent a post-training process that incorporates both supervised fine-tuning and direct preference optimization for the instruction following and safety measures.
28
+ When assessed against benchmarks testing common sense, language understanding, math, code, long context and logical reasoning, Phi-3-Small-128K-Instruct showcased a robust and state-of-the-art performance among models of the same-size and next-size-up.
29
+
30
+ Resources and Technical Documentation:
31
+
32
+ + [Phi-3 Microsoft Blog](https://aka.ms/Phi-3Build2024)
33
+ + [Phi-3 Technical Report](https://aka.ms/phi3-tech-report)
34
+ + [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai)
35
+ + [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook)
36
+
37
+ | | Short Context | Long Context |
38
+ | ------- | ------------- | ------------ |
39
+ | Mini | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx) ; [[GGUF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct-onnx)|
40
+ | Small | 8K [[HF]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct-onnx-cuda)|
41
+ | Medium | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct-onnx-cuda)|
42
+ | Vision | | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct-onnx-cuda)|
43
+
44
+
45
+ ## Intended Uses
46
+
47
+ **Primary use cases**
48
+
49
+ The model is intended for broad commercial and research use in English. The model provides uses for general purpose AI systems and applications which require :
50
+
51
+ 1) Memory/compute constrained environments
52
+ 2) Latency bound scenarios
53
+ 3) Strong reasoning (especially code, math and logic)
54
+
55
+ Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features.
56
+
57
+ **Use case considerations**
58
+
59
+ Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case.
60
+
61
+ Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under.
62
+
63
+ ## How to Use
64
+
65
+ Phi-3-Small-128K-Instruct has been integrated in the development version (4.40.2) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following:
66
+ * Install tiktoken (0.6.0) and triton (2.3.0)
67
+
68
+ * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function.
69
+
70
+ * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source.
71
+
72
+ The current `transformers` version can be verified with: `pip list | grep transformers`.
73
+
74
+ Phi-3-Small-128K-Instruct is also available in [Azure AI](https://ai.azure.com/explore/models?&selectedCollection=phi).
75
+
76
+ ### Tokenizer
77
+
78
+ Phi-3-Small-128K-Instruct supports a vocabulary size of up to `100352` tokens.
79
+
80
+ ### Chat Format
81
+
82
+ Given the nature of the training data, the Phi-3-Small-128K-Instruct model is best suited for prompts using the chat format as follows.
83
+ You can provide the prompt as a question with a generic template as follow:
84
+ ```markdown
85
+ <|endoftext|><|user|>\nQuestion <|end|>\n<|assistant|>
86
+ ```
87
+ For example:
88
+ ```markdown
89
+ <|endoftext|><|user|>
90
+ How to explain Internet for a medieval knight?<|end|>
91
+ <|assistant|>
92
+ ```
93
+
94
+ where the model generates the text after `<|assistant|>` . In case of few-shots prompt, the prompt can be formatted as the following:
95
+
96
+ ```markdown
97
+ <|endoftext|><|user|>
98
+ I am going to Paris, what should I see?<|end|>
99
+ <|assistant|>
100
+ Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""<|end|>
101
+ <|user|>
102
+ What is so great about #1?<|end|>
103
+ <|assistant|>
104
+ ```
105
+
106
+ ### Sample inference code
107
+
108
+ These code snippets show how to quickly get started with running the model on a GPU:
109
+
110
+ ```python
111
+ import torch
112
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
113
+
114
+ torch.random.manual_seed(0)
115
+ model_id = ""microsoft/Phi-3-small-128k-instruct""
116
+ model = AutoModelForCausalLM.from_pretrained(
117
+ model_id,
118
+ torch_dtype=""auto"",
119
+ trust_remote_code=True,
120
+ )
121
+ assert torch.cuda.is_available(), ""This model needs a GPU to run ...""
122
+ device = torch.cuda.current_device()
123
+ model = model.to(device)
124
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
125
+
126
+ messages = [
127
+ {""role"": ""user"", ""content"": ""Can you provide ways to eat combinations of bananas and dragonfruits?""},
128
+ {""role"": ""assistant"", ""content"": ""Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey.""},
129
+ {""role"": ""user"", ""content"": ""What about solving an 2x + 3 = 7 equation?""},
130
+ ]
131
+
132
+ pipe = pipeline(
133
+ ""text-generation"",
134
+ model=model,
135
+ tokenizer=tokenizer,
136
+ device=device
137
+ )
138
+
139
+ generation_args = {
140
+ ""max_new_tokens"": 500,
141
+ ""return_full_text"": False,
142
+ ""temperature"": 0.0,
143
+ ""do_sample"": False,
144
+ }
145
+
146
+ output = pipe(messages, **generation_args)
147
+ print(output[0]['generated_text'])
148
+ ```
149
+
150
+ *Some applications/frameworks might not include a BOS token (`<|endoftext|>`) at the start of the conversation. Please ensure that it is included since it provides more reliable results.*
151
+
152
+ ## Responsible AI Considerations
153
+
154
+ Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:
155
+
156
+ + Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English.
157
+ + Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases.
158
+ + Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case.
159
+ + Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated.
160
+ + Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as ""typing, math, random, collections, datetime, itertools"". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses.
161
+
162
+ Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include:
163
+
164
+ + Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.
165
+ + High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context.
166
+ + Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG).
167
+ + Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case.
168
+ + Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.
169
+
170
+
171
+ ## Training
172
+
173
+ ### Model
174
+
175
+ * Architecture: Phi-3 Small-128K-Instruct has 7B parameters and is a dense decoder-only Transformer model with alternating dense and blocksparse attentions. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidelines.
176
+ * Inputs: Text. It is best suited for prompts using chat format.
177
+ * Context length: 128K tokens
178
+ * GPUs: 1024 H100-80G
179
+ * Training time: 18 days
180
+ * Training data: 4.8T tokens
181
+ * Outputs: Generated text in response to the input
182
+ * Dates: Our models were trained between February and April 2024
183
+ * Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models.
184
+ * Release dates The model weight is released on May 21, 2024.
185
+
186
+ ### Datasets
187
+
188
+ Our training data includes a wide variety of sources, totaling 4.8 trillion tokens (including 10% multilingual), and is a combination of
189
+ 1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code;
190
+ 2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.);
191
+ 3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness.
192
+
193
+ We are focusing on the quality of data that could potentially improve the reasoning ability for the model, and we filter the publicly available documents to contain the correct level of knowledge. As an example, the result of a game in premier league in a particular day might be good training data for frontier models, but we need to remove such information to leave more model capacity for reasoning for the small size models. More details about data can be found in the [Phi-3 Technical Report](https://aka.ms/phi3-tech-report).
194
+
195
+ ## Benchmarks
196
+
197
+ We report the results for Phi-3-Small-128K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Mixtral-8x7b, Gemini-Pro, Gemma 7B, Llama-3-8B-Instruct, GPT-3.5-Turbo-1106, and GPT-4-Turbo-1106.
198
+
199
+ All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation.
200
+
201
+ As is now standard, we use few-shot prompts to evaluate the models, at temperature 0.
202
+ The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3.
203
+ More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model.
204
+
205
+ The number of k–shot examples is listed per-benchmark.
206
+
207
+ |Benchmark|Phi-3-Small-128K-Instruct<br>7b|Gemma<br>7B|Mixtral<br>8x7B|Llama-3-Instruct<br>8b|GPT-3.5-Turbo<br>version 1106|Gemini<br>Pro|GPT-4-Turbo<br>version 1106 (Chat)|
208
+ |---------|-------------------------------|----------|-------------|-------------------------|---------------------------|------------|--------------------------------|
209
+ |AGI Eval<br>5-shot|43.9|42.1|45.2|42.0|48.4|49.0|59.6|
210
+ |MMLU<br>5-shot|75.5|63.6|70.5|66.5|71.4|66.7|84.0|
211
+ |BigBench Hard<br>3-shot|77.6|59.6|69.7|51.5|68.3|75.6|87.7|
212
+ |ANLI<br>7-shot|55.8|48.7|55.2|57.3|58.1|64.2|71.7|
213
+ |HellaSwag<br>5-shot|79.6|49.8|70.4|71.1|78.8|76.2|88.3|
214
+ |ARC Challenge<br>10-shot|90.8|78.3|87.3|82.8|87.4|88.3|95.6|
215
+ |ARC Easy<br>10-shot|97.3|91.4|95.6|93.4|96.3|96.1|98.8|
216
+ |BoolQ<br>2-shot|83.7|66.0|76.6|80.9|79.1|86.4|91.3|
217
+ |CommonsenseQA<br>10-shot|80.8|76.2|78.1|79.0|79.6|81.8|86.7|
218
+ |MedQA<br>2-shot|46.3|49.6|62.2|60.5|63.4|58.2|83.7|
219
+ |OpenBookQA<br>10-shot|87.8|78.6|85.8|82.6|86.0|86.4|93.4|
220
+ |PIQA<br>5-shot|88.1|78.1|86.0|75.7|86.6|86.2|90.1|
221
+ |Social IQA<br>5-shot|78.7|65.5|75.9|73.9|68.3|75.4|81.7|
222
+ |TruthfulQA (MC2)<br>10-shot|69.6|52.1|60.1|63.2|67.7|72.6|85.2|
223
+ |WinoGrande<br>5-shot|80.1|55.6|62.0|65.0|68.8|72.2|86.7|
224
+ |TriviaQA<br>5-shot|66.0|72.3|82.2|67.7|85.8|80.2|73.3|
225
+ |GSM8K Chain of Thought<br>8-shot|87.3|59.8|64.7|77.4|78.1|80.4|94.2|
226
+ |HumanEval<br>0-shot|59.1|34.1|37.8|60.4|62.2|64.4|79.9|
227
+ |MBPP<br>3-shot|70.3|51.5|60.2|67.7|77.8|73.2|86.7|
228
+ |Average|74.6|61.8|69.8|69.4|74.3|75.4|85.2|
229
+
230
+ We take a closer look at different categories across 80 public benchmark datasets at the table below:
231
+
232
+ |Benchmark|Phi-3-Small-128K-Instruct<br>7b|Gemma<br>7B|Mixtral<br>8x7B|Llama-3-Instruct<br>8b|GPT-3.5-Turbo<br>version 1106|Gemini<br>Pro|GPT-4-Turbo<br>version 1106 (Chat)|
233
+ |--------|--------------------------|--------|-------------|-------------------|-------------------|----------|------------------------|
234
+ |Popular aggregated benchmark|70.6|59.4|66.2|59.9|67.0|67.5|80.5|
235
+ |Reasoning|80.3|69.1|77.0|75.7|78.3|80.4|89.3|
236
+ |Language understanding|67.4|58.4|64.9|65.4|70.4|75.3|81.6|
237
+ |Code generation|60.0|45.6|52.7|56.4|70.4|66.7|76.1|
238
+ |Math|48.1|35.8|40.3|41.1|52.8|50.9|67.1|
239
+ |Factual knowledge|41.7|46.7|58.6|43.1|63.4|54.6|45.9|
240
+ |Multilingual|62.6|63.2|63.4|65.0|69.1|76.5|82.0|
241
+ |Robustness|68.7|38.4|51.0|64.5|69.3|69.7|84.6|
242
+
243
+
244
+ ## Software
245
+
246
+ * [PyTorch](https://github.com/pytorch/pytorch)
247
+ * [DeepSpeed](https://github.com/microsoft/DeepSpeed)
248
+ * [Transformers](https://github.com/huggingface/transformers)
249
+ * [Flash-Attention](https://github.com/HazyResearch/flash-attention)
250
+ * [Tiktoken](https://github.com/openai/tiktoken)
251
+ * [Triton](https://github.com/openai/triton)
252
+
253
+ ## Hardware
254
+ Note that by default, the Phi-3-Small model uses flash attention 2 and Triton blocksparse attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types:
255
+ * NVIDIA A100
256
+ * NVIDIA A6000
257
+ * NVIDIA H100
258
+
259
+ If you want to run the model on:
260
+ + Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [128K](https://huggingface.co/microsoft/Phi-3-small-128k-instruct-onnx-cuda)
261
+
262
+
263
+ ## Cross Platform Support
264
+
265
+ ONNX runtime ecosystem now supports Phi3 small models across platforms and hardware.
266
+ Optimized phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML GPU acceleration is supported for Windows desktops GPUs (AMD, Intel, and NVIDIA).
267
+ Along with DML, ONNX Runtime provides cross platform support for Phi3 Small across a range of devices CPU, GPU, and mobile.
268
+ Here are some of the optimized configurations we have added:
269
+
270
+ 1. ONNX models for int4 DML: Quantized to int4 via AWQ
271
+ 2. ONNX model for fp16 CUDA
272
+ 3. ONNX model for int4 CUDA: Quantized to int4 via RTN
273
+ 4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN
274
+
275
+ ## License
276
+
277
+ The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-small-128k/resolve/main/LICENSE).
278
+
279
+ ## Trademarks
280
+
281
+ This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.
282
+ ","{""id"": ""microsoft/Phi-3-small-128k-instruct"", ""author"": ""microsoft"", ""sha"": ""ad85cab62be398dc90203c4377a4ccbf090fbb36"", ""last_modified"": ""2024-09-12 18:24:31+00:00"", ""created_at"": ""2024-05-07 15:29:20+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5372, ""downloads_all_time"": null, ""likes"": 175, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""phi3small"", ""text-generation"", ""nlp"", ""code"", ""conversational"", ""custom_code"", ""multilingual"", ""license:mit"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- multilingual\nlicense: mit\nlicense_link: https://huggingface.co/microsoft/Phi-3-small-128k-instruct/resolve/main/LICENSE\npipeline_tag: text-generation\ntags:\n- nlp\n- code\ninference:\n parameters:\n temperature: 0.7\nwidget:\n- messages:\n - role: user\n content: Can you provide ways to eat combinations of bananas and dragonfruits?"", ""widget_data"": [{""messages"": [{""role"": ""user"", ""content"": ""Can you provide ways to eat combinations of bananas and dragonfruits?""}]}], ""model_index"": null, ""config"": {""architectures"": [""Phi3SmallForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_phi3_small.Phi3SmallConfig"", ""AutoModelForCausalLM"": ""modeling_phi3_small.Phi3SmallForCausalLM"", ""AutoTokenizer"": ""tokenization_phi3_small.Phi3SmallTokenizer""}, ""model_type"": ""phi3small"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>""}}, 
""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_phi3_small.Phi3SmallForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='CODE_OF_CONDUCT.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='NOTICE.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='SECURITY.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cl100k_base.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_phi3_small.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_phi3_small.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='positional_embedding.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_phi3_small.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='triton_blocksparse_attention_layer.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='triton_flash_blocksparse_attn.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""genai-impact/ecologits-calculator"", ""Justinrune/LLaMA-Factory"", ""kenken999/fastapi_django_main_live"", ""seyf1elislam/Phi-3-small-128k-instruct"", ""ashmib/green-city-finder"", ""vuxuanhoan/anychat"", ""Mackintoshj/anychat"", ""mariamgvelesiani/anychat"", ""yalotaibii/anychat"", ""ilovemystagename/anychat"", ""Mister12rayyan/RYanychat"", ""msun415/Llamole""], ""safetensors"": {""parameters"": {""BF16"": 7392272384}, ""total"": 7392272384}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-12 18:24:31+00:00"", ""cardData"": ""language:\n- multilingual\nlicense: mit\nlicense_link: https://huggingface.co/microsoft/Phi-3-small-128k-instruct/resolve/main/LICENSE\npipeline_tag: text-generation\ntags:\n- nlp\n- code\ninference:\n parameters:\n temperature: 0.7\nwidget:\n- messages:\n - role: user\n content: Can you provide ways to eat combinations of bananas and dragonfruits?"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_phi3_small.Phi3SmallForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""663a48d0ee4b504c786fe7b3"", ""modelId"": ""microsoft/Phi-3-small-128k-instruct"", ""usedStorage"": 14784589056}",0,"https://huggingface.co/PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-1bit-smashed, https://huggingface.co/PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-2bit-smashed, https://huggingface.co/PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int8bit-smashed, https://huggingface.co/PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-float8bit-smashed, 
https://huggingface.co/PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int4bit-smashed",5,https://huggingface.co/zaddyzaddy/Phi3-small-magic,1,"https://huggingface.co/PrunaAI/microsoft-Phi-3-small-128k-instruct-bnb-8bit-smashed, https://huggingface.co/RedHatAI/Phi-3-small-128k-instruct-quantized.w8a16",2,"https://huggingface.co/fukayatti/Phi3Mix, https://huggingface.co/LilRg/1PRAMMYL-slerp, https://huggingface.co/catrinbaze/phi3-slerp, https://huggingface.co/LilRg/PRYMMAL-ECE-7B-SLERP-V1, https://huggingface.co/LilRg/PRYMMAL-ECE-7B-SLERP-V2",5,"Justinrune/LLaMA-Factory, Mackintoshj/anychat, Mister12rayyan/RYanychat, ashmib/green-city-finder, eduagarcia/open_pt_llm_leaderboard, genai-impact/ecologits-calculator, huggingface/InferenceSupport/discussions/new?title=microsoft/Phi-3-small-128k-instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmicrosoft%2FPhi-3-small-128k-instruct%5D(%2Fmicrosoft%2FPhi-3-small-128k-instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ilovemystagename/anychat, kenken999/fastapi_django_main_live, mariamgvelesiani/anychat, seyf1elislam/Phi-3-small-128k-instruct, vuxuanhoan/anychat, yalotaibii/anychat",13
283
+ PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-1bit-smashed,"---
284
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
285
+ base_model: microsoft/Phi-3-small-128k-instruct
286
+ metrics:
287
+ - memory_disk
288
+ - memory_inference
289
+ - inference_latency
290
+ - inference_throughput
291
+ - inference_CO2_emissions
292
+ - inference_energy_consumption
293
+ tags:
294
+ - pruna-ai
295
+ ---
296
+ <!-- header start -->
297
+ <!-- 200823 -->
298
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
299
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
300
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
301
+ </a>
302
+ </div>
303
+ <!-- header end -->
304
+
305
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
306
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
307
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
308
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
309
+
310
+ # Simply make AI models cheaper, smaller, faster, and greener!
311
+
312
+ - Give a thumbs up if you like this model!
313
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
314
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
315
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
316
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
317
+
318
+ ## Results
319
+
320
+ ![image info](./plots.png)
321
+
322
+ **Frequently Asked Questions**
323
+ - ***How does the compression work?*** The model is compressed with hqq.
324
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
325
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
326
+ - ***What is the model format?*** We use safetensors.
327
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
328
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
329
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
330
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
331
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
332
+
333
+ ## Setup
334
+
335
+ You can run the smashed model with these steps:
336
+
337
+ 0. Check requirements from the original repo microsoft/Phi-3-small-128k-instruct installed. In particular, check python, cuda, and transformers versions.
338
+ 1. Make sure that you have installed quantization related packages.
339
+ ```bash
340
+ pip install hqq
341
+ ```
342
+ 2. Load & run the model.
343
+ ```python
344
+ from transformers import AutoModelForCausalLM, AutoTokenizer
345
+ from hqq.engine.hf import HQQModelForCausalLM
346
+ from hqq.models.hf.base import AutoHQQHFModel
347
+
348
+ try:
349
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-1bit-smashed"", device_map='auto')
350
+ except:
351
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-1bit-smashed"")
352
+ tokenizer = AutoTokenizer.from_pretrained(""microsoft/Phi-3-small-128k-instruct"")
353
+
354
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
355
+
356
+ outputs = model.generate(input_ids, max_new_tokens=216)
357
+ tokenizer.decode(outputs[0])
358
+ ```
359
+
360
+ ## Configurations
361
+
362
+ The configuration info are in `smash_config.json`.
363
+
364
+ ## Credits & License
365
+
366
+ The license of the smashed model follows the license of the original model. Please check the license of the original model microsoft/Phi-3-small-128k-instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
367
+
368
+ ## Want to compress other models?
369
+
370
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
371
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-1bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""b92d679634c0ea2189073377ee397f294f8f0228"", ""last_modified"": ""2024-07-15 20:30:37+00:00"", ""created_at"": ""2024-07-15 20:29:38+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""phi3small"", ""text-generation"", ""pruna-ai"", ""conversational"", ""custom_code"", ""base_model:microsoft/Phi-3-small-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-small-128k-instruct"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-small-128k-instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Phi3SmallForCausalLM""], ""auto_map"": {""AutoConfig"": ""microsoft/Phi-3-small-128k-instruct--configuration_phi3_small.Phi3SmallConfig"", ""AutoModelForCausalLM"": ""microsoft/Phi-3-small-128k-instruct--modeling_phi3_small.Phi3SmallForCausalLM"", ""AutoTokenizer"": ""microsoft/Phi-3-small-128k-instruct--tokenization_phi3_small.Phi3SmallTokenizer""}, 
""model_type"": ""phi3small"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Phi-3-small-128k-instruct--modeling_phi3_small.Phi3SmallForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cl100k_base.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-15 20:30:37+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-small-128k-instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Phi-3-small-128k-instruct--modeling_phi3_small.Phi3SmallForCausalLM"", ""pipeline_tag"": 
""text-generation"", ""processor"": null}, ""_id"": ""669586b26206df6098a69a83"", ""modelId"": ""PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-1bit-smashed"", ""usedStorage"": 1862171173}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-1bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fmicrosoft-Phi-3-small-128k-instruct-HQQ-1bit-smashed%5D(%2FPrunaAI%2Fmicrosoft-Phi-3-small-128k-instruct-HQQ-1bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
372
+ PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-2bit-smashed,"---
373
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
374
+ base_model: microsoft/Phi-3-small-128k-instruct
375
+ metrics:
376
+ - memory_disk
377
+ - memory_inference
378
+ - inference_latency
379
+ - inference_throughput
380
+ - inference_CO2_emissions
381
+ - inference_energy_consumption
382
+ tags:
383
+ - pruna-ai
384
+ ---
385
+ <!-- header start -->
386
+ <!-- 200823 -->
387
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
388
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
389
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
390
+ </a>
391
+ </div>
392
+ <!-- header end -->
393
+
394
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
395
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
396
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
397
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
398
+
399
+ # Simply make AI models cheaper, smaller, faster, and greener!
400
+
401
+ - Give a thumbs up if you like this model!
402
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
403
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
404
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
405
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
406
+
407
+ ## Results
408
+
409
+ ![image info](./plots.png)
410
+
411
+ **Frequently Asked Questions**
412
+ - ***How does the compression work?*** The model is compressed with hqq.
413
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
414
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
415
+ - ***What is the model format?*** We use safetensors.
416
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
417
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
418
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
419
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
420
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
421
+
422
+ ## Setup
423
+
424
+ You can run the smashed model with these steps:
425
+
426
+ 0. Check requirements from the original repo microsoft/Phi-3-small-128k-instruct installed. In particular, check python, cuda, and transformers versions.
427
+ 1. Make sure that you have installed quantization related packages.
428
+ ```bash
429
+ pip install hqq
430
+ ```
431
+ 2. Load & run the model.
432
+ ```python
433
+ from transformers import AutoModelForCausalLM, AutoTokenizer
434
+ from hqq.engine.hf import HQQModelForCausalLM
435
+ from hqq.models.hf.base import AutoHQQHFModel
436
+
437
+ try:
438
+ model = HQQModelForCausalLM.from_quantized(""PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-2bit-smashed"", device_map='auto')
439
+ except:
440
+ model = AutoHQQHFModel.from_quantized(""PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-2bit-smashed"")
441
+ tokenizer = AutoTokenizer.from_pretrained(""microsoft/Phi-3-small-128k-instruct"")
442
+
443
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
444
+
445
+ outputs = model.generate(input_ids, max_new_tokens=216)
446
+ tokenizer.decode(outputs[0])
447
+ ```
448
+
449
+ ## Configurations
450
+
451
+ The configuration info are in `smash_config.json`.
452
+
453
+ ## Credits & License
454
+
455
+ The license of the smashed model follows the license of the original model. Please check the license of the original model microsoft/Phi-3-small-128k-instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
456
+
457
+ ## Want to compress other models?
458
+
459
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
460
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-2bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""6f419e8a4a863e99f80016d20a4850c0afb21ec1"", ""last_modified"": ""2024-07-15 20:31:14+00:00"", ""created_at"": ""2024-07-15 20:29:42+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""phi3small"", ""text-generation"", ""pruna-ai"", ""conversational"", ""custom_code"", ""base_model:microsoft/Phi-3-small-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-small-128k-instruct"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-small-128k-instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Phi3SmallForCausalLM""], ""auto_map"": {""AutoConfig"": ""microsoft/Phi-3-small-128k-instruct--configuration_phi3_small.Phi3SmallConfig"", ""AutoModelForCausalLM"": ""microsoft/Phi-3-small-128k-instruct--modeling_phi3_small.Phi3SmallForCausalLM"", ""AutoTokenizer"": ""microsoft/Phi-3-small-128k-instruct--tokenization_phi3_small.Phi3SmallTokenizer""}, 
""model_type"": ""phi3small"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Phi-3-small-128k-instruct--modeling_phi3_small.Phi3SmallForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='cl100k_base.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-15 20:31:14+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-small-128k-instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""microsoft/Phi-3-small-128k-instruct--modeling_phi3_small.Phi3SmallForCausalLM"", ""pipeline_tag"": 
""text-generation"", ""processor"": null}, ""_id"": ""669586b68b906774be07e503"", ""modelId"": ""PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-2bit-smashed"", ""usedStorage"": 2734586405}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/microsoft-Phi-3-small-128k-instruct-HQQ-2bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fmicrosoft-Phi-3-small-128k-instruct-HQQ-2bit-smashed%5D(%2FPrunaAI%2Fmicrosoft-Phi-3-small-128k-instruct-HQQ-2bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
461
+ PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int8bit-smashed,"---
462
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
463
+ base_model: microsoft/Phi-3-small-128k-instruct
464
+ metrics:
465
+ - memory_disk
466
+ - memory_inference
467
+ - inference_latency
468
+ - inference_throughput
469
+ - inference_CO2_emissions
470
+ - inference_energy_consumption
471
+ tags:
472
+ - pruna-ai
473
+ ---
474
+ <!-- header start -->
475
+ <!-- 200823 -->
476
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
477
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
478
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
479
+ </a>
480
+ </div>
481
+ <!-- header end -->
482
+
483
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
484
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
485
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
486
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
487
+
488
+ # Simply make AI models cheaper, smaller, faster, and greener!
489
+
490
+ - Give a thumbs up if you like this model!
491
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
492
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
493
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
494
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
495
+
496
+ ## Results
497
+
498
+ ![image info](./plots.png)
499
+
500
+ **Frequently Asked Questions**
501
+ - ***How does the compression work?*** The model is compressed with quanto.
502
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
503
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
504
+ - ***What is the model format?*** We use safetensors.
505
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
506
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
507
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
508
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
509
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
510
+
511
+ ## Setup
512
+
513
+ You can run the smashed model with these steps:
514
+
515
+ 0. Check requirements from the original repo microsoft/Phi-3-small-128k-instruct installed. In particular, check python, cuda, and transformers versions.
516
+ 1. Make sure that you have installed quantization related packages.
517
+ ```bash
518
+ pip install quanto
519
+ ```
520
+ 2. Load & run the model.
521
+ ```python
522
+ from transformers import AutoModelForCausalLM, AutoTokenizer
523
+ IMPORTS
524
+
525
+ model = AutoModelForCausalLM.from_pretrained(""PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int8bit-smashed"", trust_remote_code=True, device_map='auto')
526
+ tokenizer = AutoTokenizer.from_pretrained(""microsoft/Phi-3-small-128k-instruct"")
527
+
528
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
529
+
530
+ outputs = model.generate(input_ids, max_new_tokens=216)
531
+ tokenizer.decode(outputs[0])
532
+ ```
533
+
534
+ ## Configurations
535
+
536
+ The configuration info are in `smash_config.json`.
537
+
538
+ ## Credits & License
539
+
540
+ The license of the smashed model follows the license of the original model. Please check the license of the original model microsoft/Phi-3-small-128k-instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
541
+
542
+ ## Want to compress other models?
543
+
544
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
545
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int8bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""60fb3974293833d7aba210b4cfbdbc44c381e53f"", ""last_modified"": ""2024-07-19 09:28:06+00:00"", ""created_at"": ""2024-07-15 20:35:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:microsoft/Phi-3-small-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-small-128k-instruct"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-small-128k-instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='cl100k_base.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:28:06+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-small-128k-instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""66958822534f204a2b66354d"", ""modelId"": ""PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int8bit-smashed"", ""usedStorage"": 15623850318}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int8bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fmicrosoft-Phi-3-small-128k-instruct-QUANTO-int8bit-smashed%5D(%2FPrunaAI%2Fmicrosoft-Phi-3-small-128k-instruct-QUANTO-int8bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
546
+ PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-float8bit-smashed,"---
547
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
548
+ base_model: microsoft/Phi-3-small-128k-instruct
549
+ metrics:
550
+ - memory_disk
551
+ - memory_inference
552
+ - inference_latency
553
+ - inference_throughput
554
+ - inference_CO2_emissions
555
+ - inference_energy_consumption
556
+ tags:
557
+ - pruna-ai
558
+ ---
559
+ <!-- header start -->
560
+ <!-- 200823 -->
561
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
562
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
563
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
564
+ </a>
565
+ </div>
566
+ <!-- header end -->
567
+
568
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
569
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
570
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
571
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
572
+
573
+ # Simply make AI models cheaper, smaller, faster, and greener!
574
+
575
+ - Give a thumbs up if you like this model!
576
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
577
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
578
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
579
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
580
+
581
+ ## Results
582
+
583
+ ![image info](./plots.png)
584
+
585
+ **Frequently Asked Questions**
586
+ - ***How does the compression work?*** The model is compressed with quanto.
587
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
588
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
589
+ - ***What is the model format?*** We use safetensors.
590
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
591
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
592
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
593
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
594
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
595
+
596
+ ## Setup
597
+
598
+ You can run the smashed model with these steps:
599
+
600
+ 0. Check requirements from the original repo microsoft/Phi-3-small-128k-instruct installed. In particular, check python, cuda, and transformers versions.
601
+ 1. Make sure that you have installed quantization related packages.
602
+ ```bash
603
+ pip install quanto
604
+ ```
605
+ 2. Load & run the model.
606
+ ```python
607
+ from transformers import AutoModelForCausalLM, AutoTokenizer
608
+ IMPORTS
609
+
610
+ model = AutoModelForCausalLM.from_pretrained(""PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-float8bit-smashed"", trust_remote_code=True, device_map='auto')
611
+ tokenizer = AutoTokenizer.from_pretrained(""microsoft/Phi-3-small-128k-instruct"")
612
+
613
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
614
+
615
+ outputs = model.generate(input_ids, max_new_tokens=216)
616
+ tokenizer.decode(outputs[0])
617
+ ```
618
+
619
+ ## Configurations
620
+
621
+ The configuration info are in `smash_config.json`.
622
+
623
+ ## Credits & License
624
+
625
+ The license of the smashed model follows the license of the original model. Please check the license of the original model microsoft/Phi-3-small-128k-instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
626
+
627
+ ## Want to compress other models?
628
+
629
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
630
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-float8bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""558afb26200590f7f1d1e1a17b626d80adea880f"", ""last_modified"": ""2024-07-19 09:21:11+00:00"", ""created_at"": ""2024-07-15 20:36:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:microsoft/Phi-3-small-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-small-128k-instruct"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-small-128k-instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='cl100k_base.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:21:11+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-small-128k-instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""66958839bb17842a4bbfe393"", ""modelId"": ""PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-float8bit-smashed"", ""usedStorage"": 15623850318}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-float8bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fmicrosoft-Phi-3-small-128k-instruct-QUANTO-float8bit-smashed%5D(%2FPrunaAI%2Fmicrosoft-Phi-3-small-128k-instruct-QUANTO-float8bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
631
+ PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int4bit-smashed,"---
632
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
633
+ base_model: microsoft/Phi-3-small-128k-instruct
634
+ metrics:
635
+ - memory_disk
636
+ - memory_inference
637
+ - inference_latency
638
+ - inference_throughput
639
+ - inference_CO2_emissions
640
+ - inference_energy_consumption
641
+ tags:
642
+ - pruna-ai
643
+ ---
644
+ <!-- header start -->
645
+ <!-- 200823 -->
646
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
647
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
648
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
649
+ </a>
650
+ </div>
651
+ <!-- header end -->
652
+
653
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
654
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
655
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
656
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
657
+
658
+ # Simply make AI models cheaper, smaller, faster, and greener!
659
+
660
+ - Give a thumbs up if you like this model!
661
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
662
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
663
+ - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
664
+ - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.
665
+
666
+ ## Results
667
+
668
+ ![image info](./plots.png)
669
+
670
+ **Frequently Asked Questions**
671
+ - ***How does the compression work?*** The model is compressed with quanto.
672
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
673
+ - ***How is the model efficiency evaluated?*** These results were obtained on HARDWARE_NAME with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
674
+ - ***What is the model format?*** We use safetensors.
675
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
676
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
677
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
678
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
679
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
680
+
681
+ ## Setup
682
+
683
+ You can run the smashed model with these steps:
684
+
685
+ 0. Check requirements from the original repo microsoft/Phi-3-small-128k-instruct installed. In particular, check python, cuda, and transformers versions.
686
+ 1. Make sure that you have installed quantization related packages.
687
+ ```bash
688
+ pip install quanto
689
+ ```
690
+ 2. Load & run the model.
691
+ ```python
692
+ from transformers import AutoModelForCausalLM, AutoTokenizer
693
+ IMPORTS
694
+
695
+ model = AutoModelForCausalLM.from_pretrained(""PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int4bit-smashed"", trust_remote_code=True, device_map='auto')
696
+ tokenizer = AutoTokenizer.from_pretrained(""microsoft/Phi-3-small-128k-instruct"")
697
+
698
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
699
+
700
+ outputs = model.generate(input_ids, max_new_tokens=216)
701
+ tokenizer.decode(outputs[0])
702
+ ```
703
+
704
+ ## Configurations
705
+
706
+ The configuration info are in `smash_config.json`.
707
+
708
+ ## Credits & License
709
+
710
+ The license of the smashed model follows the license of the original model. Please check the license of the original model microsoft/Phi-3-small-128k-instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
711
+
712
+ ## Want to compress other models?
713
+
714
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
715
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int4bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""71a3f53d6f1fbba105de178ddc6fb66322258378"", ""last_modified"": ""2024-07-19 09:20:03+00:00"", ""created_at"": ""2024-07-15 20:36:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pruna-ai"", ""base_model:microsoft/Phi-3-small-128k-instruct"", ""base_model:finetune:microsoft/Phi-3-small-128k-instruct"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: microsoft/Phi-3-small-128k-instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""chat_template"": ""{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='cl100k_base.tiktoken', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-19 09:20:03+00:00"", ""cardData"": ""base_model: microsoft/Phi-3-small-128k-instruct\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""6695883e69d46521da9ff2e9"", ""modelId"": ""PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int4bit-smashed"", ""usedStorage"": 15623850446}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/microsoft-Phi-3-small-128k-instruct-QUANTO-int4bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2Fmicrosoft-Phi-3-small-128k-instruct-QUANTO-int4bit-smashed%5D(%2FPrunaAI%2Fmicrosoft-Phi-3-small-128k-instruct-QUANTO-int4bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Qwen2-7B_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
The diff for this file is too large to render. See raw diff
 
Ruyi-Mini-7B_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ IamCreateAI/Ruyi-Mini-7B,"---
3
+ language:
4
+ - ""en""
5
+ tags:
6
+ - video generation
7
+ - CreateAI
8
+ license: apache-2.0
9
+ pipeline_tag: image-to-video
10
+ ---
11
+
12
+
13
+ # Ruyi-Mini-7B
14
+ [Hugging Face](https://huggingface.co/IamCreateAI/Ruyi-Mini-7B) | [Github](https://github.com/IamCreateAI/Ruyi-Models)
15
+
16
+ An image-to-video model by CreateAI.
17
+
18
+ ## Overview
19
+
20
+ Ruyi-Mini-7B is an open-source image-to-video generation model. Starting with an input image, Ruyi produces subsequent video frames at resolutions ranging from 360p to 720p, supporting various aspect ratios and a maximum duration of 5 seconds. Enhanced with motion and camera control, Ruyi offers greater flexibility and creativity in video generation. We are releasing the model under the permissive Apache 2.0 license.
21
+
22
+ ## Update
23
+
24
+ Dec 24, 2024: The diffusion model is updated to fix the black lines when creating 3:4 or 4:5 videos.
25
+
26
+ Dec 16, 2024: Ruyi-mini-7B is released.
27
+
28
+ ## Installation
29
+
30
+ Install code from github:
31
+ ```bash
32
+ git clone https://github.com/IamCreateAI/Ruyi-Models
33
+ cd Ruyi-Models
34
+ pip install -r requirements.txt
35
+ ```
36
+
37
+ ## Running
38
+
39
+ We provide two ways to run our model. The first is directly using python code.
40
+
41
+ ```bash
42
+ python3 predict_i2v.py
43
+ ```
44
+
45
+ Or use ComfyUI wrapper in our [github repo](https://github.com/IamCreateAI/Ruyi-Models).
46
+
47
+ ## Model Architecture
48
+
49
+ Ruyi-Mini-7B is an advanced image-to-video model with about 7.1 billion parameters. The model architecture is modified from [EasyAnimate V4 model](https://github.com/aigc-apps/EasyAnimate), whose transformer module is inherited from [HunyuanDiT](https://github.com/Tencent/HunyuanDiT). It comprises three key components:
50
+ 1. Casual VAE Module: Handles video compression and decompression. It reduces spatial resolution to 1/8 and temporal resolution to 1/4, with each latent pixel is represented in 16 float numbers after compression.
51
+ 2. Diffusion Transformer Module: Generates compressed video data using 3D full attention, with:
52
+ - 2D Normalized-RoPE for spatial dimensions;
53
+ - Sin-cos position embedding for temporal dimensions;
54
+ - DDPM (Denoising Diffusion Probabilistic Models) for model training.
55
+ 3. Ruyi also utilizes a CLIP model to extract the semantic features from the input image to guide the whole video generation. The CLIP features are introduced into the transformer by cross-attention.
56
+
57
+ ## Training Data and Methodology
58
+ The training process is divided into four phases:
59
+ - Phase 1: Pre-training from scratch with ~200M video clips and ~30M images at a 256-resolution, using a batch size of 4096 for 350,000 iterations to achieve full convergence.
60
+ - Phase 2: Fine-tuning with ~60M video clips for multi-scale resolutions (384–512), with a batch size of 1024 for 60,000 iterations.
61
+ - Phase 3: High-quality fine-tuning with ~20M video clips and ~8M images for 384–1024 resolutions, with dynamic batch sizes based on memory and 10,000 iterations.
62
+ - Phase 4: Image-to-video training with ~10M curated high-quality video clips, with dynamic batch sizes based on memory for ~10,000 iterations.
63
+
64
+ ## Hardware Requirements
65
+ The VRAM cost of Ruyi depends on the resolution and duration of the video. Here we list the costs for some typical video size. Tested on single A100.
66
+ |Video Size | 360x480x120 | 384x672x120 | 480x640x120 | 630x1120x120 | 720x1280x120 |
67
+ |:--:|:--:|:--:|:--:|:--:|:--:|
68
+ |Memory | 21.5GB | 25.5GB | 27.7GB | 44.9GB | 54.8GB |
69
+ |Time | 03:10 | 05:29 | 06:49 | 24:18 | 39:02 |
70
+
71
+ For 24GB VRAM cards such as RTX4090, we provide `low_gpu_memory_mode`, under which the model can generate 720x1280x120 videos with a longer time.
72
+
73
+ ## Showcase
74
+
75
+ ### Image to Video Effects
76
+
77
+ <table border=""0"" style=""width: 100%; text-align: left; margin-top: 20px;"">
78
+ <tr>
79
+ <td><video src=""https://github.com/user-attachments/assets/4dedf40b-82f2-454c-9a67-5f4ed243f5ea"" width=""100%"" style=""max-height:640px; min-height: 200px"" controls autoplay loop></video></td>
80
+ <td><video src=""https://github.com/user-attachments/assets/905fef17-8c5d-49b0-a49a-6ae7e212fa07"" width=""100%"" style=""max-height:640px; min-height: 200px"" controls autoplay loop></video></td>
81
+ <td><video src=""https://github.com/user-attachments/assets/20daab12-b510-448a-9491-389d7bdbbf2e"" width=""100%"" style=""max-height:640px; min-height: 200px"" controls autoplay loop></video></td>
82
+ <td><video src=""https://github.com/user-attachments/assets/f1bb0a91-d52a-4611-bac2-8fcf9658cac0"" width=""100%"" style=""max-height:640px; min-height: 200px"" controls autoplay loop></video></td>
83
+ </tr>
84
+ </table>
85
+
86
+ ### Camera Control
87
+
88
+ <table border=""0"" style=""width: 100%; text-align: center; "">
89
+ <tr>
90
+ <td align=center><img src=""https://github.com/user-attachments/assets/8aedcea6-3b8e-4c8b-9fed-9ceca4d41954"" width=""100%"" style=""max-height:240px; min-height: 100px; margin-top: 20%;""></img></td>
91
+ <td align=center><video src=""https://github.com/user-attachments/assets/d9d027d4-0d4f-45f5-9d46-49860b562c69"" width=""100%"" style=""max-height:360px; min-height: 200px"" controls autoplay loop></video></td>
92
+ <td align=center><video src=""https://github.com/user-attachments/assets/7716a67b-1bb8-4d44-b128-346cbc35e4ee"" width=""100%"" style=""max-height:360px; min-height: 200px"" controls autoplay loop></video></td>
93
+ </tr>
94
+ <tr><td>input</td><td>left</td><td>right</td></tr>
95
+ <tr>
96
+ <td align=center><video src=""https://github.com/user-attachments/assets/cc1f1928-cab7-4c4b-90af-928936102e66"" width=""100%"" style=""max-height:360px; min-height: 200px"" controls autoplay loop></video></td>
97
+ <td align=center><video src=""https://github.com/user-attachments/assets/c742ea2c-503a-454f-a61a-10b539100cd9"" width=""100%"" style=""max-height:360px; min-height: 200px"" controls autoplay loop></video></td>
98
+ <td align=center><video src=""https://github.com/user-attachments/assets/442839fa-cc53-4b75-b015-909e44c065e0"" width=""100%"" style=""max-height:360px; min-height: 200px"" controls autoplay loop></video></td>
99
+ </tr>
100
+ <tr><td>static</td><td>up</td><td>down</td></tr>
101
+ </table>
102
+
103
+ ### Motion Amplitude Control
104
+
105
+ <table border=""0"" style=""width: 100%; text-align: left; margin-top: 20px;"">
106
+ <tr>
107
+ <td align=center><video src=""https://github.com/user-attachments/assets/0020bd54-0ff6-46ad-91ee-d9f0df013772"" width=""100%"" controls autoplay loop></video>motion 1</td>
108
+ <td align=center><video src=""https://github.com/user-attachments/assets/d1c26419-54e3-4b86-8ae3-98e12de3022e"" width=""100%"" controls autoplay loop></video>motion 2</td>
109
+ <td align=center><video src=""https://github.com/user-attachments/assets/535147a2-049a-4afc-8d2a-017bc778977e"" width=""100%"" controls autoplay loop></video>motion 3</td>
110
+ <td align=center><video src=""https://github.com/user-attachments/assets/bf893d53-2e11-406f-bb9a-2aacffcecd44"" width=""100%"" controls autoplay loop></video>motion 4</td>
111
+ </tr>
112
+ </table>
113
+
114
+ ## Limitations
115
+ There are some known limitations in this experimental release. Texts, hands and crowded human faces may be distorted. The video may cut to another scene when the model does not know how to generate future frames. We are still working on these problems and will update the model as we make progress.
116
+
117
+
118
+ ## BibTeX
119
+ ```
120
+ @misc{createai2024ruyi,
121
+ title={Ruyi-Mini-7B},
122
+ author={CreateAI Team},
123
+ year={2024},
124
+ publisher = {GitHub},
125
+ journal = {GitHub repository},
126
+ howpublished={\url{https://github.com/IamCreateAI/Ruyi-Models}}
127
+ }
128
+ ```
129
+
130
+ ## Contact Us
131
+
132
+ You are welcomed to join our [Discord](https://discord.com/invite/nueQFQwwGw) or Wechat Group (Scan QR code to add Ruyi Assistant and join the official group) for further discussion!
133
+
134
+ ![wechat](https://github.com/user-attachments/assets/cc5e25c6-34ab-4be1-a59b-7d5789264a9c)","{""id"": ""IamCreateAI/Ruyi-Mini-7B"", ""author"": ""IamCreateAI"", ""sha"": ""fbb88130fc98dbc2e2de2ad510a081e332535566"", ""last_modified"": ""2024-12-25 10:50:32+00:00"", ""created_at"": ""2024-12-16 12:54:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 483, ""downloads_all_time"": null, ""likes"": 609, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""video generation"", ""CreateAI"", ""image-to-video"", ""en"", ""license:apache-2.0"", ""diffusers:RuyiInpaintPipeline"", ""region:us""], ""pipeline_tag"": ""image-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: apache-2.0\npipeline_tag: image-to-video\ntags:\n- video generation\n- CreateAI"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""RuyiInpaintPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='embeddings.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_encoder/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='transformer/diffusion_pytorch_model.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""NaqchoAli/testimage""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-25 10:50:32+00:00"", ""cardData"": ""language:\n- en\nlicense: apache-2.0\npipeline_tag: image-to-video\ntags:\n- video generation\n- CreateAI"", ""transformersInfo"": null, ""_id"": ""676022fba63fff7b5bfdc8be"", ""modelId"": ""IamCreateAI/Ruyi-Mini-7B"", ""usedStorage"": 17334852644}",0,"https://huggingface.co/tcoh/CoralAI, https://huggingface.co/stargolf/mon_chat, https://huggingface.co/kabirclark/kabirworld, https://huggingface.co/genoxan/renax",4,https://huggingface.co/S4lv4tr0n/Mandraken,1,,0,,0,"NaqchoAli/testimage, huggingface/InferenceSupport/discussions/new?title=IamCreateAI/Ruyi-Mini-7B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BIamCreateAI%2FRuyi-Mini-7B%5D(%2FIamCreateAI%2FRuyi-Mini-7B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
135
+ tcoh/CoralAI,"---
136
+ license: mit
137
+ datasets:
138
+ - fka/awesome-chatgpt-prompts
139
+ base_model:
140
+ - Qwen/QwQ-32B-Preview
141
+ - IamCreateAI/Ruyi-Mini-7B
142
+ new_version: Qwen/Qwen2.5-Coder-32B-Instruct
143
+ library_name: fasttext
144
+ metrics:
145
+ - accuracy
146
+ ---","{""id"": ""tcoh/CoralAI"", ""author"": ""tcoh"", ""sha"": ""be35b5945b0ce135373cbe6952b62abac630b084"", ""last_modified"": ""2024-12-23 09:48:49+00:00"", ""created_at"": ""2024-12-23 09:41:52+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""fasttext"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fasttext"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:IamCreateAI/Ruyi-Mini-7B"", ""base_model:finetune:IamCreateAI/Ruyi-Mini-7B"", ""license:mit"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/QwQ-32B-Preview\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- fka/awesome-chatgpt-prompts\nlibrary_name: fasttext\nlicense: mit\nmetrics:\n- accuracy\nnew_version: Qwen/Qwen2.5-Coder-32B-Instruct"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-23 09:48:49+00:00"", ""cardData"": ""base_model:\n- Qwen/QwQ-32B-Preview\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- fka/awesome-chatgpt-prompts\nlibrary_name: fasttext\nlicense: mit\nmetrics:\n- accuracy\nnew_version: Qwen/Qwen2.5-Coder-32B-Instruct"", ""transformersInfo"": null, ""_id"": ""67693060a5bdfcf3b312f2c8"", ""modelId"": ""tcoh/CoralAI"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=tcoh/CoralAI&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btcoh%2FCoralAI%5D(%2Ftcoh%2FCoralAI)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
147
+ stargolf/mon_chat,"---
148
+ license: apache-2.0
149
+ datasets:
150
+ - fka/awesome-chatgpt-prompts
151
+ language:
152
+ - fr
153
+ base_model:
154
+ - IamCreateAI/Ruyi-Mini-7B
155
+ ---","{""id"": ""stargolf/mon_chat"", ""author"": ""stargolf"", ""sha"": ""4eb82fea70247c0498623dcde76c2d951ef1e55b"", ""last_modified"": ""2024-12-29 21:13:42+00:00"", ""created_at"": ""2024-12-29 21:12:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""fr"", ""dataset:fka/awesome-chatgpt-prompts"", ""base_model:IamCreateAI/Ruyi-Mini-7B"", ""base_model:finetune:IamCreateAI/Ruyi-Mini-7B"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- fr\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-29 21:13:42+00:00"", ""cardData"": ""base_model:\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- fka/awesome-chatgpt-prompts\nlanguage:\n- fr\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""6771bb3879d1ea02e20531d1"", ""modelId"": ""stargolf/mon_chat"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=stargolf/mon_chat&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bstargolf%2Fmon_chat%5D(%2Fstargolf%2Fmon_chat)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
156
+ kabirclark/kabirworld,"---
157
+ license: llama3.3
158
+ datasets:
159
+ - HuggingFaceFW/fineweb-2
160
+ language:
161
+ - aa
162
+ metrics:
163
+ - bleu
164
+ base_model:
165
+ - meta-llama/Llama-3.3-70B-Instruct
166
+ - IamCreateAI/Ruyi-Mini-7B
167
+ new_version: meta-llama/Llama-3.3-70B-Instruct
168
+ pipeline_tag: text-generation
169
+ library_name: asteroid
170
+ tags:
171
+ - code
172
+ - legal
173
+ ---
174
+ from diffusers import DiffusionPipeline
175
+
176
+ pipe = DiffusionPipeline.from_pretrained(""black-forest-labs/FLUX.1-dev"")
177
+
178
+ prompt = ""Astronaut in a jungle, cold color palette, muted colors, detailed, 8k""
179
+ image = pipe(prompt).images[0]","{""id"": ""kabirclark/kabirworld"", ""author"": ""kabirclark"", ""sha"": ""d8fed2cc5eafb2864e89342839ca9ddb85cbd745"", ""last_modified"": ""2024-12-31 12:11:56+00:00"", ""created_at"": ""2024-12-31 12:08:32+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""asteroid"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""asteroid"", ""code"", ""legal"", ""text-generation"", ""aa"", ""dataset:HuggingFaceFW/fineweb-2"", ""base_model:IamCreateAI/Ruyi-Mini-7B"", ""base_model:finetune:IamCreateAI/Ruyi-Mini-7B"", ""license:llama3.3"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- meta-llama/Llama-3.3-70B-Instruct\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- HuggingFaceFW/fineweb-2\nlanguage:\n- aa\nlibrary_name: asteroid\nlicense: llama3.3\nmetrics:\n- bleu\npipeline_tag: text-generation\ntags:\n- code\n- legal\nnew_version: meta-llama/Llama-3.3-70B-Instruct"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-31 12:11:56+00:00"", ""cardData"": ""base_model:\n- meta-llama/Llama-3.3-70B-Instruct\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- HuggingFaceFW/fineweb-2\nlanguage:\n- aa\nlibrary_name: asteroid\nlicense: llama3.3\nmetrics:\n- bleu\npipeline_tag: text-generation\ntags:\n- code\n- legal\nnew_version: meta-llama/Llama-3.3-70B-Instruct"", ""transformersInfo"": null, ""_id"": ""6773dec0a2128da37adb090e"", ""modelId"": ""kabirclark/kabirworld"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kabirclark/kabirworld&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkabirclark%2Fkabirworld%5D(%2Fkabirclark%2Fkabirworld)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
180
+ genoxan/renax,"---
181
+ license: c-uda
182
+ datasets:
183
+ - argilla/FinePersonas-v0.1
184
+ language:
185
+ - ar
186
+ metrics:
187
+ - charcut_mt
188
+ - bertscore
189
+ base_model:
190
+ - IamCreateAI/Ruyi-Mini-7B
191
+ new_version: IamCreateAI/Ruyi-Mini-7B
192
+ pipeline_tag: text-to-image
193
+ library_name: diffusers
194
+ tags:
195
+ - biology
196
+ ---","{""id"": ""genoxan/renax"", ""author"": ""genoxan"", ""sha"": ""54bf1d8afeb109c49501d14961c78d94ed62601d"", ""last_modified"": ""2025-01-06 03:28:22+00:00"", ""created_at"": ""2025-01-06 03:25:50+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""biology"", ""text-to-image"", ""ar"", ""dataset:argilla/FinePersonas-v0.1"", ""base_model:IamCreateAI/Ruyi-Mini-7B"", ""base_model:finetune:IamCreateAI/Ruyi-Mini-7B"", ""license:c-uda"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- argilla/FinePersonas-v0.1\nlanguage:\n- ar\nlibrary_name: diffusers\nlicense: c-uda\nmetrics:\n- charcut_mt\n- bertscore\npipeline_tag: text-to-image\ntags:\n- biology\nnew_version: IamCreateAI/Ruyi-Mini-7B"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-06 03:28:22+00:00"", ""cardData"": ""base_model:\n- IamCreateAI/Ruyi-Mini-7B\ndatasets:\n- argilla/FinePersonas-v0.1\nlanguage:\n- ar\nlibrary_name: diffusers\nlicense: c-uda\nmetrics:\n- charcut_mt\n- bertscore\npipeline_tag: text-to-image\ntags:\n- biology\nnew_version: IamCreateAI/Ruyi-Mini-7B"", ""transformersInfo"": null, ""_id"": ""677b4d3ec57d565bd33caa08"", ""modelId"": ""genoxan/renax"", ""usedStorage"": 
0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=genoxan/renax&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bgenoxan%2Frenax%5D(%2Fgenoxan%2Frenax)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
SuperNova-Medius_finetunes_20250426_212347.csv_finetunes_20250426_212347.csv ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ arcee-ai/SuperNova-Medius,"---
3
+ license: apache-2.0
4
+ library_name: transformers
5
+ tags:
6
+ - mergekit
7
+ - merge
8
+ base_model:
9
+ - Qwen/Qwen2.5-14B
10
+ model-index:
11
+ - name: SuperNova-Medius
12
+ results:
13
+ - task:
14
+ type: text-generation
15
+ name: Text Generation
16
+ dataset:
17
+ name: IFEval (0-Shot)
18
+ type: HuggingFaceH4/ifeval
19
+ args:
20
+ num_few_shot: 0
21
+ metrics:
22
+ - type: inst_level_strict_acc and prompt_level_strict_acc
23
+ value: 55.6
24
+ name: strict accuracy
25
+ source:
26
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius
27
+ name: Open LLM Leaderboard
28
+ - task:
29
+ type: text-generation
30
+ name: Text Generation
31
+ dataset:
32
+ name: BBH (3-Shot)
33
+ type: BBH
34
+ args:
35
+ num_few_shot: 3
36
+ metrics:
37
+ - type: acc_norm
38
+ value: 49.3
39
+ name: normalized accuracy
40
+ source:
41
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius
42
+ name: Open LLM Leaderboard
43
+ - task:
44
+ type: text-generation
45
+ name: Text Generation
46
+ dataset:
47
+ name: MATH Lvl 5 (4-Shot)
48
+ type: hendrycks/competition_math
49
+ args:
50
+ num_few_shot: 4
51
+ metrics:
52
+ - type: exact_match
53
+ value: 32.48
54
+ name: exact match
55
+ source:
56
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius
57
+ name: Open LLM Leaderboard
58
+ - task:
59
+ type: text-generation
60
+ name: Text Generation
61
+ dataset:
62
+ name: GPQA (0-shot)
63
+ type: Idavidrein/gpqa
64
+ args:
65
+ num_few_shot: 0
66
+ metrics:
67
+ - type: acc_norm
68
+ value: 17.9
69
+ name: acc_norm
70
+ source:
71
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius
72
+ name: Open LLM Leaderboard
73
+ - task:
74
+ type: text-generation
75
+ name: Text Generation
76
+ dataset:
77
+ name: MuSR (0-shot)
78
+ type: TAUR-Lab/MuSR
79
+ args:
80
+ num_few_shot: 0
81
+ metrics:
82
+ - type: acc_norm
83
+ value: 19.19
84
+ name: acc_norm
85
+ source:
86
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius
87
+ name: Open LLM Leaderboard
88
+ - task:
89
+ type: text-generation
90
+ name: Text Generation
91
+ dataset:
92
+ name: MMLU-PRO (5-shot)
93
+ type: TIGER-Lab/MMLU-Pro
94
+ config: main
95
+ split: test
96
+ args:
97
+ num_few_shot: 5
98
+ metrics:
99
+ - type: acc
100
+ value: 48.83
101
+ name: accuracy
102
+ source:
103
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius
104
+ name: Open LLM Leaderboard
105
+ ---
106
+
107
+ # Arcee-SuperNova-Medius
108
+
109
+ Arcee-SuperNova-Medius is a 14B parameter language model developed by Arcee.ai, built on the Qwen2.5-14B-Instruct architecture. This unique model is the result of a cross-architecture distillation pipeline, combining knowledge from both the Qwen2.5-72B-Instruct model and the Llama-3.1-405B-Instruct model. By leveraging the strengths of these two distinct architectures, SuperNova-Medius achieves high-quality instruction-following and complex reasoning capabilities in a mid-sized, resource-efficient form.
110
+
111
+ SuperNova-Medius is designed to excel in a variety of business use cases, including customer support, content creation, and technical assistance, while maintaining compatibility with smaller hardware configurations. It’s an ideal solution for organizations looking for advanced capabilities without the high resource requirements of larger models like our SuperNova-70B.
112
+
113
+ ## Distillation Overview
114
+
115
+ The development of SuperNova-Medius involved a sophisticated multi-teacher, cross-architecture distillation process, with the following key steps:
116
+
117
+ 1. **Logit Distillation from Llama 3.1 405B**:
118
+ - We distilled the logits of Llama 3.1 405B using an offline approach.
119
+ - The top K logits for each token were stored to capture most of the probability mass while managing storage requirements.
120
+
121
+ 2. **Cross-Architecture Adaptation**:
122
+ - Using `mergekit-tokensurgeon`, we created a version of Qwen2.5-14B that uses the vocabulary of Llama 3.1 405B.
123
+ - This allowed for the use of Llama 3.1 405B logits in training the Qwen-based model.
124
+
125
+ 3. **Distillation to Qwen Architecture**:
126
+ - The adapted Qwen2.5-14B model was trained using the stored 405B logits as the target.
127
+
128
+ 4. **Parallel Qwen Distillation**:
129
+ - In a separate process, Qwen2-72B was distilled into a 14B model.
130
+
131
+ 5. **Final Fusion and Fine-Tuning**:
132
+ - The Llama-distilled Qwen model's vocabulary was reverted to Qwen vocabulary.
133
+ - After re-aligning the vocabularies, a final fusion and fine-tuning step was conducted, using a specialized dataset from [EvolKit](https://github.com/arcee-ai/EvolKit) to ensure that SuperNova-Medius maintained coherence, fluency, and context understanding across a broad range of tasks.
134
+
135
+ ## Performance Evaluation
136
+
137
+ Below are the benchmark results of SuperNova-Medius compared to similar models in its class:
138
+
139
+ | Model | Average | IFEval | BBH | GPQA | MMLU Pro | MuSR | Math Level 5 |
140
+ | --- | --- | --- | --- | --- | --- | --- | --- |
141
+ | Mistral-Small 2409 | 0.423 | 0.628 | 0.581 | 0.333 | 0.410 | 0.406 | 0.181 |
142
+ | Supernova-Lite | 0.427 | 0.786 | 0.511 | 0.306 | 0.388 | 0.415 | 0.155 |
143
+ | Qwen2.5-14B-Instruct | 0.450 | 0.827 | 0.623 | 0.358 | 0.490 | 0.403 | 0.000 |
144
+ | Supernova-Medius | **0.480** | **0.832** | **0.631** | **0.359** | **0.502** | **0.402** | **0.152** |
145
+
146
+ SuperNova-Medius performs exceptionally well in instruction-following (IFEval) and complex reasoning tasks (BBH), demonstrating its capability to handle a variety of real-world scenarios. It outperforms Qwen2.5-14B and SuperNova-Lite in multiple benchmarks, making it a powerful yet efficient choice for high-quality generative AI applications.
147
+
148
+ ## Model Use Cases
149
+
150
+ Arcee-SuperNova-Medius is suitable for a range of applications, including:
151
+
152
+ - **Customer Support**: With its robust instruction-following and dialogue management capabilities, SuperNova-Medius can handle complex customer interactions, reducing the need for human intervention.
153
+ - **Content Creation**: The model’s advanced language understanding and generation abilities make it ideal for creating high-quality, coherent content across diverse domains.
154
+ - **Technical Assistance**: SuperNova-Medius has a deep reservoir of technical knowledge, making it an excellent assistant for programming, technical documentation, and other expert-level content creation.
155
+
156
+ ## Deployment Options
157
+
158
+ SuperNova-Medius is available for use under the Apache-2.0 license. For those who need even higher performance, the full-size 70B SuperNova model can be accessed via an Arcee-hosted API or for local deployment. To learn more or explore deployment options, please reach out to [sales@arcee.ai](mailto:sales@arcee.ai).
159
+
160
+ ## Technical Specifications
161
+
162
+ - **Model Architecture**: Qwen2.5-14B-Instruct
163
+ - **Distillation Sources**: Qwen2.5-72B-Instruct, Llama-3.1-405B-Instruct
164
+ - **Parameter Count**: 14 billion
165
+ - **Training Dataset**: Custom instruction dataset generated with [EvolKit](https://github.com/arcee-ai/EvolKit)
166
+ - **Distillation Technique**: Multi-architecture offline logit distillation with cross-architecture vocabulary alignment.
167
+
168
+ ## Summary
169
+
170
+ Arcee-SuperNova-Medius provides a unique balance of power, efficiency, and versatility. By distilling knowledge from two top-performing teacher models into a single 14B parameter model, SuperNova-Medius achieves results that rival larger models while maintaining a compact size ideal for practical deployment. Whether for customer support, content creation, or technical assistance, SuperNova-Medius is the perfect choice for organizations looking to leverage advanced language model capabilities in a cost-effective and accessible form.
171
+
172
+ # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
173
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_arcee-ai__SuperNova-Medius)
174
+
175
+ | Metric |Value|
176
+ |-------------------|----:|
177
+ |Avg. |37.22|
178
+ |IFEval (0-Shot) |55.60|
179
+ |BBH (3-Shot) |49.30|
180
+ |MATH Lvl 5 (4-Shot)|32.48|
181
+ |GPQA (0-shot) |17.90|
182
+ |MuSR (0-shot) |19.19|
183
+ |MMLU-PRO (5-shot) |48.83|
184
+
185
+ ","{""id"": ""arcee-ai/SuperNova-Medius"", ""author"": ""arcee-ai"", ""sha"": ""5ebd39abc00a3a93ae88cad11366ee9b89b67e17"", ""last_modified"": ""2024-10-28 19:24:03+00:00"", ""created_at"": ""2024-10-02 06:50:01+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2352, ""downloads_all_time"": null, ""likes"": 208, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""mergekit"", ""merge"", ""conversational"", ""base_model:Qwen/Qwen2.5-14B"", ""base_model:finetune:Qwen/Qwen2.5-14B"", ""license:apache-2.0"", ""model-index"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- Qwen/Qwen2.5-14B\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- mergekit\n- merge\nmodel-index:\n- name: SuperNova-Medius\n results:\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: IFEval (0-Shot)\n type: HuggingFaceH4/ifeval\n args:\n num_few_shot: 0\n metrics:\n - type: inst_level_strict_acc and prompt_level_strict_acc\n value: 55.6\n name: strict accuracy\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: BBH (3-Shot)\n type: BBH\n args:\n num_few_shot: 3\n metrics:\n - type: acc_norm\n value: 49.3\n name: normalized accuracy\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: MATH Lvl 5 (4-Shot)\n type: hendrycks/competition_math\n args:\n 
num_few_shot: 4\n metrics:\n - type: exact_match\n value: 32.48\n name: exact match\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: GPQA (0-shot)\n type: Idavidrein/gpqa\n args:\n num_few_shot: 0\n metrics:\n - type: acc_norm\n value: 17.9\n name: acc_norm\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: MuSR (0-shot)\n type: TAUR-Lab/MuSR\n args:\n num_few_shot: 0\n metrics:\n - type: acc_norm\n value: 19.19\n name: acc_norm\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: MMLU-PRO (5-shot)\n type: TIGER-Lab/MMLU-Pro\n config: main\n split: test\n args:\n num_few_shot: 5\n metrics:\n - type: acc\n value: 48.83\n name: accuracy\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": [{""name"": ""SuperNova-Medius"", ""results"": [{""task"": {""type"": ""text-generation"", ""name"": ""Text Generation""}, ""dataset"": {""name"": ""IFEval (0-Shot)"", ""type"": ""HuggingFaceH4/ifeval"", ""args"": {""num_few_shot"": 0}}, ""metrics"": [{""type"": ""inst_level_strict_acc and prompt_level_strict_acc"", ""value"": 55.6, ""name"": ""strict 
accuracy"", ""verified"": false}], ""source"": {""url"": ""https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius"", ""name"": ""Open LLM Leaderboard""}}, {""task"": {""type"": ""text-generation"", ""name"": ""Text Generation""}, ""dataset"": {""name"": ""BBH (3-Shot)"", ""type"": ""BBH"", ""args"": {""num_few_shot"": 3}}, ""metrics"": [{""type"": ""acc_norm"", ""value"": 49.3, ""name"": ""normalized accuracy"", ""verified"": false}], ""source"": {""url"": ""https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius"", ""name"": ""Open LLM Leaderboard""}}, {""task"": {""type"": ""text-generation"", ""name"": ""Text Generation""}, ""dataset"": {""name"": ""MATH Lvl 5 (4-Shot)"", ""type"": ""hendrycks/competition_math"", ""args"": {""num_few_shot"": 4}}, ""metrics"": [{""type"": ""exact_match"", ""value"": 32.48, ""name"": ""exact match"", ""verified"": false}], ""source"": {""url"": ""https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius"", ""name"": ""Open LLM Leaderboard""}}, {""task"": {""type"": ""text-generation"", ""name"": ""Text Generation""}, ""dataset"": {""name"": ""GPQA (0-shot)"", ""type"": ""Idavidrein/gpqa"", ""args"": {""num_few_shot"": 0}}, ""metrics"": [{""type"": ""acc_norm"", ""value"": 17.9, ""name"": ""acc_norm"", ""verified"": false}], ""source"": {""url"": ""https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius"", ""name"": ""Open LLM Leaderboard""}}, {""task"": {""type"": ""text-generation"", ""name"": ""Text Generation""}, ""dataset"": {""name"": ""MuSR (0-shot)"", ""type"": ""TAUR-Lab/MuSR"", ""args"": {""num_few_shot"": 0}}, ""metrics"": [{""type"": ""acc_norm"", ""value"": 19.19, ""name"": ""acc_norm"", ""verified"": false}], ""source"": {""url"": 
""https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius"", ""name"": ""Open LLM Leaderboard""}}, {""task"": {""type"": ""text-generation"", ""name"": ""Text Generation""}, ""dataset"": {""name"": ""MMLU-PRO (5-shot)"", ""type"": ""TIGER-Lab/MMLU-Pro"", ""config"": ""main"", ""split"": ""test"", ""args"": {""num_few_shot"": 5}}, ""metrics"": [{""type"": ""acc"", ""value"": 48.83, ""name"": ""accuracy"", ""verified"": false}], ""source"": {""url"": ""https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius"", ""name"": ""Open LLM Leaderboard""}}]}], ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are SuperNova-Lite, an intelligent agent created by Arcee AI. Act as a helpful assistant.' }}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are SuperNova-Lite, an intelligent agent created by Arcee AI. 
Act as a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""logikon/open_cot_leaderboard"", ""cot-leaderboard/open-cot-dashboard"", ""vortex123/SuperNova-Medius"", ""fudyadev/arcee-ai-SuperNova-Medius"", ""LLAI2004/arcee-ai-SuperNova-Medius"", ""H4RDC0123/arcee-ai-SuperNova-Medius"", ""freecad1211/arcee-ai-SuperNova-Medius"", ""atlas0461854/arcee-ai-SuperNova-Medius""], ""safetensors"": {""parameters"": {""BF16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-28 19:24:03+00:00"", ""cardData"": ""base_model:\n- Qwen/Qwen2.5-14B\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- mergekit\n- merge\nmodel-index:\n- name: SuperNova-Medius\n results:\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: IFEval (0-Shot)\n type: HuggingFaceH4/ifeval\n args:\n num_few_shot: 0\n metrics:\n - type: inst_level_strict_acc and prompt_level_strict_acc\n value: 55.6\n 
name: strict accuracy\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: BBH (3-Shot)\n type: BBH\n args:\n num_few_shot: 3\n metrics:\n - type: acc_norm\n value: 49.3\n name: normalized accuracy\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: MATH Lvl 5 (4-Shot)\n type: hendrycks/competition_math\n args:\n num_few_shot: 4\n metrics:\n - type: exact_match\n value: 32.48\n name: exact match\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: GPQA (0-shot)\n type: Idavidrein/gpqa\n args:\n num_few_shot: 0\n metrics:\n - type: acc_norm\n value: 17.9\n name: acc_norm\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: MuSR (0-shot)\n type: TAUR-Lab/MuSR\n args:\n num_few_shot: 0\n metrics:\n - type: acc_norm\n value: 19.19\n name: acc_norm\n verified: false\n source:\n url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: MMLU-PRO (5-shot)\n type: TIGER-Lab/MMLU-Pro\n config: main\n split: test\n args:\n num_few_shot: 5\n metrics:\n - type: acc\n value: 48.83\n name: accuracy\n verified: false\n source:\n url: 
https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius\n name: Open LLM Leaderboard"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66fced1963002e1de073c9b9"", ""modelId"": ""arcee-ai/SuperNova-Medius"", ""usedStorage"": 29558587445}",0,"https://huggingface.co/allura-org/TQ2.5-14B-Neon-v1, https://huggingface.co/mlx-community/SuperNova-Medius-bf16, https://huggingface.co/allura-org/TQ2.5-14B-Sugarquill-v1, https://huggingface.co/Ttimofeyka/Tissint-14B-128k-RP, https://huggingface.co/lucyknada/allura-org_TQ2.5-14B-Sugarquill-v1-exl2",5,https://huggingface.co/ToastyPigeon/supernova-medius-adventure-s-qlora,1,"https://huggingface.co/arcee-ai/SuperNova-Medius-GGUF, https://huggingface.co/bartowski/SuperNova-Medius-GGUF, https://huggingface.co/Triangle104/SuperNova-Medius-Q4_K_S-GGUF, https://huggingface.co/Triangle104/SuperNova-Medius-Q4_K_M-GGUF, https://huggingface.co/Triangle104/SuperNova-Medius-Q5_K_S-GGUF, https://huggingface.co/Triangle104/SuperNova-Medius-Q5_K_M-GGUF, https://huggingface.co/Triangle104/SuperNova-Medius-Q6_K-GGUF, https://huggingface.co/Triangle104/SuperNova-Medius-Q8_0-GGUF, https://huggingface.co/mradermacher/SuperNova-Medius-GGUF, https://huggingface.co/noneUsername/SuperNova-Medius-W8A8-Dynamic-Per-Token, https://huggingface.co/mradermacher/SuperNova-Medius-i1-GGUF, https://huggingface.co/DevQuasar/arcee-ai.SuperNova-Medius-GGUF, https://huggingface.co/mlx-community/SuperNova-Medius-8bit, https://huggingface.co/mlx-community/SuperNova-Medius-4bit, https://huggingface.co/Jianping746/SuperNova-Medius-Q5_K_M-GGUF, https://huggingface.co/tensorblock/SuperNova-Medius-GGUF, https://huggingface.co/nfunctor/SuperNova-Medius-FP8-Dynamic",17,"https://huggingface.co/CultriX/Qwen2.5-14B-Wernicke, https://huggingface.co/Sakalti/Saba2-14B-Preview, 
https://huggingface.co/djuna/Q2.5-Veltha-14B, https://huggingface.co/djuna/Q2.5-Veltha-14B-0.5, https://huggingface.co/RDson/WomboCombo-R1-Coder-14B-Preview, https://huggingface.co/nlpguy/Lion-Lamarck-v.1.0.8, https://huggingface.co/nlpguy/Lion-Lamarck-v.1.0.9, https://huggingface.co/nlpguy/Lion-Lamarck-v.1.1.0, https://huggingface.co/RDson/CoderO1-14B-Preview, https://huggingface.co/YOYO-AI/Qwen2.5-14B-YOYO-V4-p3, https://huggingface.co/YOYO-AI/Qwen2.5-14B-YOYO-V4, https://huggingface.co/YOYO-AI/ZYH-LLM-Qwen2.5-14B-V4, https://huggingface.co/YOYO-AI/Qwen2.5-14B-YOYO-V5, https://huggingface.co/spacematt/Qwen2.5-Channel-Coder-14B-Instruct, https://huggingface.co/LyraNovaHeart/Celestial-Harmony-14b-v1.0-Experimental-1015, https://huggingface.co/LyraNovaHeart/Celestial-Harmony-14b-v1.0-Experimental-1016, https://huggingface.co/QuantFactory/Celestial-Harmony-14b-v1.0-Experimental-1016-GGUF, https://huggingface.co/mav23/Celestial-Harmony-14b-v1.0-Experimental-1016-GGUF, https://huggingface.co/win10/EVA-Meissa-mini-pro, https://huggingface.co/SzilviaB/SuperNova-Qwen-14B, https://huggingface.co/SzilviaB/Qwen-Supernova-14B, https://huggingface.co/QuantFactory/Q2.5-Veltha-14B-GGUF, https://huggingface.co/QuantFactory/Q2.5-Veltha-14B-0.5-GGUF, https://huggingface.co/mergekit-community/Qwen2.5-14B-Merge, https://huggingface.co/CeeJay79/QC_SN-M_14B, https://huggingface.co/mergekit-community/mergekit-sce-nqekivp, https://huggingface.co/attashe/q-2.5-deepseek-r1-veltha-v0.4, https://huggingface.co/RDson/CoderO1-14B-Preview-v2, https://huggingface.co/mergekit-community/Qwen2.5-14B-della-Nova-dpo, https://huggingface.co/pratiknarola/ZYH-LLM-Qwen2.5-14B-V4-GGUF, https://huggingface.co/hardlyworking/MediusMerge",31,"H4RDC0123/arcee-ai-SuperNova-Medius, LLAI2004/arcee-ai-SuperNova-Medius, atlas0461854/arcee-ai-SuperNova-Medius, cot-leaderboard/open-cot-dashboard, eduagarcia/open_pt_llm_leaderboard, freecad1211/arcee-ai-SuperNova-Medius, fudyadev/arcee-ai-SuperNova-Medius, 
huggingface/InferenceSupport/discussions/new?title=arcee-ai/SuperNova-Medius&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Barcee-ai%2FSuperNova-Medius%5D(%2Farcee-ai%2FSuperNova-Medius)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, logikon/open_cot_leaderboard, open-llm-leaderboard/open_llm_leaderboard?query=arcee-ai/SuperNova-Medius, vortex123/SuperNova-Medius",11
186
+ https://huggingface.co/allura-org/TQ2.5-14B-Neon-v1,N/A,N/A,1,,0,,0,,0,,0,,0
187
+ mlx-community/SuperNova-Medius-bf16,"---
188
+ base_model: arcee-ai/SuperNova-Medius
189
+ library_name: transformers
190
+ license: apache-2.0
191
+ tags:
192
+ - mergekit
193
+ - merge
194
+ - mlx
195
+ ---
196
+
197
+ # mlx-community/SuperNova-Medius-bf16
198
+
199
+ The Model [mlx-community/SuperNova-Medius-bf16](https://huggingface.co/mlx-community/SuperNova-Medius-bf16) was converted to MLX format from [arcee-ai/SuperNova-Medius](https://huggingface.co/arcee-ai/SuperNova-Medius) using mlx-lm version **0.19.0**.
200
+
201
+ ## Use with mlx
202
+
203
+ ```bash
204
+ pip install mlx-lm
205
+ ```
206
+
207
+ ```python
208
+ from mlx_lm import load, generate
209
+
210
+ model, tokenizer = load(""mlx-community/SuperNova-Medius-bf16"")
211
+
212
+ prompt=""hello""
213
+
214
+ if hasattr(tokenizer, ""apply_chat_template"") and tokenizer.chat_template is not None:
215
+ messages = [{""role"": ""user"", ""content"": prompt}]
216
+ prompt = tokenizer.apply_chat_template(
217
+ messages, tokenize=False, add_generation_prompt=True
218
+ )
219
+
220
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
221
+ ```
222
+ ","{""id"": ""mlx-community/SuperNova-Medius-bf16"", ""author"": ""mlx-community"", ""sha"": ""8965f40a6a8c6f982446c0df64fea18242b04b4d"", ""last_modified"": ""2024-10-13 00:17:03+00:00"", ""created_at"": ""2024-10-11 21:32:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 4, ""downloads_all_time"": null, ""likes"": 2, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""qwen2"", ""text-generation"", ""mergekit"", ""merge"", ""mlx"", ""conversational"", ""base_model:arcee-ai/SuperNova-Medius"", ""base_model:finetune:arcee-ai/SuperNova-Medius"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: arcee-ai/SuperNova-Medius\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- mergekit\n- merge\n- mlx"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are SuperNova-Lite, an intelligent agent created by Arcee AI. Act as a helpful assistant.' 
}}\n {%- endif %}\n {{- \""\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\"" }}\n {%- for tool in tools %}\n {{- \""\\n\"" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \""\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\""name\\\"": <function-name>, \\\""arguments\\\"": <args-json-object>}\\n</tool_call><|im_end|>\\n\"" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are SuperNova-Lite, an intelligent agent created by Arcee AI. Act as a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \""user\"") or (message.role == \""system\"" and not loop.first) or (message.role == \""assistant\"" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \""assistant\"" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\""name\"": \""' }}\n {{- tool_call.name }}\n {{- '\"", \""arguments\"": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \""tool\"" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \""tool\"") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \""tool\"") 
%}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00006.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 14770033664}, ""total"": 14770033664}, ""security_repo_status"": null, ""xet_enabled"": null, 
""lastModified"": ""2024-10-13 00:17:03+00:00"", ""cardData"": ""base_model: arcee-ai/SuperNova-Medius\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- mergekit\n- merge\n- mlx"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67099963705db29c00c5b687"", ""modelId"": ""mlx-community/SuperNova-Medius-bf16"", ""usedStorage"": 29551555772}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mlx-community/SuperNova-Medius-bf16&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmlx-community%2FSuperNova-Medius-bf16%5D(%2Fmlx-community%2FSuperNova-Medius-bf16)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
223
+ https://huggingface.co/allura-org/TQ2.5-14B-Sugarquill-v1,N/A,N/A,1,,0,,0,,0,,0,,0
224
+ Ttimofeyka/Tissint-14B-128k-RP,"---
225
+ base_model:
226
+ - arcee-ai/SuperNova-Medius
227
+ library_name: transformers
228
+ license: apache-2.0
229
+ tags:
230
+ - unsloth
231
+ - trl
232
+ - sft
233
+ ---
234
+ # Tissint-14B-128k-RP
235
+ ---
236
+ ![Chat Example](https://i.ibb.co/VqgjW3H/image.png)
237
+ ---
238
+ The model is based on [SuperNova-Medius](https://huggingface.co/arcee-ai/SuperNova-Medius) (as the current best 14B model) with a 128k context with an emphasis on creativity, including NSFW and multi-turn conversations.
239
+
240
+ According to my tests, this finetune is much more stable with different samplers than the original model. Censorship and refusals have been reduced.
241
+
242
+ The model started to follow the system prompt better, and the responses in ChatML format with bad samplers stopped reaching 800+ tokens for no reason.
243
+ ### Chat Template - ChatML
244
+ ## Samplers
245
+
246
+ ### Balance
247
+ ```
248
+ Temp : 0.8 - 1.15
249
+ Min P : 0.1
250
+
251
+ Repetition Penalty : 1.02
252
+
253
+ DRY 0.8, 1.75, 2, 2048 (change to 4096 or more if needed)
254
+ ```
255
+
256
+ ### Creativity
257
+ ```
258
+ Temp : 1.15 - 1.5
259
+ Top P : 0.9
260
+
261
+ Repetition Penalty : 1.03
262
+
263
+ DRY 0.82, 1.75, 2, 2048 (change to 4096 or more if needed)
264
+ ```","{""id"": ""Ttimofeyka/Tissint-14B-128k-RP"", ""author"": ""Ttimofeyka"", ""sha"": ""b7ea476bea786a87ec67ed887bf914340052142b"", ""last_modified"": ""2024-11-10 19:04:21+00:00"", ""created_at"": ""2024-11-09 16:36:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 5, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""qwen2"", ""text-generation"", ""unsloth"", ""trl"", ""sft"", ""conversational"", ""base_model:arcee-ai/SuperNova-Medius"", ""base_model:finetune:arcee-ai/SuperNova-Medius"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- arcee-ai/SuperNova-Medius\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- unsloth\n- trl\n- sft"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""Qwen2ForCausalLM""], ""model_type"": ""qwen2"", ""tokenizer_config"": {""bos_token"": null, ""chat_template"": ""{% if 'role' in messages[0] %}{% for message in messages %}{% if message['role'] == 'user' %}{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}{% else %}{% for message in messages %}{% if message['from'] == 'human' %}{{'<|im_start|>user\n' + message['value'] + 
'<|im_end|>\n'}}{% elif message['from'] == 'gpt' %}{{'<|im_start|>assistant\n' + message['value'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['value'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00006.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-10 19:04:21+00:00"", ""cardData"": ""base_model:\n- arcee-ai/SuperNova-Medius\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- unsloth\n- trl\n- sft"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""672f8f7948b31810a2690d52"", ""modelId"": ""Ttimofeyka/Tissint-14B-128k-RP"", ""usedStorage"": 59091823738}",1,"https://huggingface.co/Ttimofeyka/Tissint-14B-v1.2-128k-RP, https://huggingface.co/Ttimofeyka/Tissint-14B-v1.1-128k-RP",2,,0,"https://huggingface.co/mradermacher/Tissint-14B-128k-RP-GGUF, https://huggingface.co/mradermacher/Tissint-14B-128k-RP-i1-GGUF",2,,0,huggingface/InferenceSupport/discussions/new?title=Ttimofeyka/Tissint-14B-128k-RP&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTtimofeyka%2FTissint-14B-128k-RP%5D(%2FTtimofeyka%2FTissint-14B-128k-RP)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
265
+ https://huggingface.co/Ttimofeyka/Tissint-14B-v1.2-128k-RP,N/A,N/A,2,,0,,0,,0,,0,,0
266
+ https://huggingface.co/Ttimofeyka/Tissint-14B-v1.1-128k-RP,N/A,N/A,2,,0,,0,,0,,0,,0
267
+ https://huggingface.co/lucyknada/allura-org_TQ2.5-14B-Sugarquill-v1-exl2,N/A,N/A,1,,0,,0,,0,,0,,0
Van-Gogh-diffusion_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ dallinmackay/Van-Gogh-diffusion,"---
3
+ license: creativeml-openrail-m
4
+ thumbnail: ""https://huggingface.co/dallinmackay/Van-Gogh-diffusion/resolve/main/preview1.jpg""
5
+ tags:
6
+ - stable-diffusion
7
+ - text-to-image
8
+ ---
9
+ ### Van Gogh Diffusion
10
+
11
+ v2 - fixed and working
12
+
13
+ This is a fine-tuned Stable Diffusion model (based on v1.5) trained on screenshots from the film **_Loving Vincent_**. Use the token **_lvngvncnt_** at the BEGINNING of your prompts to use the style (e.g., ""lvngvncnt, beautiful woman at sunset""). This model works best with the Euler sampler (NOT Euler_a).
14
+
15
+ _Download the ckpt file from ""files and versions"" tab into the stable diffusion models folder of your web-ui of choice._
16
+
17
+ If you get too many yellow faces or you dont like the strong blue bias, simply put them in the negative prompt (e.g., ""Yellow face, blue"").
18
+
19
+ --
20
+
21
+ **Characters rendered with this model:**
22
+ ![Character Samples](https://huggingface.co/dallinmackay/Van-Gogh-diffusion/resolve/main/preview1.jpg)
23
+ _prompt and settings used: **lvngvncnt, [person], highly detailed** | **Steps: 25, Sampler: Euler, CFG scale: 6**_
24
+
25
+ --
26
+
27
+ **Landscapes/miscellaneous rendered with this model:**
28
+ ![Landscape Samples](https://huggingface.co/dallinmackay/Van-Gogh-diffusion/resolve/main/preview2.jpg)
29
+ _prompt and settings used: **lvngvncnt, [subject/setting], highly detailed** | **Steps: 25, Sampler: Euler, CFG scale: 6**_
30
+
31
+ --
32
+
33
+ This model was trained with Dreambooth, using TheLastBen colab notebook
34
+ --
35
+ ### 🧨 Diffusers
36
+
37
+ This model can be used just like any other Stable Diffusion model. For more information,
38
+ please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
39
+
40
+ You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX]().
41
+
42
+ ```python
43
+ from diffusers import StableDiffusionPipeline
44
+ import torch
45
+
46
+ model_id = ""dallinmackay/Van-Gogh-diffusion""
47
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
48
+ pipe = pipe.to(""cuda"")
49
+
50
+ prompt = ""lvngvncnt, beautiful woman at sunset""
51
+ image = pipe(prompt).images[0]
52
+
53
+ image.save(""./sunset.png"")
54
+ ```
55
+
56
+ ## License
57
+
58
+ This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
59
+ The CreativeML OpenRAIL License specifies:
60
+
61
+ 1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content
62
+ 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
63
+ 3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
64
+ [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
65
+
66
+ --
67
+ [![Become A Patreon](https://badgen.net/badge/become/a%20patron/F96854)](https://www.patreon.com/dallinmackay)","{""id"": ""dallinmackay/Van-Gogh-diffusion"", ""author"": ""dallinmackay"", ""sha"": ""b02f2d853812d352e9d9023c2aae091878f1ee1e"", ""last_modified"": ""2023-05-16 09:25:20+00:00"", ""created_at"": ""2022-11-05 00:26:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 769, ""downloads_all_time"": null, ""likes"": 282, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""stable-diffusion"", ""text-to-image"", ""license:creativeml-openrail-m"", ""autotrain_compatible"", ""endpoints_compatible"", ""diffusers:StableDiffusionPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\nthumbnail: https://huggingface.co/dallinmackay/Van-Gogh-diffusion/resolve/main/preview1.jpg"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""StableDiffusionPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Van-Gogh-Style-lvngvncnt-v2.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='feature_extractor/preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preview1.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preview2.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='safety_checker/pytorch_model.bin', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""anzorq/finetuned_diffusion"", ""Yntec/ToyWorld"", ""Yntec/PrintingPress"", ""Nymbo/image_gen_supaqueue"", ""ennov8ion/3dart-Models"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""sanaweb/text-to-image"", ""BilalSardar/Text-To-image-AllModels"", ""AdamOswald1/finetuned_diffusion"", ""Vedits/6x_Image_diffusion"", ""John6666/Diffusion80XX4sg"", ""ennov8ion/comicbook-models"", ""John6666/PrintingPress4"", ""Nickhilearla135095/maximum_diffusion"", ""SUPERSHANKY/Finetuned_Diffusion_Max"", ""PeepDaSlan9/B2BMGMT_Diffusion60XX"", ""Joeythemonster/Text-To-image-AllModels"", ""Evel/Evel_Space"", ""Daniela-C/6x_Image_diffusion"", ""Dao3/Text-To-image-AllModels"", ""phenixrhyder/PrintingPress"", ""John6666/hfd_test_nostopbutton"", ""mindtube/Diffusion50XX"", ""Nymbo/Diffusion80XX4sg"", ""kaleidoskop-hug/PrintingPress"", ""ennov8ion/stablediffusion-models"", ""ReiPlush64/finetuned_diffusion"", ""John6666/ToyWorld4"", 
""Omnibus-archive/Diffusion-Flood"", ""grzegorz2047/fast_diffusion"", ""Alfasign/dIFFU"", ""Nymbo/PrintingPress"", ""Rifd/Sdallmodels"", ""John6666/Diffusion80XX4g"", ""NativeAngels/HuggingfaceDiffusion"", ""ennov8ion/Scifi-Models"", ""ennov8ion/semirealistic-models"", ""Jackflack09/finetuned_diffusion2"", ""ennov8ion/dreamlike-models"", ""ennov8ion/FantasyArt-Models"", ""noes14155/img_All_models"", ""AnimeStudio/anime-models"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""deaf1296/finetuned_diffusion"", ""Lyra121/finetuned_diffusion"", ""hylee/finetuned_diffusion"", ""Rooni/finetuned_diffusion"", ""RhythmRemix14/PrintingPressDx"", ""sohoso/PrintingPress"", ""NativeAngels/ToyWorld"", ""gusvd/dallinmackay-Van-Gogh-diffusion"", ""Harshveer/Finetuned_Diffusion_Max"", ""gato001k1/maximum_diffusion0k"", ""mindtube/maximum_multiplier_places"", ""animeartstudio/ArtModels"", ""Binettebob22/fast_diffusion2"", ""pikto/Elite-Scifi-Models"", ""PixelistStudio/3dart-Models"", ""devmiles/zexxiai"", ""Nymbo/Diffusion60XX"", ""TheKitten/Images"", ""ennov8ion/anime-models"", ""jordonpeter01/Diffusion70"", ""Joeythemonster/finetuned_diffusion"", ""kazumak/sdspace"", ""prikmmo9/finetuned_diffusion"", ""georgesX/finetuned_diffusion"", ""kerria/finetuned_diffusion"", ""ygtrfed/pp-web-ui"", ""ivanmeyer/Finetuned_Diffusion_Max"", ""ennov8ion/Landscapes-models"", ""ucmisanddisinfo/thisApp"", ""johann22/chat-diffusion"", ""Omnibus/2-button-Story-Board"", ""manivannan7gp/Words2Image"", ""ennov8ion/art-models"", ""ennov8ion/photo-models"", ""ennov8ion/art-multi"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""burman-ai/Printing-Press"", ""sk16er/ghibli_creator"", ""amanullahmenjli/tifusion"", ""BudakMabuk/finetuned_diffusion"", ""WagnerPPA/finetuned_diffusion"", ""refugelu/finetuned_diffusion"", ""chantysothy/Evel_Space"", 
""Frei/finetuned_diffusion"", ""shibili/finetuned_diffusion"", ""Camjo11/Evel_Space"", ""Senpaisora6/Evel_Space"", ""fractalneuron/Evel_Space"", ""emkdb/Evel_Space"", ""slayrrc/finetuned_diffusion""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-05-16 09:25:20+00:00"", ""cardData"": ""license: creativeml-openrail-m\ntags:\n- stable-diffusion\n- text-to-image\nthumbnail: https://huggingface.co/dallinmackay/Van-Gogh-diffusion/resolve/main/preview1.jpg"", ""transformersInfo"": null, ""_id"": ""6365ada125aa3bd177d62244"", ""modelId"": ""dallinmackay/Van-Gogh-diffusion"", ""usedStorage"": 21348820339}",0,,0,,0,,0,,0,"CompVis/stable-diffusion-license, Daniela-C/6x_Image_diffusion, Joeythemonster/Text-To-image-AllModels, John6666/Diffusion80XX4sg, John6666/PrintingPress4, John6666/hfd_test_nostopbutton, Nymbo/image_gen_supaqueue, PeepDaSlan9/B2BMGMT_Diffusion60XX, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/blitz_diffusion, huggingface/InferenceSupport/discussions/new?title=dallinmackay/Van-Gogh-diffusion&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdallinmackay%2FVan-Gogh-diffusion%5D(%2Fdallinmackay%2FVan-Gogh-diffusion)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kaleidoskop-hug/PrintingPress, phenixrhyder/NSFW-ToyWorld",14
Wayfarer-12B_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ LatitudeGames/Wayfarer-12B,"---
3
+ license: apache-2.0
4
+ language:
5
+ - en
6
+ base_model:
7
+ - mistralai/Mistral-Nemo-Base-2407
8
+ tags:
9
+ - text adventure
10
+ - roleplay
11
+ library_name: transformers
12
+ ---
13
+
14
+ ![image/jpeg](wayfarer.jpg)
15
+
16
+ # Wayfarer-12B
17
+ We’ve heard over and over from AI Dungeon players that modern AI models are too nice, never letting them fail or die. While it may be good for a chatbot to be nice and helpful, great stories and games aren’t all rainbows and unicorns. They have conflict, tension, and even death. These create real stakes and consequences for characters and the journeys they go on.
18
+
19
+ Similarly, great games need opposition. You must be able to fail, die, and may even have to start over. This makes games more fun!
20
+
21
+ However, the vast majority of AI models, through alignment RLHF, have been trained away from darkness, violence, or conflict, preventing them from fulfilling this role. To give our players better options, we decided to train our own model to fix these issues.
22
+
23
+ Wayfarer is an adventure role-play model specifically trained to give players a challenging and dangerous experience. We thought they would like it, but since releasing it on AI Dungeon, players have reacted even more positively than we expected.
24
+
25
+ Because they loved it so much, we’ve decided to open-source the model so anyone can experience unforgivingly brutal AI adventures! Anyone can download the model to run locally.
26
+
27
+ Or if you want to easily try this model for free, you can do so at [https://aidungeon.com](https://aidungeon.com/).
28
+
29
+ We plan to continue improving and open-sourcing similar models, so please share any and all feedback on how we can improve model behavior. Below we share more details on how Wayfarer was created.
30
+
31
+ [Quantized GGUF weights can be downloaded here.](https://huggingface.co/LatitudeGames/Wayfarer-12B-GGUF)
32
+
33
+ ## Model details
34
+ Wayfarer 12B was trained on top of the Nemo base model using a two-stage SFT approach, with the first stage containing 180K chat-formatted instruct data instances and the second stage consisting of a 50/50 mixture of synthetic 8k context text adventures and roleplay experiences.
35
+
36
+ ## How It Was Made
37
+ Wayfarer’s text adventure data was generated by simulating playthroughs of published character creator scenarios from AI Dungeon. Five distinct user archetypes played through each scenario, whose character starts all varied in faction, location, etc. to generate five unique samples.
38
+
39
+ One language model played the role of narrator, with the other playing the user. They were blind to each other’s underlying logic, so the user was actually capable of surprising the narrator with their choices. Each simulation was allowed to run for 8k tokens or until the main character died.
40
+
41
+ Wayfarer’s general emotional sentiment is one of pessimism, where failure is frequent and plot armor does not exist. This serves to counter the positivity bias so inherent in our language models nowadays.
42
+
43
+ ## Inference
44
+ The Nemo architecture is known for being sensitive to higher temperatures, so the following settings are recommended as a baseline. Nothing stops you from experimenting with these, of course.
45
+
46
+ ```
47
+ ""temperature"": 0.8,
48
+ ""repetition_penalty"": 1.05,
49
+ ""min_p"": 0.025
50
+ ```
51
+
52
+ ## Limitations
53
+ Wayfarer was trained exclusively on second-person present tense data (using “you”) in a narrative style. Other styles will work as well but may produce suboptimal results.
54
+
55
+ Additionally, Wayfarer was trained exclusively on single-turn chat data.
56
+
57
+ ## Prompt Format
58
+ ChatML was used for both finetuning stages.
59
+
60
+ ```
61
+ <|im_start|>system
62
+ You're a masterful storyteller and gamemaster. Write in second person present tense (You are), crafting vivid, engaging narratives with authority and confidence.<|im_end|>
63
+ <|im_start|>user
64
+ > You peer into the darkness.<|im_end|>
65
+ <|im_start|>assistant
66
+ You have been eaten by a grue.
67
+
68
+ GAME OVER<|im_end|>
69
+ ```
70
+
71
+ ## Credits
72
+ Thanks to [Gryphe Padar](https://huggingface.co/Gryphe) for collaborating on this finetune with us!","{""id"": ""LatitudeGames/Wayfarer-12B"", ""author"": ""LatitudeGames"", ""sha"": ""d447dc49b22394fef3ad9b804037c02634aa8ff3"", ""last_modified"": ""2025-01-17 14:49:47+00:00"", ""created_at"": ""2025-01-03 09:33:16+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 15086, ""downloads_all_time"": null, ""likes"": 193, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""mistral"", ""text-generation"", ""text adventure"", ""roleplay"", ""conversational"", ""en"", ""base_model:mistralai/Mistral-Nemo-Base-2407"", ""base_model:finetune:mistralai/Mistral-Nemo-Base-2407"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- mistralai/Mistral-Nemo-Base-2407\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- text adventure\n- roleplay"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""MistralForCausalLM""], ""model_type"": ""mistral"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>""}}, 
""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00005.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='wayfarer.jpg', size=None, blob_id=None, lfs=None)""], ""spaces"": [""KBaba7/Quant"", ""bhaskartripathi/LLM_Quantization"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""ruslanmv/convert_to_gguf"", ""bobscorporation/LatitudeGames-Wayfarer-12B"", ""K00B404/LLM_Quantization""], ""safetensors"": {""parameters"": {""BF16"": 12247802880}, ""total"": 12247802880}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-17 14:49:47+00:00"", ""cardData"": ""base_model:\n- mistralai/Mistral-Nemo-Base-2407\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- text adventure\n- 
roleplay"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6777aedcbbd0ef497bf5bf8b"", ""modelId"": ""LatitudeGames/Wayfarer-12B"", ""usedStorage"": 24512726732}",0,,0,,0,"https://huggingface.co/LatitudeGames/Wayfarer-12B-GGUF, https://huggingface.co/Triangle104/Wayfarer-12B-Q5_K_S-GGUF, https://huggingface.co/mradermacher/Wayfarer-12B-GGUF, https://huggingface.co/mradermacher/Wayfarer-12B-i1-GGUF, https://huggingface.co/mlx-community/Wayfarer-12B-6bit, https://huggingface.co/mlx-community/Wayfarer-12B-4bit, https://huggingface.co/mlx-community/Wayfarer-12B-8bit, https://huggingface.co/tensorblock/Wayfarer-12B-GGUF, https://huggingface.co/bartowski/Wayfarer-12B-GGUF, https://huggingface.co/Zuellni/Wayfarer-12B-5.0bpw-exl2, https://huggingface.co/Zuellni/Wayfarer-12B-6.0bpw-exl2, https://huggingface.co/Triangle104/Wayfarer-12B-Q8_0-GGUF, https://huggingface.co/Triangle104/Wayfarer-12B-Q4_K_S-GGUF, https://huggingface.co/Triangle104/Wayfarer-12B-Q4_K_M-GGUF, https://huggingface.co/Triangle104/Wayfarer-12B-Q5_K_M-GGUF, https://huggingface.co/Triangle104/Wayfarer-12B-Q6_K-GGUF, https://huggingface.co/Jellon/Wayfarer-12B-exl2-6bpw, https://huggingface.co/DevQuasar/LatitudeGames.Wayfarer-12B-GGUF, https://huggingface.co/Jellon/Wayfarer-12B-exl2-4bpw, https://huggingface.co/mrfakename/Wayfarer-12B-Q4-mlx, https://huggingface.co/noneUsername/Wayfarer-12B-W8A8, https://huggingface.co/waldie/Wayfarer-12B-8bpw-h8-exl2",22,"https://huggingface.co/Lambent/Silver5-Nemo-12B, https://huggingface.co/redrix/wuriaee-12B-schizostock, https://huggingface.co/redrix/sororicide-12B-Farer-Mell-Unslop, https://huggingface.co/redrix/GodSlayer-12B-ABYSS, https://huggingface.co/Nitral-AI/Wayfarer_Eris_Noctis-12B, https://huggingface.co/Aleteian/Pathfinder-RP-12B-RU, https://huggingface.co/KatyTheCutie/Repose-12B, https://huggingface.co/ClaudioItaly/Sensorial-V2-12B, 
https://huggingface.co/Aleteian/Legend-of-the-Four-Winds-MN-12B, https://huggingface.co/Aleteian/Way-to-Unseen-Horizon-MN-12B, https://huggingface.co/mergekit-community/MN-Sappho-a-12B, https://huggingface.co/mergekit-community/MN-Sappho-b-12B, https://huggingface.co/mergekit-community/MN-Sappho-c-12B, https://huggingface.co/KatyTheCutie/Repose-V2-A2, https://huggingface.co/KatyTheCutie/Repose-V2-6O, https://huggingface.co/Aleteian/Legend-of-the-Four-Winds-2-MN-12B, https://huggingface.co/mergekit-community/MN-Sappho-e-12B, https://huggingface.co/mergekit-community/MN-Sappho-g-12B, https://huggingface.co/mergekit-community/MN-Sappho-g2-12B, https://huggingface.co/Aleteian/Hello-Darkness-My-Old-Friend-MN-12B, https://huggingface.co/mergekit-community/MN-Sappho-l-12B, https://huggingface.co/mergekit-community/MN-Sappho-m-12B, https://huggingface.co/mergekit-community/MN-Sappho-g3-12B, https://huggingface.co/yamatazen/Aurora-SCE-12B, https://huggingface.co/mergekit-community/MN-Sappho-n-12B, https://huggingface.co/mergekit-community/MN-Sappho-n2-12B, https://huggingface.co/SanXM1/Driftwood-12B, https://huggingface.co/DoppelReflEx/MN-12B-Mimicore-Nocturne, https://huggingface.co/mergekit-community/MN-Anathema-12B, https://huggingface.co/mergekit-community/MN-Hecate-Chthonia-12B, https://huggingface.co/mergekit-community/MN-Hekate-Geneteira-12B, https://huggingface.co/Triangle104/Wayfare-Chronicles-Ink_12B, https://huggingface.co/Triangle104/Wayfare-Chronicles-Gold_12B, https://huggingface.co/Triangle104/Wayfare-Chronicles-Gutenberg_12B, https://huggingface.co/mergekit-community/MN-Slush-Wayfarer, https://huggingface.co/IAmTheCollector/MN-Slush-GGLD-Wayfarer, https://huggingface.co/Aleteian/Wayfinder, https://huggingface.co/Aleteian/WayToHumanity, https://huggingface.co/ClaudioItaly/Sensorial-12B, https://huggingface.co/mergekit-community/ChatWaifu-Wayfarer-12B, https://huggingface.co/mergekit-community/mergekit-task_arithmetic-zxjskqt, 
https://huggingface.co/mergekit-community/mergekit-model_stock-hwudfad, https://huggingface.co/mergekit-community/MN-Sappho-h-12B, https://huggingface.co/mergekit-community/MN-Sappho-i-12B, https://huggingface.co/mergekit-community/MN-Sappho-n3-12B, https://huggingface.co/NewEden/Delta-Vector_driftwood-exl2, https://huggingface.co/mergekit-community/mergekit-ties-okotcvk, https://huggingface.co/mergekit-community/mergekit-dare_ties-eovqfju, https://huggingface.co/mergekit-community/MN-Hekate-Nykhia-17B, https://huggingface.co/DreadPoor/YM-12B-Model_Stock, https://huggingface.co/mergekit-community/MN-Hekate-Noctiluca-12B, https://huggingface.co/mergekit-community/MN-Hekate-Noctiluca-12B-v2",52,"FallnAI/Quantize-HF-Models, K00B404/LLM_Quantization, KBaba7/Quant, bhaskartripathi/LLM_Quantization, bobscorporation/LatitudeGames-Wayfarer-12B, huggingface/InferenceSupport/discussions/new?title=LatitudeGames/Wayfarer-12B&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BLatitudeGames%2FWayfarer-12B%5D(%2FLatitudeGames%2FWayfarer-12B)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ruslanmv/convert_to_gguf, totolook/Quant",8
XTTS-v1_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ coqui/XTTS-v1,"---
3
+ license: other
4
+ license_name: coqui-public-model-license
5
+ license_link: https://coqui.ai/cpml
6
+ library_name: coqui
7
+ pipeline_tag: text-to-speech
8
+ ---
9
+
10
+ # ⓍTTS
11
+ ⓍTTS is a Voice generation model that lets you clone voices into different languages by using just a quick 6-second audio clip. Built on Tortoise,
12
+ ⓍTTS has important model changes that make cross-language voice cloning and multi-lingual speech generation super easy.
13
+ There is no need for an excessive amount of training data that spans countless hours.
14
+
15
+ This is the same model that powers [Coqui Studio](https://coqui.ai/), and [Coqui API](https://docs.coqui.ai/docs), however we apply
16
+ a few tricks to make it faster and support streaming inference.
17
+
18
+ ## NOTE: ⓍTTS V2 model is out here [XTTS V2](https://huggingface.co/coqui/XTTS-v2)
19
+
20
+ ### Features
21
+ - Supports 14 languages.
22
+ - Voice cloning with just a 6-second audio clip.
23
+ - Emotion and style transfer by cloning.
24
+ - Cross-language voice cloning.
25
+ - Multi-lingual speech generation.
26
+ - 24khz sampling rate.
27
+
28
+ ### Languages
29
+ As of now, XTTS-v1 (v1.1) supports 14 languages: **English, Spanish, French, German, Italian, Portuguese,
30
+ Polish, Turkish, Russian, Dutch, Czech, Arabic, Chinese, and Japanese**.
31
+
32
+ Stay tuned as we continue to add support for more languages. If you have any language requests, please feel free to reach out!
33
+
34
+ ### Code
35
+ The current implementation supports inference and [fine-tuning](https://tts.readthedocs.io/en/latest/models/xtts.html#training).
36
+
37
+ ### License
38
+ This model is licensed under [Coqui Public Model License](https://coqui.ai/cpml). There's a lot that goes into a license for generative models, and you can read more of [the origin story of CPML here](https://coqui.ai/blog/tts/cpml).
39
+
40
+ ### Contact
41
+ Come and join in our 🐸Community. We're active on [Discord](https://discord.gg/fBC58unbKE) and [Twitter](https://twitter.com/coqui_ai).
42
+ You can also mail us at info@coqui.ai.
43
+
44
+ Using 🐸TTS API:
45
+
46
+ ```python
47
+ from TTS.api import TTS
48
+ tts = TTS(""tts_models/multilingual/multi-dataset/xtts_v1"", gpu=True)
49
+
50
+ # generate speech by cloning a voice using default settings
51
+ tts.tts_to_file(text=""It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent."",
52
+ file_path=""output.wav"",
53
+ speaker_wav=""/path/to/target/speaker.wav"",
54
+ language=""en"")
55
+
56
+ # generate speech by cloning a voice using custom settings
57
+ tts.tts_to_file(text=""It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent."",
58
+ file_path=""output.wav"",
59
+ speaker_wav=""/path/to/target/speaker.wav"",
60
+ language=""en"",
61
+ decoder_iterations=30)
62
+ ```
63
+
64
+ Using 🐸TTS Command line:
65
+
66
+ ```console
67
+ tts --model_name tts_models/multilingual/multi-dataset/xtts_v1 \
68
+ --text ""Bugün okula gitmek istemiyorum."" \
69
+ --speaker_wav /path/to/target/speaker.wav \
70
+ --language_idx tr \
71
+ --use_cuda true
72
+ ```
73
+
74
+ Using model directly:
75
+
76
+ ```python
77
+ from TTS.tts.configs.xtts_config import XttsConfig
78
+ from TTS.tts.models.xtts import Xtts
79
+
80
+ config = XttsConfig()
81
+ config.load_json(""/path/to/xtts/config.json"")
82
+ model = Xtts.init_from_config(config)
83
+ model.load_checkpoint(config, checkpoint_dir=""/path/to/xtts/"", eval=True)
84
+ model.cuda()
85
+
86
+ outputs = model.synthesize(
87
+ ""It took me quite a long time to develop a voice and now that I have it I am not going to be silent."",
88
+ config,
89
+ speaker_wav=""/data/TTS-public/_refclips/3.wav"",
90
+ gpt_cond_len=3,
91
+ language=""en"",
92
+ )
93
+ ```","{""id"": ""coqui/XTTS-v1"", ""author"": ""coqui"", ""sha"": ""590756d186823eb1e9141be223b00a8ebc998c54"", ""last_modified"": ""2023-11-10 19:40:20+00:00"", ""created_at"": ""2023-09-13 09:22:03+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 5684, ""downloads_all_time"": null, ""likes"": 369, ""library_name"": ""coqui"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""coqui"", ""text-to-speech"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-to-speech"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: coqui\nlicense: other\nlicense_name: coqui-public-model-license\nlicense_link: https://coqui.ai/cpml\npipeline_tag: text-to-speech"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config_v1.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.pth', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Olivier-Truong/XTTS_V1_CPU_working"", ""Olivier-Truong/XTTS_V2_CPU_working"", ""kotoba-tech/TTS-Arena-JA"", ""mrm8488/xtts-spanish"", ""Egmdon/XTTS_V2_CPU_working"", ""gorkemgoknar/xtts-streaming"", ""LPhilp1943/speech2speech_voice_cloning_v2"", ""Fabrice-TIERCELIN/Multi-language_Text-to-Speech"", ""antoniomae/xtts2"", ""ysharma/xtts"", ""kevinwang676/xtts"", ""lucianne/XTTS_V3_CPU_working"", ""Maoweicao/xttsv2"", ""1littlecoder/xtts"", ""gukisan/xtts"", ""espeon/xtts"", ""khetag/xtts"", ""Zannriell/xtts"", ""valentinlica/xtts"", 
""sand-box/nnf_text_to_speech_v2_dev"", ""vivasvan100/xtts"", ""reach-vb/xtts-streaming"", ""antoniomae/coquixtts"", ""nitintit/xtts"", ""cocktailpeanut/xtts-streaming"", ""antoniomae1234/HTTS-VOITS-XTTS"", ""com-ple7e/XTTS-running-on-basic-cpu"", ""krishna195/Krishna_TTS_model"", ""vuxuanhoan/XTTS_V1_CPU_working"", ""gregory-237/ATLANTA_TSS"", ""Desident/Multi-language_Text-to-Speech""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-10 19:40:20+00:00"", ""cardData"": ""library_name: coqui\nlicense: other\nlicense_name: coqui-public-model-license\nlicense_link: https://coqui.ai/cpml\npipeline_tag: text-to-speech"", ""transformersInfo"": null, ""_id"": ""65017f3bf0e75a40c4115cb0"", ""modelId"": ""coqui/XTTS-v1"", ""usedStorage"": 33359253546}",0,,0,,0,,0,,0,"Egmdon/XTTS_V2_CPU_working, Fabrice-TIERCELIN/Multi-language_Text-to-Speech, LPhilp1943/speech2speech_voice_cloning_v2, Olivier-Truong/XTTS_V1_CPU_working, Olivier-Truong/XTTS_V2_CPU_working, antoniomae/xtts2, gorkemgoknar/xtts-streaming, huggingface/InferenceSupport/discussions/new?title=coqui/XTTS-v1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bcoqui%2FXTTS-v1%5D(%2Fcoqui%2FXTTS-v1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kevinwang676/xtts, kotoba-tech/TTS-Arena-JA, lucianne/XTTS_V3_CPU_working, mrm8488/xtts-spanish, ysharma/xtts",13
Yi-34B-200K_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
The diff for this file is too large to render. See raw diff
 
bart-large_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv ADDED
The diff for this file is too large to render. See raw diff
 
bertweet-base-sentiment-analysis_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
The diff for this file is too large to render. See raw diff
 
canary-1b_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,546 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ nvidia/canary-1b,"---
3
+ license: cc-by-nc-4.0
4
+ language:
5
+ - en
6
+ - de
7
+ - es
8
+ - fr
9
+ library_name: nemo
10
+ datasets:
11
+ - librispeech_asr
12
+ - fisher_corpus
13
+ - Switchboard-1
14
+ - WSJ-0
15
+ - WSJ-1
16
+ - National-Singapore-Corpus-Part-1
17
+ - National-Singapore-Corpus-Part-6
18
+ - vctk
19
+ - voxpopuli
20
+ - europarl
21
+ - multilingual_librispeech
22
+ - mozilla-foundation/common_voice_8_0
23
+ - MLCommons/peoples_speech
24
+ thumbnail: null
25
+ tags:
26
+ - automatic-speech-recognition
27
+ - automatic-speech-translation
28
+ - speech
29
+ - audio
30
+ - Transformer
31
+ - FastConformer
32
+ - Conformer
33
+ - pytorch
34
+ - NeMo
35
+ - hf-asr-leaderboard
36
+ widget:
37
+ - example_title: Librispeech sample 1
38
+ src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
39
+ - example_title: Librispeech sample 2
40
+ src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
41
+ model-index:
42
+ - name: canary-1b
43
+ results:
44
+ - task:
45
+ name: Automatic Speech Recognition
46
+ type: automatic-speech-recognition
47
+ dataset:
48
+ name: LibriSpeech (other)
49
+ type: librispeech_asr
50
+ config: other
51
+ split: test
52
+ args:
53
+ language: en
54
+ metrics:
55
+ - name: Test WER
56
+ type: wer
57
+ value: 2.89
58
+ - task:
59
+ type: Automatic Speech Recognition
60
+ name: automatic-speech-recognition
61
+ dataset:
62
+ name: SPGI Speech
63
+ type: kensho/spgispeech
64
+ config: test
65
+ split: test
66
+ args:
67
+ language: en
68
+ metrics:
69
+ - name: Test WER
70
+ type: wer
71
+ value: 4.79
72
+ - task:
73
+ type: Automatic Speech Recognition
74
+ name: automatic-speech-recognition
75
+ dataset:
76
+ name: Mozilla Common Voice 16.1
77
+ type: mozilla-foundation/common_voice_16_1
78
+ config: en
79
+ split: test
80
+ args:
81
+ language: en
82
+ metrics:
83
+ - name: Test WER (En)
84
+ type: wer
85
+ value: 7.97
86
+ - task:
87
+ type: Automatic Speech Recognition
88
+ name: automatic-speech-recognition
89
+ dataset:
90
+ name: Mozilla Common Voice 16.1
91
+ type: mozilla-foundation/common_voice_16_1
92
+ config: de
93
+ split: test
94
+ args:
95
+ language: de
96
+ metrics:
97
+ - name: Test WER (De)
98
+ type: wer
99
+ value: 4.61
100
+ - task:
101
+ type: Automatic Speech Recognition
102
+ name: automatic-speech-recognition
103
+ dataset:
104
+ name: Mozilla Common Voice 16.1
105
+ type: mozilla-foundation/common_voice_16_1
106
+ config: es
107
+ split: test
108
+ args:
109
+ language: es
110
+ metrics:
111
+ - name: Test WER (ES)
112
+ type: wer
113
+ value: 3.99
114
+ - task:
115
+ type: Automatic Speech Recognition
116
+ name: automatic-speech-recognition
117
+ dataset:
118
+ name: Mozilla Common Voice 16.1
119
+ type: mozilla-foundation/common_voice_16_1
120
+ config: fr
121
+ split: test
122
+ args:
123
+ language: fr
124
+ metrics:
125
+ - name: Test WER (Fr)
126
+ type: wer
127
+ value: 6.53
128
+ - task:
129
+ type: Automatic Speech Translation
130
+ name: automatic-speech-translation
131
+ dataset:
132
+ name: FLEURS
133
+ type: google/fleurs
134
+ config: en_us
135
+ split: test
136
+ args:
137
+ language: en-de
138
+ metrics:
139
+ - name: Test BLEU (En->De)
140
+ type: bleu
141
+ value: 32.15
142
+ - task:
143
+ type: Automatic Speech Translation
144
+ name: automatic-speech-translation
145
+ dataset:
146
+ name: FLEURS
147
+ type: google/fleurs
148
+ config: en_us
149
+ split: test
150
+ args:
151
+ language: en-de
152
+ metrics:
153
+ - name: Test BLEU (En->Es)
154
+ type: bleu
155
+ value: 22.66
156
+ - task:
157
+ type: Automatic Speech Translation
158
+ name: automatic-speech-translation
159
+ dataset:
160
+ name: FLEURS
161
+ type: google/fleurs
162
+ config: en_us
163
+ split: test
164
+ args:
165
+ language: en-de
166
+ metrics:
167
+ - name: Test BLEU (En->Fr)
168
+ type: bleu
169
+ value: 40.76
170
+ - task:
171
+ type: Automatic Speech Translation
172
+ name: automatic-speech-translation
173
+ dataset:
174
+ name: FLEURS
175
+ type: google/fleurs
176
+ config: de_de
177
+ split: test
178
+ args:
179
+ language: de-en
180
+ metrics:
181
+ - name: Test BLEU (De->En)
182
+ type: bleu
183
+ value: 33.98
184
+ - task:
185
+ type: Automatic Speech Translation
186
+ name: automatic-speech-translation
187
+ dataset:
188
+ name: FLEURS
189
+ type: google/fleurs
190
+ config: es_419
191
+ split: test
192
+ args:
193
+ language: es-en
194
+ metrics:
195
+ - name: Test BLEU (Es->En)
196
+ type: bleu
197
+ value: 21.80
198
+ - task:
199
+ type: Automatic Speech Translation
200
+ name: automatic-speech-translation
201
+ dataset:
202
+ name: FLEURS
203
+ type: google/fleurs
204
+ config: fr_fr
205
+ split: test
206
+ args:
207
+ language: fr-en
208
+ metrics:
209
+ - name: Test BLEU (Fr->En)
210
+ type: bleu
211
+ value: 30.95
212
+ - task:
213
+ type: Automatic Speech Translation
214
+ name: automatic-speech-translation
215
+ dataset:
216
+ name: COVOST
217
+ type: covost2
218
+ config: de_de
219
+ split: test
220
+ args:
221
+ language: de-en
222
+ metrics:
223
+ - name: Test BLEU (De->En)
224
+ type: bleu
225
+ value: 37.67
226
+ - task:
227
+ type: Automatic Speech Translation
228
+ name: automatic-speech-translation
229
+ dataset:
230
+ name: COVOST
231
+ type: covost2
232
+ config: es_419
233
+ split: test
234
+ args:
235
+ language: es-en
236
+ metrics:
237
+ - name: Test BLEU (Es->En)
238
+ type: bleu
239
+ value: 40.7
240
+ - task:
241
+ type: Automatic Speech Translation
242
+ name: automatic-speech-translation
243
+ dataset:
244
+ name: COVOST
245
+ type: covost2
246
+ config: fr_fr
247
+ split: test
248
+ args:
249
+ language: fr-en
250
+ metrics:
251
+ - name: Test BLEU (Fr->En)
252
+ type: bleu
253
+ value: 40.42
254
+
255
+ metrics:
256
+ - wer
257
+ - bleu
258
+ pipeline_tag: automatic-speech-recognition
259
+ ---
260
+
261
+
262
+ # Canary 1B
263
+
264
+ <style>
265
+ img {
266
+ display: inline;
267
+ }
268
+ </style>
269
+
270
+ [![Model architecture](https://img.shields.io/badge/Model_Arch-FastConformer--Transformer-lightgrey#model-badge)](#model-architecture)
271
+ | [![Model size](https://img.shields.io/badge/Params-1B-lightgrey#model-badge)](#model-architecture)
272
+ | [![Language](https://img.shields.io/badge/Language-multilingual-lightgrey#model-badge)](#datasets)
273
+
274
+ NVIDIA [NeMo Canary](https://nvidia.github.io/NeMo/blogs/2024/2024-02-canary/) is a family of multi-lingual multi-tasking models that achieves state-of-the art performance on multiple benchmarks. With 1 billion parameters, Canary-1B supports automatic speech-to-text recognition (ASR) in 4 languages (English, German, French, Spanish) and translation from English to German/French/Spanish and from German/French/Spanish to English with or without punctuation and capitalization (PnC).
275
+
276
+ **🚨Note: Check out our latest [Canary-1B-Flash](https://huggingface.co/nvidia/canary-1b-flash) model, a faster and more accurate variant of Canary-1B!**
277
+
278
+ ## Model Architecture
279
+
280
+ Canary is an encoder-decoder model with FastConformer [1] encoder and Transformer Decoder [2].
281
+ With audio features extracted from the encoder, task tokens such as `<source language>`, `<target language>`, `<task>` and `<toggle PnC>`
282
+ are fed into the Transformer Decoder to trigger the text generation process. Canary uses a concatenated tokenizer [5] from individual
283
+ SentencePiece [3] tokenizers of each language, which makes it easy to scale up to more languages.
284
+ The Canary-1B model has 24 encoder layers and 24 decoder layers in total.
285
+
286
+
287
+ ## NVIDIA NeMo
288
+
289
+ To train, fine-tune or transcribe with Canary, you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo). We recommend you install it after you've installed Cython and the latest PyTorch version.
290
+ ```
291
+ pip install git+https://github.com/NVIDIA/NeMo.git@r1.23.0#egg=nemo_toolkit[asr]
292
+ ```
293
+
294
+
295
+ ## How to Use this Model
296
+
297
+ The model is available for use in the NeMo toolkit [4], and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset.
298
+
299
+ ### Loading the Model
300
+
301
+ ```python
302
+ from nemo.collections.asr.models import EncDecMultiTaskModel
303
+
304
+ # load model
305
+ canary_model = EncDecMultiTaskModel.from_pretrained('nvidia/canary-1b')
306
+
307
+ # update decode params
308
+ decode_cfg = canary_model.cfg.decoding
309
+ decode_cfg.beam.beam_size = 1
310
+ canary_model.change_decoding_strategy(decode_cfg)
311
+ ```
312
+
313
+ ### Input Format
314
+ Input to Canary can be either a list of paths to audio files or a jsonl manifest file.
315
+
316
+ If the input is a list of paths, Canary assumes that the audio is English and transcribes it. I.e., Canary's default behaviour is English ASR.
317
+ ```python
318
+ predicted_text = canary_model.transcribe(
319
+ paths2audio_files=['path1.wav', 'path2.wav'],
320
+ batch_size=16, # batch size to run the inference with
321
+ )[0].text
322
+ ```
323
+
324
+ To use Canary for transcribing other supported languages or perform Speech-to-Text translation, specify the input as jsonl manifest file, where each line in the file is a dictionary containing the following fields:
325
+
326
+ ```yaml
327
+ # Example of a line in input_manifest.json
328
+ {
329
+ ""audio_filepath"": ""/path/to/audio.wav"", # path to the audio file
330
+ ""duration"": 1000, # duration of the audio, can be set to `None` if using NeMo main branch
331
+ ""taskname"": ""asr"", # use ""s2t_translation"" for speech-to-text translation with r1.23, or ""ast"" if using the NeMo main branch
332
+ ""source_lang"": ""en"", # language of the audio input, set `source_lang`==`target_lang` for ASR, choices=['en','de','es','fr']
333
+ ""target_lang"": ""en"", # language of the text output, choices=['en','de','es','fr']
334
+ ""pnc"": ""yes"", # whether to have PnC output, choices=['yes', 'no']
335
+ ""answer"": ""na"",
336
+ }
337
+ ```
338
+
339
+ and then use:
340
+ ```python
341
+ predicted_text = canary_model.transcribe(
342
+ ""<path to input manifest file>"",
343
+ batch_size=16, # batch size to run the inference with
344
+ )[0].text
345
+ ```
346
+
347
+
348
+ ### Automatic Speech-to-text Recognition (ASR)
349
+
350
+ An example manifest for transcribing English audios can be:
351
+
352
+ ```yaml
353
+ # Example of a line in input_manifest.json
354
+ {
355
+ ""audio_filepath"": ""/path/to/audio.wav"", # path to the audio file
356
+ ""duration"": 1000, # duration of the audio, can be set to `None` if using NeMo main branch
357
+ ""taskname"": ""asr"",
358
+ ""source_lang"": ""en"", # language of the audio input, set `source_lang`==`target_lang` for ASR, choices=['en','de','es','fr']
359
+ ""target_lang"": ""en"", # language of the text output, choices=['en','de','es','fr']
360
+ ""pnc"": ""yes"", # whether to have PnC output, choices=['yes', 'no']
361
+ ""answer"": ""na"",
362
+ }
363
+ ```
364
+
365
+
366
+ ### Automatic Speech-to-text Translation (AST)
367
+
368
+ An example manifest for transcribing English audios into German text can be:
369
+
370
+ ```yaml
371
+ # Example of a line in input_manifest.json
372
+ {
373
+ ""audio_filepath"": ""/path/to/audio.wav"", # path to the audio file
374
+ ""duration"": 1000, # duration of the audio, can be set to `None` if using NeMo main branch
375
+ ""taskname"": ""s2t_translation"", # r1.23 only recognizes ""s2t_translation"", but ""ast"" is supported if using the NeMo main branch
376
+ ""source_lang"": ""en"", # language of the audio input, choices=['en','de','es','fr']
377
+ ""target_lang"": ""de"", # language of the text output, choices=['en','de','es','fr']
378
+ ""pnc"": ""yes"", # whether to have PnC output, choices=['yes', 'no']
379
+ ""answer"": ""na""
380
+ }
381
+ ```
382
+
383
+ Alternatively, one can use `transcribe_speech.py` script to do the same.
384
+
385
+ ```bash
386
+ python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py
387
+ pretrained_name=""nvidia/canary-1b""
388
+ audio_dir=""<path to audio_directory>"" # transcribes all the wav files in audio_directory
389
+ ```
390
+
391
+
392
+ ```bash
393
+ python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py
394
+ pretrained_name=""nvidia/canary-1b""
395
+ dataset_manifest=""<path to manifest file>""
396
+ ```
397
+
398
+
399
+ ### Input
400
+
401
+ This model accepts single channel (mono) audio sampled at 16000 Hz, along with the task/languages/PnC tags as input.
402
+
403
+ ### Output
404
+
405
+ The model outputs the transcribed/translated text corresponding to the input audio, in the specified target language and with or without punctuation and capitalization.
406
+
407
+
408
+
409
+ ## Training
410
+
411
+ Canary-1B is trained using the NVIDIA NeMo toolkit [4] for 150k steps with dynamic bucketing and a batch duration of 360s per GPU on 128 NVIDIA A100 80GB GPUs.
412
+ The model can be trained using this [example script](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/speech_multitask/speech_to_text_aed.py) and [base config](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/conf/speech_multitask/fast-conformer_aed.yaml).
413
+
414
+ The tokenizers for these models were built using the text transcripts of the train set with this [script](https://github.com/NVIDIA/NeMo/blob/main/scripts/tokenizers/process_asr_text_tokenizer.py).
415
+
416
+
417
+ ### Datasets
418
+
419
+ The Canary-1B model is trained on a total of 85k hrs of speech data. It consists of 31k hrs of public data, 20k hrs collected by [Suno](https://suno.ai/), and 34k hrs of in-house data.
420
+
421
+ The constituents of public data are as follows.
422
+
423
+ #### English (25.5k hours)
424
+ - Librispeech 960 hours
425
+ - Fisher Corpus
426
+ - Switchboard-1 Dataset
427
+ - WSJ-0 and WSJ-1
428
+ - National Speech Corpus (Part 1, Part 6)
429
+ - VCTK
430
+ - VoxPopuli (EN)
431
+ - Europarl-ASR (EN)
432
+ - Multilingual Librispeech (MLS EN) - 2,000 hour subset
433
+ - Mozilla Common Voice (v7.0)
434
+ - People's Speech - 12,000 hour subset
435
+ - Mozilla Common Voice (v11.0) - 1,474 hour subset
436
+
437
+ #### German (2.5k hours)
438
+ - Mozilla Common Voice (v12.0) - 800 hour subset
439
+ - Multilingual Librispeech (MLS DE) - 1,500 hour subset
440
+ - VoxPopuli (DE) - 200 hr subset
441
+
442
+ #### Spanish (1.4k hours)
443
+ - Mozilla Common Voice (v12.0) - 395 hour subset
444
+ - Multilingual Librispeech (MLS ES) - 780 hour subset
445
+ - VoxPopuli (ES) - 108 hour subset
446
+ - Fisher - 141 hour subset
447
+
448
+ #### French (1.8k hours)
449
+ - Mozilla Common Voice (v12.0) - 708 hour subset
450
+ - Multilingual Librispeech (MLS FR) - 926 hour subset
451
+ - VoxPopuli (FR) - 165 hour subset
452
+
453
+
454
+ ## Performance
455
+
456
+ In both ASR and AST experiments, predictions were generated using beam search with width 5 and length penalty 1.0.
457
+
458
+ ### ASR Performance (w/o PnC)
459
+
460
+ The ASR performance is measured with word error rate (WER), and we process the groundtruth and predicted text with [whisper-normalizer](https://pypi.org/project/whisper-normalizer/).
461
+
462
+ WER on [MCV-16.1](https://commonvoice.mozilla.org/en/datasets) test set:
463
+
464
+ | **Version** | **Model** | **En** | **De** | **Es** | **Fr** |
465
+ |:---------:|:-----------:|:------:|:------:|:------:|:------:|
466
+ | 1.23.0 | canary-1b | 7.97 | 4.61 | 3.99 | 6.53 |
467
+
468
+
469
+ WER on [MLS](https://huggingface.co/datasets/facebook/multilingual_librispeech) test set:
470
+
471
+ | **Version** | **Model** | **En** | **De** | **Es** | **Fr** |
472
+ |:---------:|:-----------:|:------:|:------:|:------:|:------:|
473
+ | 1.23.0 | canary-1b | 3.06 | 4.19 | 3.15 | 4.12 |
474
+
475
+
476
+ More details on evaluation can be found at [HuggingFace ASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard)
477
+
478
+ ### AST Performance
479
+
480
+ We evaluate AST performance with [BLEU score](https://lightning.ai/docs/torchmetrics/stable/text/sacre_bleu_score.html), and use native annotations with punctuation and capitalization in the datasets.
481
+
482
+ BLEU score on [FLEURS](https://huggingface.co/datasets/google/fleurs) test set:
483
+
484
+ | **Version** | **Model** | **En->De** | **En->Es** | **En->Fr** | **De->En** | **Es->En** | **Fr->En** |
485
+ |:-----------:|:---------:|:----------:|:----------:|:----------:|:----------:|:----------:|:----------:|
486
+ | 1.23.0 | canary-1b | 32.15 | 22.66 | 40.76 | 33.98 | 21.80 | 30.95 |
487
+
488
+
489
+ BLEU score on [COVOST-v2](https://github.com/facebookresearch/covost) test set:
490
+
491
+ | **Version** | **Model** | **De->En** | **Es->En** | **Fr->En** |
492
+ |:-----------:|:---------:|:----------:|:----------:|:----------:|
493
+ | 1.23.0 | canary-1b | 37.67 | 40.7 | 40.42 |
494
+
495
+ BLEU score on [mExpresso](https://huggingface.co/facebook/seamless-expressive#mexpresso-multilingual-expresso) test set:
496
+
497
+ | **Version** | **Model** | **En->De** | **En->Es** | **En->Fr** |
498
+ |:-----------:|:---------:|:----------:|:----------:|:----------:|
499
+ | 1.23.0 | canary-1b | 23.84 | 35.74 | 28.29 |
500
+
501
+ ## Model Fairness Evaluation
502
+
503
+ As outlined in the paper ""Towards Measuring Fairness in AI: the Casual Conversations Dataset"", we assessed the Canary-1B model for fairness. The model was evaluated on the CausalConversations-v1 dataset, and the results are reported as follows:
504
+
505
+ ### Gender Bias:
506
+
507
+ | Gender | Male | Female | N/A | Other |
508
+ | :--- | :--- | :--- | :--- | :--- |
509
+ | Num utterances | 19325 | 24532 | 926 | 33 |
510
+ | % WER | 14.64 | 12.92 | 17.88 | 126.92 |
511
+
512
+ ### Age Bias:
513
+
514
+ | Age Group | (18-30) | (31-45) | (46-85) | (1-100) |
515
+ | :--- | :--- | :--- | :--- | :--- |
516
+ | Num utterances | 15956 | 14585 | 13349 | 43890 |
517
+ | % WER | 14.64 | 13.07 | 13.47 | 13.76 |
518
+
519
+ (Error rates for fairness evaluation are determined by normalizing both the reference and predicted text, similar to the methods used in the evaluations found at https://github.com/huggingface/open_asr_leaderboard.)
520
+
521
+ ## NVIDIA Riva: Deployment
522
+
523
+ [NVIDIA Riva](https://developer.nvidia.com/riva), is an accelerated speech AI SDK deployable on-prem, in all clouds, multi-cloud, hybrid, on edge, and embedded.
524
+ Additionally, Riva provides:
525
+
526
+ * World-class out-of-the-box accuracy for the most common languages with model checkpoints trained on proprietary data with hundreds of thousands of GPU-compute hours
527
+ * Best in class accuracy with run-time word boosting (e.g., brand and product names) and customization of acoustic model, language model, and inverse text normalization
528
+ * Streaming speech recognition, Kubernetes compatible scaling, and enterprise-grade support
529
+
530
+ Canary is available as a NIM endpoint via Riva. Try the model yourself here: [https://build.nvidia.com/nvidia/canary-1b-asr](https://build.nvidia.com/nvidia/canary-1b-asr).
531
+
532
+
533
+ ## References
534
+ [1] [Fast Conformer with Linearly Scalable Attention for Efficient Speech Recognition](https://arxiv.org/abs/2305.05084)
535
+
536
+ [2] [Attention is all you need](https://arxiv.org/abs/1706.03762)
537
+
538
+ [3] [Google Sentencepiece Tokenizer](https://github.com/google/sentencepiece)
539
+
540
+ [4] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo)
541
+
542
+ [5] [Unified Model for Code-Switching Speech Recognition and Language Identification Based on Concatenated Tokenizer](https://aclanthology.org/2023.calcs-1.7.pdf)
543
+
544
+ ## Licence
545
+
546
+ License to use this model is covered by the [CC-BY-NC-4.0](https://creativecommons.org/licenses/by-nc/4.0/deed.en#:~:text=NonCommercial%20%E2%80%94%20You%20may%20not%20use,doing%20anything%20the%20license%20permits.). By downloading the public and release version of the model, you accept the terms and conditions of the CC-BY-NC-4.0 license.","{""id"": ""nvidia/canary-1b"", ""author"": ""nvidia"", ""sha"": ""d68d53f1b0f6e45d528edbb0b8e247f28d476a7e"", ""last_modified"": ""2025-04-24 00:04:08+00:00"", ""created_at"": ""2024-02-07 17:20:55+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 9714, ""downloads_all_time"": null, ""likes"": 421, ""library_name"": ""nemo"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""nemo"", ""automatic-speech-recognition"", ""automatic-speech-translation"", ""speech"", ""audio"", ""Transformer"", ""FastConformer"", ""Conformer"", ""pytorch"", ""NeMo"", ""hf-asr-leaderboard"", ""en"", ""de"", ""es"", ""fr"", ""dataset:librispeech_asr"", ""dataset:fisher_corpus"", ""dataset:Switchboard-1"", ""dataset:WSJ-0"", ""dataset:WSJ-1"", ""dataset:National-Singapore-Corpus-Part-1"", ""dataset:National-Singapore-Corpus-Part-6"", ""dataset:vctk"", ""dataset:voxpopuli"", ""dataset:europarl"", ""dataset:multilingual_librispeech"", ""dataset:mozilla-foundation/common_voice_8_0"", ""dataset:MLCommons/peoples_speech"", ""arxiv:2305.05084"", ""arxiv:1706.03762"", ""license:cc-by-nc-4.0"", ""model-index"", ""region:us""], ""pipeline_tag"": ""automatic-speech-recognition"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- librispeech_asr\n- fisher_corpus\n- Switchboard-1\n- WSJ-0\n- WSJ-1\n- National-Singapore-Corpus-Part-1\n- National-Singapore-Corpus-Part-6\n- vctk\n- voxpopuli\n- europarl\n- multilingual_librispeech\n- mozilla-foundation/common_voice_8_0\n- MLCommons/peoples_speech\nlanguage:\n- en\n- de\n- es\n- fr\nlibrary_name: nemo\nlicense: 
cc-by-nc-4.0\nmetrics:\n- wer\n- bleu\npipeline_tag: automatic-speech-recognition\ntags:\n- automatic-speech-recognition\n- automatic-speech-translation\n- speech\n- audio\n- Transformer\n- FastConformer\n- Conformer\n- pytorch\n- NeMo\n- hf-asr-leaderboard\nwidget:\n- example_title: Librispeech sample 1\n src: https://cdn-media.huggingface.co/speech_samples/sample1.flac\n- example_title: Librispeech sample 2\n src: https://cdn-media.huggingface.co/speech_samples/sample2.flac\nmodel-index:\n- name: canary-1b\n results:\n - task:\n type: automatic-speech-recognition\n name: Automatic Speech Recognition\n dataset:\n name: LibriSpeech (other)\n type: librispeech_asr\n config: other\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 2.89\n name: Test WER\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: SPGI Speech\n type: kensho/spgispeech\n config: test\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 4.79\n name: Test WER\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: en\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 7.97\n name: Test WER (En)\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: de\n split: test\n args:\n language: de\n metrics:\n - type: wer\n value: 4.61\n name: Test WER (De)\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: es\n split: test\n args:\n language: es\n metrics:\n - type: wer\n value: 3.99\n name: Test WER (ES)\n verified: false\n - task:\n type: 
Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: fr\n split: test\n args:\n language: fr\n metrics:\n - type: wer\n value: 6.53\n name: Test WER (Fr)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: en_us\n split: test\n args:\n language: en-de\n metrics:\n - type: bleu\n value: 32.15\n name: Test BLEU (En->De)\n verified: false\n - type: bleu\n value: 22.66\n name: Test BLEU (En->Es)\n verified: false\n - type: bleu\n value: 40.76\n name: Test BLEU (En->Fr)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: de_de\n split: test\n args:\n language: de-en\n metrics:\n - type: bleu\n value: 33.98\n name: Test BLEU (De->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: es_419\n split: test\n args:\n language: es-en\n metrics:\n - type: bleu\n value: 21.8\n name: Test BLEU (Es->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: fr_fr\n split: test\n args:\n language: fr-en\n metrics:\n - type: bleu\n value: 30.95\n name: Test BLEU (Fr->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: de_de\n split: test\n args:\n language: de-en\n metrics:\n - type: bleu\n value: 37.67\n name: Test BLEU (De->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: es_419\n split: test\n args:\n language: es-en\n metrics:\n - 
type: bleu\n value: 40.7\n name: Test BLEU (Es->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: fr_fr\n split: test\n args:\n language: fr-en\n metrics:\n - type: bleu\n value: 40.42\n name: Test BLEU (Fr->En)\n verified: false"", ""widget_data"": [{""example_title"": ""Librispeech sample 1"", ""src"": ""https://cdn-media.huggingface.co/speech_samples/sample1.flac""}, {""example_title"": ""Librispeech sample 2"", ""src"": ""https://cdn-media.huggingface.co/speech_samples/sample2.flac""}], ""model_index"": [{""name"": ""canary-1b"", ""results"": [{""task"": {""name"": ""Automatic Speech Recognition"", ""type"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""LibriSpeech (other)"", ""type"": ""librispeech_asr"", ""config"": ""other"", ""split"": ""test"", ""args"": {""language"": ""en""}}, ""metrics"": [{""name"": ""Test WER"", ""type"": ""wer"", ""value"": 2.89, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Recognition"", ""name"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""SPGI Speech"", ""type"": ""kensho/spgispeech"", ""config"": ""test"", ""split"": ""test"", ""args"": {""language"": ""en""}}, ""metrics"": [{""name"": ""Test WER"", ""type"": ""wer"", ""value"": 4.79, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Recognition"", ""name"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""Mozilla Common Voice 16.1"", ""type"": ""mozilla-foundation/common_voice_16_1"", ""config"": ""en"", ""split"": ""test"", ""args"": {""language"": ""en""}}, ""metrics"": [{""name"": ""Test WER (En)"", ""type"": ""wer"", ""value"": 7.97, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Recognition"", ""name"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""Mozilla Common Voice 16.1"", ""type"": ""mozilla-foundation/common_voice_16_1"", ""config"": ""de"", ""split"": 
""test"", ""args"": {""language"": ""de""}}, ""metrics"": [{""name"": ""Test WER (De)"", ""type"": ""wer"", ""value"": 4.61, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Recognition"", ""name"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""Mozilla Common Voice 16.1"", ""type"": ""mozilla-foundation/common_voice_16_1"", ""config"": ""es"", ""split"": ""test"", ""args"": {""language"": ""es""}}, ""metrics"": [{""name"": ""Test WER (ES)"", ""type"": ""wer"", ""value"": 3.99, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Recognition"", ""name"": ""automatic-speech-recognition""}, ""dataset"": {""name"": ""Mozilla Common Voice 16.1"", ""type"": ""mozilla-foundation/common_voice_16_1"", ""config"": ""fr"", ""split"": ""test"", ""args"": {""language"": ""fr""}}, ""metrics"": [{""name"": ""Test WER (Fr)"", ""type"": ""wer"", ""value"": 6.53, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""en_us"", ""split"": ""test"", ""args"": {""language"": ""en-de""}}, ""metrics"": [{""name"": ""Test BLEU (En->De)"", ""type"": ""bleu"", ""value"": 32.15, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""en_us"", ""split"": ""test"", ""args"": {""language"": ""en-de""}}, ""metrics"": [{""name"": ""Test BLEU (En->Es)"", ""type"": ""bleu"", ""value"": 22.66, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""en_us"", ""split"": ""test"", ""args"": {""language"": ""en-de""}}, ""metrics"": [{""name"": ""Test BLEU (En->Fr)"", ""type"": ""bleu"", ""value"": 40.76, ""verified"": 
false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""de_de"", ""split"": ""test"", ""args"": {""language"": ""de-en""}}, ""metrics"": [{""name"": ""Test BLEU (De->En)"", ""type"": ""bleu"", ""value"": 33.98, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""es_419"", ""split"": ""test"", ""args"": {""language"": ""es-en""}}, ""metrics"": [{""name"": ""Test BLEU (Es->En)"", ""type"": ""bleu"", ""value"": 21.8, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""FLEURS"", ""type"": ""google/fleurs"", ""config"": ""fr_fr"", ""split"": ""test"", ""args"": {""language"": ""fr-en""}}, ""metrics"": [{""name"": ""Test BLEU (Fr->En)"", ""type"": ""bleu"", ""value"": 30.95, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""COVOST"", ""type"": ""covost2"", ""config"": ""de_de"", ""split"": ""test"", ""args"": {""language"": ""de-en""}}, ""metrics"": [{""name"": ""Test BLEU (De->En)"", ""type"": ""bleu"", ""value"": 37.67, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""COVOST"", ""type"": ""covost2"", ""config"": ""es_419"", ""split"": ""test"", ""args"": {""language"": ""es-en""}}, ""metrics"": [{""name"": ""Test BLEU (Es->En)"", ""type"": ""bleu"", ""value"": 40.7, ""verified"": false}]}, {""task"": {""type"": ""Automatic Speech Translation"", ""name"": ""automatic-speech-translation""}, ""dataset"": {""name"": ""COVOST"", ""type"": ""covost2"", ""config"": ""fr_fr"", ""split"": ""test"", 
""args"": {""language"": ""fr-en""}}, ""metrics"": [{""name"": ""Test BLEU (Fr->En)"", ""type"": ""bleu"", ""value"": 40.42, ""verified"": false}]}]}], ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='canary-1b.nemo', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='canary-model.png', size=None, blob_id=None, lfs=None)""], ""spaces"": [""nvidia/canary-1b"", ""awacke1/Speech-Recognition-Canary-NvidiaT4"", ""gobeldan/canary-1b"", ""jiuuee/my-alexa"", ""almncarlo/myalexa"", ""gdnartea/Chatty_Ashe"", ""VanYsa/MyAlexa"", ""drc97/nvidia-canary-1b"", ""monuene/nvidia-canary-1b"", ""lnajdi/nvidia-canary-1b"", ""Nymbo/canary-1b-TTS-Translate"", ""xeeshanakram/nvidia-canary-1b"", ""bqmolina/MyAlexa"", ""Kindler/197zAlexa"", ""notabaka/nvidia-canary-1b"", ""micoirvin-up/sample-my-alexa"", ""jlcastalla/MyAlexa"", ""ashamshur/nvidia-canary-1b"", ""jdlugo/nvidia-canary-1b-docker"", ""Oreobird/nvidia-canary-1b"", ""rayl-aoit/rayl-gradio-labs"", ""Flymark8/nvidia-canary-1b"", ""cottom/nvidia-canary-1b"", ""rayl-aoit/translate_text_and_speech"", ""umarigan/speech-to-speech-translation"", ""Diego055/translate_text_and_speech"", ""Gyaneshere/Canary-Speech-to-text""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-24 00:04:08+00:00"", ""cardData"": ""datasets:\n- librispeech_asr\n- fisher_corpus\n- Switchboard-1\n- WSJ-0\n- WSJ-1\n- National-Singapore-Corpus-Part-1\n- National-Singapore-Corpus-Part-6\n- vctk\n- voxpopuli\n- europarl\n- multilingual_librispeech\n- mozilla-foundation/common_voice_8_0\n- MLCommons/peoples_speech\nlanguage:\n- en\n- de\n- es\n- fr\nlibrary_name: nemo\nlicense: cc-by-nc-4.0\nmetrics:\n- wer\n- bleu\npipeline_tag: automatic-speech-recognition\ntags:\n- automatic-speech-recognition\n- 
automatic-speech-translation\n- speech\n- audio\n- Transformer\n- FastConformer\n- Conformer\n- pytorch\n- NeMo\n- hf-asr-leaderboard\nwidget:\n- example_title: Librispeech sample 1\n src: https://cdn-media.huggingface.co/speech_samples/sample1.flac\n- example_title: Librispeech sample 2\n src: https://cdn-media.huggingface.co/speech_samples/sample2.flac\nmodel-index:\n- name: canary-1b\n results:\n - task:\n type: automatic-speech-recognition\n name: Automatic Speech Recognition\n dataset:\n name: LibriSpeech (other)\n type: librispeech_asr\n config: other\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 2.89\n name: Test WER\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: SPGI Speech\n type: kensho/spgispeech\n config: test\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 4.79\n name: Test WER\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: en\n split: test\n args:\n language: en\n metrics:\n - type: wer\n value: 7.97\n name: Test WER (En)\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: de\n split: test\n args:\n language: de\n metrics:\n - type: wer\n value: 4.61\n name: Test WER (De)\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: mozilla-foundation/common_voice_16_1\n config: es\n split: test\n args:\n language: es\n metrics:\n - type: wer\n value: 3.99\n name: Test WER (ES)\n verified: false\n - task:\n type: Automatic Speech Recognition\n name: automatic-speech-recognition\n dataset:\n name: Mozilla Common Voice 16.1\n type: 
mozilla-foundation/common_voice_16_1\n config: fr\n split: test\n args:\n language: fr\n metrics:\n - type: wer\n value: 6.53\n name: Test WER (Fr)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: en_us\n split: test\n args:\n language: en-de\n metrics:\n - type: bleu\n value: 32.15\n name: Test BLEU (En->De)\n verified: false\n - type: bleu\n value: 22.66\n name: Test BLEU (En->Es)\n verified: false\n - type: bleu\n value: 40.76\n name: Test BLEU (En->Fr)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: de_de\n split: test\n args:\n language: de-en\n metrics:\n - type: bleu\n value: 33.98\n name: Test BLEU (De->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: es_419\n split: test\n args:\n language: es-en\n metrics:\n - type: bleu\n value: 21.8\n name: Test BLEU (Es->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: FLEURS\n type: google/fleurs\n config: fr_fr\n split: test\n args:\n language: fr-en\n metrics:\n - type: bleu\n value: 30.95\n name: Test BLEU (Fr->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: de_de\n split: test\n args:\n language: de-en\n metrics:\n - type: bleu\n value: 37.67\n name: Test BLEU (De->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: es_419\n split: test\n args:\n language: es-en\n metrics:\n - type: bleu\n value: 40.7\n name: Test BLEU (Es->En)\n verified: false\n - task:\n type: Automatic Speech Translation\n 
name: automatic-speech-translation\n dataset:\n name: COVOST\n type: covost2\n config: fr_fr\n split: test\n args:\n language: fr-en\n metrics:\n - type: bleu\n value: 40.42\n name: Test BLEU (Fr->En)\n verified: false"", ""transformersInfo"": null, ""_id"": ""65c3bbf780497543ca174a16"", ""modelId"": ""nvidia/canary-1b"", ""usedStorage"": 8352624640}",0,,0,,0,,0,,0,"Nymbo/canary-1b-TTS-Translate, VanYsa/MyAlexa, almncarlo/myalexa, ashamshur/nvidia-canary-1b, awacke1/Speech-Recognition-Canary-NvidiaT4, drc97/nvidia-canary-1b, gdnartea/Chatty_Ashe, gobeldan/canary-1b, hf-audio/open_asr_leaderboard, huggingface/InferenceSupport/discussions/new?title=nvidia/canary-1b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bnvidia%2Fcanary-1b%5D(%2Fnvidia%2Fcanary-1b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jiuuee/my-alexa, lnajdi/nvidia-canary-1b, monuene/nvidia-canary-1b, nvidia/canary-1b",14
chatglm3-6b-32k_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ THUDM/chatglm3-6b-32k,"---
3
+ language:
4
+ - zh
5
+ - en
6
+ tags:
7
+ - glm
8
+ - chatglm
9
+ - thudm
10
+ ---
11
+ # ChatGLM3-6B-32K
12
+ <p align=""center"">
13
+ 💻 <a href=""https://github.com/THUDM/ChatGLM"" target=""_blank"">Github Repo</a> • 🐦 <a href=""https://twitter.com/thukeg"" target=""_blank"">Twitter</a> • 📃 <a href=""https://arxiv.org/abs/2103.10360"" target=""_blank"">[GLM@ACL 22]</a> <a href=""https://github.com/THUDM/GLM"" target=""_blank"">[GitHub]</a> • 📃 <a href=""https://arxiv.org/abs/2210.02414"" target=""_blank"">[GLM-130B@ICLR 23]</a> <a href=""https://github.com/THUDM/GLM-130B"" target=""_blank"">[GitHub]</a> <br>
14
+ </p>
15
+
16
+ <p align=""center"">
17
+ 👋 Join our <a href=""https://join.slack.com/t/chatglm/shared_invite/zt-25ti5uohv-A_hs~am_D3Q8XPZMpj7wwQ"" target=""_blank"">Slack</a> and <a href=""https://github.com/THUDM/ChatGLM/blob/main/resources/WECHAT.md"" target=""_blank"">WeChat</a>
18
+ </p>
19
+ <p align=""center"">
20
+ 📍Experience the larger-scale ChatGLM model at <a href=""https://www.chatglm.cn"">chatglm.cn</a>
21
+ </p>
22
+
23
+ ## 介绍 (Introduction)
24
+
25
+ ChatGLM3-6B-32K在[ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b)的基础上进一步强化了对于长文本的理解能力,能够更好的处理最多32K长度的上下文。具体地,我们对位置编码进行了更新,并设计了更有针对性的长文本训练方法,在对话阶段使用 32K 的上下文长度训练。在实际的使用中,如果您面临的上下文长度基本在 **8K 以内**,我们推荐使用[ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b);如果您需要处理**超过 8K** 的上下文长度,我们推荐使用ChatGLM3-6B-32K。
26
+
27
+ ChatGLM3-6B 是 ChatGLM 系列最新一代的开源模型,在保留了前两代模型对话流畅、部署门槛低等众多优秀特性的基础上,ChatGLM3-6B 引入了如下特性:
28
+
29
+ 1. **更强大的基础模型:** ChatGLM3-6B 的基础模型 ChatGLM3-6B-Base 采用了更多样的训练数据、更充分的训练步数和更合理的训练策略。在语义、数学、推理、代码、知识等不同角度的数据集上测评显示,ChatGLM3-6B-Base 具有在 10B 以下的预训练模型中最强的性能。
30
+ 2. **更完整的功能支持:** ChatGLM3-6B 采用了全新设计的 [Prompt 格式](https://github.com/THUDM/ChatGLM3/blob/main/README.md),除正常的多轮对话外。同时原生支持[工具调用](https://github.com/THUDM/ChatGLM3/blob/main/tools_using_demo/README.md)(Function Call)、代码执行(Code Interpreter)和 Agent 任务等复杂场景。
31
+ 3. **更全面的开源序列:** 除了对话模型 ChatGLM3-6B 外,还开源了基础模型 ChatGLM-6B-Base、长文本对话模型 ChatGLM3-6B-32K。以上所有权重对学术研究**完全开放**,在填写[问卷](https://open.bigmodel.cn/mla/form)进行登记后**亦允许免费商业使用**。
32
+
33
+ Based on [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b), ChatGLM3-6B-32K further strengthens the ability to understand long texts and can better handle contexts up to 32K in length. Specifically, we update the position encoding and design a more targeted long text training method, using a context length of 32K for training in the conversation stage. In actual use, if the context length you face is basically within **8K**, we recommend using [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b); if you need to handle **For context lengths exceeding 8K**, we recommend using ChatGLM3-6B-32K.
34
+
35
+ ChatGLM3-6B is the latest open-source model in the ChatGLM series. While retaining many excellent features such as smooth dialogue and low deployment threshold from the previous two generations, ChatGLM3-6B introduces the following features:
36
+
37
+ 1. **More Powerful Base Model:** The base model of ChatGLM3-6B, ChatGLM3-6B-Base, employs a more diverse training dataset, more sufficient training steps, and a more reasonable training strategy. Evaluations on datasets such as semantics, mathematics, reasoning, code, knowledge, etc., show that ChatGLM3-6B-Base has the strongest performance among pre-trained models under 10B.
38
+ 2. **More Comprehensive Function Support:** ChatGLM3-6B adopts a newly designed [Prompt format](https://github.com/THUDM/ChatGLM3/blob/main/PROMPT_en.md), in addition to the normal multi-turn dialogue. It also natively supports [function call](https://github.com/THUDM/ChatGLM3/blob/main/tools_using_demo/README.md), code interpreter, and complex scenarios such as agent tasks.
39
+ 3. **More Comprehensive Open-source Series:** In addition to the dialogue model ChatGLM3-6B, the base model ChatGLM-6B-Base and the long-text dialogue model ChatGLM3-6B-32K are also open-sourced. All the weights are **fully open** for academic research, and after completing the [questionnaire](https://open.bigmodel.cn/mla/form) registration, they are also **allowed for free commercial use**.
40
+
41
+
42
+ ## 软件依赖 (Dependencies)
43
+
44
+ ```shell
45
+ pip install protobuf transformers==4.30.2 cpm_kernels torch>=2.0 gradio mdtex2html sentencepiece accelerate
46
+ ```
47
+
48
+ ## 代码调用 (Code Usage)
49
+
50
+ 可以通过如下代码调用 ChatGLM3-6B 模型来生成对话:
51
+
52
+ ```ipython
53
+ >>> from transformers import AutoTokenizer, AutoModel
54
+ >>> tokenizer = AutoTokenizer.from_pretrained(""THUDM/chatglm3-6b-32k"", trust_remote_code=True)
55
+ >>> model = AutoModel.from_pretrained(""THUDM/chatglm3-6b-32k"", trust_remote_code=True).half().cuda()
56
+ >>> model = model.eval()
57
+ >>> response, history = model.chat(tokenizer, ""你好"", history=[])
58
+ >>> print(response)
59
+ 你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。
60
+ >>> response, history = model.chat(tokenizer, ""晚上睡不着应该怎么办"", history=history)
61
+ >>> print(response)
62
+ 晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法:
63
+
64
+ 1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。
65
+ 2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。
66
+ 3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。
67
+ 4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。
68
+ 5. 避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。
69
+ 6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。
70
+
71
+ 如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。
72
+ ```
73
+
74
+ 关于更多的使用说明,包括如何运行命令行和网页版本的 DEMO,以及使用模型量化以节省显存,请参考我们的 [Github Repo](https://github.com/THUDM/ChatGLM)。
75
+
76
+ For more instructions, including how to run CLI and web demos, and model quantization, please refer to our [Github Repo](https://github.com/THUDM/ChatGLM).
77
+
78
+
79
+ ## 协议 (License)
80
+
81
+ 本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源,ChatGLM3-6B 模型的权重的使用则需要遵循 [Model License](MODEL_LICENSE)。
82
+
83
+ The code in this repository is open-sourced under the [Apache-2.0 license](LICENSE), while the use of the ChatGLM3-6B model weights needs to comply with the [Model License](MODEL_LICENSE).
84
+
85
+ ## 引用 (Citation)
86
+
87
+ 如果你觉得我们的工作有帮助的话,请考虑引用下列论文。
88
+
89
+ If you find our work helpful, please consider citing the following paper.
90
+
91
+ ```
92
+ @misc{glm2024chatglm,
93
+ title={ChatGLM: A Family of Large Language Models from GLM-130B to GLM-4 All Tools},
94
+ author={Team GLM and Aohan Zeng and Bin Xu and Bowen Wang and Chenhui Zhang and Da Yin and Diego Rojas and Guanyu Feng and Hanlin Zhao and Hanyu Lai and Hao Yu and Hongning Wang and Jiadai Sun and Jiajie Zhang and Jiale Cheng and Jiayi Gui and Jie Tang and Jing Zhang and Juanzi Li and Lei Zhao and Lindong Wu and Lucen Zhong and Mingdao Liu and Minlie Huang and Peng Zhang and Qinkai Zheng and Rui Lu and Shuaiqi Duan and Shudan Zhang and Shulin Cao and Shuxun Yang and Weng Lam Tam and Wenyi Zhao and Xiao Liu and Xiao Xia and Xiaohan Zhang and Xiaotao Gu and Xin Lv and Xinghan Liu and Xinyi Liu and Xinyue Yang and Xixuan Song and Xunkai Zhang and Yifan An and Yifan Xu and Yilin Niu and Yuantao Yang and Yueyan Li and Yushi Bai and Yuxiao Dong and Zehan Qi and Zhaoyu Wang and Zhen Yang and Zhengxiao Du and Zhenyu Hou and Zihan Wang},
95
+ year={2024},
96
+ eprint={2406.12793},
97
+ archivePrefix={arXiv},
98
+ primaryClass={id='cs.CL' full_name='Computation and Language' is_active=True alt_name='cmp-lg' in_archive='cs' is_general=False description='Covers natural language processing. Roughly includes material in ACM Subject Class I.2.7. Note that work on artificial languages (programming languages, logics, formal systems) that does not explicitly address natural-language issues broadly construed (natural-language processing, computational linguistics, speech, text retrieval, etc.) is not appropriate for this area.'}
99
+ }
100
+ ```
101
+ ","{""id"": ""THUDM/chatglm3-6b-32k"", ""author"": ""THUDM"", ""sha"": ""5e12a231fe1640a5bac73363de0044be6e20b54a"", ""last_modified"": ""2024-08-04 08:42:38+00:00"", ""created_at"": ""2023-10-26 13:04:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 348, ""downloads_all_time"": null, ""likes"": 245, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""chatglm"", ""glm"", ""thudm"", ""custom_code"", ""zh"", ""en"", ""arxiv:2103.10360"", ""arxiv:2210.02414"", ""arxiv:2406.12793"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\ntags:\n- glm\n- chatglm\n- thudm"", ""widget_data"": null, ""model_index"": null, ""config"": {""model_type"": ""chatglm"", ""architectures"": [""ChatGLMModel""], ""auto_map"": {""AutoConfig"": ""configuration_chatglm.ChatGLMConfig"", ""AutoModel"": ""modeling_chatglm.ChatGLMForConditionalGeneration"", ""AutoModelForCausalLM"": ""modeling_chatglm.ChatGLMForConditionalGeneration"", ""AutoModelForSeq2SeqLM"": ""modeling_chatglm.ChatGLMForConditionalGeneration"", ""AutoModelForSequenceClassification"": ""modeling_chatglm.ChatGLMForSequenceClassification""}, ""tokenizer_config"": {""chat_template"": ""{% for message in messages %}{% if loop.first %}[gMASK]sop<|{{ message['role'] }}|> \n {{ message['content'] }}{% else %}<|{{ message['role'] }}|> \n {{ message['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='MODEL_LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantization.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""eduagarcia/open_pt_llm_leaderboard"", ""Zulelee/langchain-chatchat"", ""larsthepenguin/trt-llm-rag-windows-main"", ""mohan007/sales_audio_analysis""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-04 08:42:38+00:00"", ""cardData"": ""language:\n- zh\n- en\ntags:\n- glm\n- chatglm\n- thudm"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""653a63fa32c97d06552cbbd8"", ""modelId"": ""THUDM/chatglm3-6b-32k"", ""usedStorage"": 
24975454611}",0,https://huggingface.co/Tridefender/chatglm3_6b_32k_TensorRTReady,1,https://huggingface.co/Jasomniacs/tinglm,1,,0,,0,"Zulelee/langchain-chatchat, eduagarcia/open_pt_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=THUDM/chatglm3-6b-32k&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTHUDM%2Fchatglm3-6b-32k%5D(%2FTHUDM%2Fchatglm3-6b-32k)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, larsthepenguin/trt-llm-rag-windows-main, mohan007/sales_audio_analysis",5
102
+ Tridefender/chatglm3_6b_32k_TensorRTReady,"---
103
+ license: apache-2.0
104
+ base_model:
105
+ - THUDM/chatglm3-6b-32k
106
+ tags:
107
+ - text-generation-inference
108
+ language:
109
+ - zh
110
+ pipeline_tag: text-generation
111
+ library_name: transformers
112
+ ---
113
+ This is a derived model of chatglm3-6b-32k, has been converted to TensorRT LLM checkpoint for further usage. The model is presented in different quantizations.","{""id"": ""Tridefender/chatglm3_6b_32k_TensorRTReady"", ""author"": ""Tridefender"", ""sha"": ""49da6ea4dd1a58138ff18b4a24682deb6b467103"", ""last_modified"": ""2024-12-05 19:09:41+00:00"", ""created_at"": ""2024-12-03 14:47:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""text-generation-inference"", ""text-generation"", ""zh"", ""base_model:THUDM/chatglm3-6b-32k"", ""base_model:finetune:THUDM/chatglm3-6b-32k"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- THUDM/chatglm3-6b-32k\nlanguage:\n- zh\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- text-generation-inference"", ""widget_data"": [{""text"": ""\u6211\u53eb\u6731\u5229\u5b89\uff0c\u6211\u559c\u6b22""}, {""text"": ""\u6211\u53eb\u6258\u9a6c\u65af\uff0c\u6211\u7684\u4e3b\u8981""}, {""text"": ""\u6211\u53eb\u739b\u4e3d\u4e9a\uff0c\u6211\u6700\u559c\u6b22\u7684""}, {""text"": ""\u6211\u53eb\u514b\u62c9\u62c9\uff0c\u6211\u662f""}, {""text"": ""\u4ece\u524d\uff0c""}], ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/1-gpu/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fp16/1-gpu/rank0.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='int4_weight_only/1-gpu/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='int4_weight_only/1-gpu/rank0.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='int8_weight_only/1-gpu/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='int8_weight_only/1-gpu/rank0.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-05 19:09:41+00:00"", ""cardData"": ""base_model:\n- THUDM/chatglm3-6b-32k\nlanguage:\n- zh\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- text-generation-inference"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""674f1a12c8b2fa1eded3f985"", ""modelId"": ""Tridefender/chatglm3_6b_32k_TensorRTReady"", ""usedStorage"": 23240072216}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Tridefender/chatglm3_6b_32k_TensorRTReady&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTridefender%2Fchatglm3_6b_32k_TensorRTReady%5D(%2FTridefender%2Fchatglm3_6b_32k_TensorRTReady)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
chinese-roberta-wwm-ext_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
The diff for this file is too large to render. See raw diff
 
deepseek-coder-33b-instruct_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ deepseek-ai/deepseek-coder-33b-instruct,"---
3
+ license: other
4
+ license_name: deepseek
5
+ license_link: LICENSE
6
+ ---
7
+
8
+
9
+ <p align=""center"">
10
+ <img width=""1000px"" alt=""DeepSeek Coder"" src=""https://github.com/deepseek-ai/DeepSeek-Coder/blob/main/pictures/logo.png?raw=true"">
11
+ </p>
12
+ <p align=""center""><a href=""https://www.deepseek.com/"">[🏠Homepage]</a> | <a href=""https://coder.deepseek.com/"">[🤖 Chat with DeepSeek Coder]</a> | <a href=""https://discord.gg/Tc7c45Zzu5"">[Discord]</a> | <a href=""https://github.com/guoday/assert/blob/main/QR.png?raw=true"">[Wechat(微信)]</a> </p>
13
+ <hr>
14
+
15
+
16
+
17
+ ### 1. Introduction of Deepseek Coder
18
+
19
+ Deepseek Coder is composed of a series of code language models, each trained from scratch on 2T tokens, with a composition of 87% code and 13% natural language in both English and Chinese. We provide various sizes of the code model, ranging from 1B to 33B versions. Each model is pre-trained on project-level code corpus by employing a window size of 16K and a extra fill-in-the-blank task, to support project-level code completion and infilling. For coding capabilities, Deepseek Coder achieves state-of-the-art performance among open-source code models on multiple programming languages and various benchmarks.
20
+
21
+ - **Massive Training Data**: Trained from scratch on 2T tokens, including 87% code and 13% linguistic data in both English and Chinese languages.
22
+
23
+ - **Highly Flexible & Scalable**: Offered in model sizes of 1.3B, 5.7B, 6.7B, and 33B, enabling users to choose the setup most suitable for their requirements.
24
+
25
+ - **Superior Model Performance**: State-of-the-art performance among publicly available code models on HumanEval, MultiPL-E, MBPP, DS-1000, and APPS benchmarks.
26
+
27
+ - **Advanced Code Completion Capabilities**: A window size of 16K and a fill-in-the-blank task, supporting project-level code completion and infilling tasks.
28
+
29
+
30
+
31
+ ### 2. Model Summary
32
+ deepseek-coder-33b-instruct is a 33B parameter model initialized from deepseek-coder-33b-base and fine-tuned on 2B tokens of instruction data.
33
+ - **Home Page:** [DeepSeek](https://deepseek.com/)
34
+ - **Repository:** [deepseek-ai/deepseek-coder](https://github.com/deepseek-ai/deepseek-coder)
35
+ - **Chat With DeepSeek Coder:** [DeepSeek-Coder](https://coder.deepseek.com/)
36
+
37
+
38
+ ### 3. How to Use
39
+ Here give some examples of how to use our model.
40
+ #### Chat Model Inference
41
+ ```python
42
+ from transformers import AutoTokenizer, AutoModelForCausalLM
43
+ tokenizer = AutoTokenizer.from_pretrained(""deepseek-ai/deepseek-coder-6.7b-instruct"", trust_remote_code=True)
44
+ model = AutoModelForCausalLM.from_pretrained(""deepseek-ai/deepseek-coder-6.7b-instruct"", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
45
+ messages=[
46
+ { 'role': 'user', 'content': ""write a quick sort algorithm in python.""}
47
+ ]
48
+ inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors=""pt"").to(model.device)
49
+ # tokenizer.eos_token_id is the id of <|EOT|> token
50
+ outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, top_k=50, top_p=0.95, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
51
+ print(tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True))
52
+ ```
53
+
54
+ ### 4. License
55
+ This code repository is licensed under the MIT License. The use of DeepSeek Coder models is subject to the Model License. DeepSeek Coder supports commercial use.
56
+
57
+ See the [LICENSE-MODEL](https://github.com/deepseek-ai/deepseek-coder/blob/main/LICENSE-MODEL) for more details.
58
+
59
+ ### 5. Contact
60
+
61
+ If you have any questions, please raise an issue or contact us at [agi_code@deepseek.com](mailto:agi_code@deepseek.com).
62
+
63
+ ","{""id"": ""deepseek-ai/deepseek-coder-33b-instruct"", ""author"": ""deepseek-ai"", ""sha"": ""61dc97b922b13995e7f83b7c8397701dbf9cfd4c"", ""last_modified"": ""2024-03-07 08:25:20+00:00"", ""created_at"": ""2023-11-01 05:46:34+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 8622, ""downloads_all_time"": null, ""likes"": 509, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""safetensors"", ""llama"", ""text-generation"", ""conversational"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\nlicense_name: deepseek\nlicense_link: LICENSE"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""<|EOT|>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""unk_token"": null, ""chat_template"": ""{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if 
message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00007.safetensors', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00007.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""bigcode/bigcode-models-leaderboard"", ""deepseek-ai/deepseek-coder-33b-instruct"", ""KBaba7/Quant"", ""Sarath0x8f/Document-QA-bot"", ""Justinrune/LLaMA-Factory"", ""HPAI-BSC/TuRTLe-Leaderboard"", ""yhavinga/dutch-tokenizer-arena"", ""kenken999/fastapi_django_main_live"", ""bhaskartripathi/LLM_Quantization"", ""21world/bigcode-models-leaderboard"", ""officialhimanshu595/llama-factory"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""bardsai/performance-llm-board"", ""Sivasubramoniam/deepseek-coder-33b-instruct"", ""ruslanmv/convert_to_gguf"", ""Tobidx/sales-email-generator"", ""Omanjelato/deepseek-ai-deepseek-coder-33b-instruct"", ""forceisthop/deepseek-ai-deepseek-coder-33b-instruct"", ""ChaitanyaNair06/deepseek-ai-deepseek-coder-33b-instruct"", ""Nikit-K/deepseek-ai-deepseek-coder-33b-instruct"", 
""lynquantumman/deepseek-ai-deepseek-coder-33b-instruct"", ""caxlar/deepseek-ai-deepseek-coder-33b-instruct"", ""ValkTrippin/deepseek-ai-deepseek-coder-33b-instruct"", ""lordbasil/deepseek-ai-deepseek-coder-33b-instruct"", ""DoubleTechnologies/deepseek-ai-deepseek-coder-33b-instruct"", ""iblfe/test"", ""awacke1/deepseek-ai-deepseek-coder-33b-instruct"", ""kkkw/deepseek-ai-deepseek-coder-33b-instruct"", ""Dineth1222/nova-code"", ""Dineth1222/code_nova_writer"", ""Nymbo/deepseek-coder-33b-instruct"", ""arshadkm/deepseek-ai-deepseek-coder-33b-instruct"", ""Houssemeddine/deepseek-coder-33b-instruct"", ""Akirami/code-llm-explorer"", ""Xhaheen/AI_safety_testing"", ""Xhaheen/phoeniks_redteamers"", ""atlasas/bigcode-models-leaderboard"", ""JesseLepota/deepseek-coder-33b-instruct"", ""broadfield-dev/deepseek-coder-33b-instruct"", ""vWinter/testing-yay"", ""samdo20/deepseek-coder-33b-instruct"", ""berkanyildirim/yapay_zekam"", ""Ayaku/deepseek-coder-33b-instruct"", ""parthgajera320/deepseek-coder-33b-instruct"", ""msun415/Llamole"", ""Starchik/deepseek-coder-33b-instruc"", ""musedivision/game-editor"", ""kk20krishna/Multi-tool_SmolAgent"", ""borisyich/my_agent_template"", ""skjaini/DeepSeek-AINews"", ""PLBot/Journi_clean"", ""sailokesh/Hello_GPT"", ""eko-junaidi-salam/deepseek-coder-33b-instruct"", ""K00B404/LLM_Quantization"", ""fmlemos/zeroshot-chatbot-openrouter"", ""davidizzle/LIA_CodiceDaVinci""], ""safetensors"": {""parameters"": {""BF16"": 33342991360}, ""total"": 33342991360}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-07 08:25:20+00:00"", ""cardData"": ""license: other\nlicense_name: deepseek\nlicense_link: LICENSE"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6541e63a5019954eefa3162c"", ""modelId"": ""deepseek-ai/deepseek-coder-33b-instruct"", ""usedStorage"": 
133372226408}",0,"https://huggingface.co/arvindanand/ValidateAI-2-33B-AT, https://huggingface.co/arvindanand/ValidateAI-3-33B-Ties, https://huggingface.co/ai-agnisys/lora_model, https://huggingface.co/ai-agnisys/ids-integrate-model",4,"https://huggingface.co/Ngit/mnv-deepseek33b, https://huggingface.co/NikitaZagainov/notebook-generation-deepseek-33b-2ep, https://huggingface.co/NikitaZagainov/notebook-generation-deepseek-33b-3ep, https://huggingface.co/NikitaZagainov/notebook-generation-deepseek-33b-1ep, https://huggingface.co/NikitaZagainov/notebook-generation-deepseek-33b-4ep, https://huggingface.co/NikitaZagainov/notebook-generation-deepseek-33b-5ep",6,"https://huggingface.co/TheBloke/deepseek-coder-33B-instruct-GGUF, https://huggingface.co/TheBloke/deepseek-coder-33B-instruct-AWQ, https://huggingface.co/TheBloke/deepseek-coder-33B-instruct-GPTQ, https://huggingface.co/altomek/deepseek-coder-33b-instruct-8bpw-EXL2, https://huggingface.co/mradermacher/deepseek-coder-33b-instruct-GGUF, https://huggingface.co/mradermacher/deepseek-coder-33b-instruct-i1-GGUF, https://huggingface.co/tensorblock/deepseek-coder-33b-instruct-GGUF, https://huggingface.co/TFSID/deepseek-coder-33b-instruct-Q4_K_M-GGUF",8,"https://huggingface.co/smokewulf/Code-01, https://huggingface.co/arvindanand/Deepseek-Wizard-33B-slerp",2,"21world/bigcode-models-leaderboard, FallnAI/Quantize-HF-Models, HPAI-BSC/TuRTLe-Leaderboard, Justinrune/LLaMA-Factory, KBaba7/Quant, Sarath0x8f/Document-QA-bot, bardsai/performance-llm-board, bhaskartripathi/LLM_Quantization, bigcode/bigcode-models-leaderboard, deepseek-ai/deepseek-coder-33b-instruct, 
huggingface/InferenceSupport/discussions/new?title=deepseek-ai/deepseek-coder-33b-instruct&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdeepseek-ai%2Fdeepseek-coder-33b-instruct%5D(%2Fdeepseek-ai%2Fdeepseek-coder-33b-instruct)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, kenken999/fastapi_django_main_live, yhavinga/dutch-tokenizer-arena",13
64
+ arvindanand/ValidateAI-2-33B-AT,"---
65
+ tags:
66
+ - merge
67
+ - mergekit
68
+ - lazymergekit
69
+ - WizardLM/WizardCoder-33B-V1.1
70
+ - codefuse-ai/CodeFuse-DeepSeek-33B
71
+ - deepseek-ai/deepseek-coder-33b-instruct
72
+
73
+ base_model:
74
+ - deepseek-ai/deepseek-coder-33b-instruct
75
+
76
+ license: apache-2.0
77
+ ---
78
+
79
+ # ValidateAI-2-33B-AT
80
+
81
+ ValidateAI-2-33B-AT is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
82
+ * [deepseek-ai/deepseek-coder-33b-instruct](https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct)
83
+ * [WizardLM/WizardCoder-33B-V1.1](https://huggingface.co/WizardLM/WizardCoder-33B-V1.1)
84
+ * [codefuse-ai/CodeFuse-DeepSeek-33B](https://huggingface.co/codefuse-ai/CodeFuse-DeepSeek-33B)
85
+ *
86
+
87
+ ## 🧩 Configuration
88
+
89
+ ```yaml
90
+ models:
91
+ - model: codefuse-ai_CodeFuse-DeepSeek-33B
92
+ parameters:
93
+ weight: 1
94
+ - model: deepseek-ai_deepseek-coder-33b-instruct
95
+ parameters:
96
+ weight: 1
97
+ - model: WizardLM_WizardCoder-33B-V1.1
98
+ parameters:
99
+ weight: 1
100
+ merge_method: task_arithmetic
101
+ base_model: deepseek-ai_deepseek-coder-33b-base
102
+ parameters:
103
+ normalize: true
104
+ int8_mask: true
105
+ dtype: float16
106
+ ```
107
+
108
+ ## 💻 Usage
109
+
110
+ ```python
111
+ !pip install -qU transformers accelerate
112
+
113
+ from transformers import AutoTokenizer
114
+ import transformers
115
+ import torch
116
+
117
+ model = ""arvindanand/ValidateAI-2-33B-AT""
118
+ messages = [{""role"": ""user"", ""content"": ""What is a large language model?""}]
119
+
120
+ tokenizer = AutoTokenizer.from_pretrained(model)
121
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
122
+ pipeline = transformers.pipeline(
123
+ ""text-generation"",
124
+ model=model,
125
+ torch_dtype=torch.float16,
126
+ device_map=""auto"",
127
+ )
128
+
129
+ outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
130
+ print(outputs[0][""generated_text""])
131
+ ```
132
+
133
+
134
+ ","{""id"": ""arvindanand/ValidateAI-2-33B-AT"", ""author"": ""arvindanand"", ""sha"": ""8ced5df95de3d9bf4052028cd02fd52c1f9a5256"", ""last_modified"": ""2024-04-16 02:06:40+00:00"", ""created_at"": ""2024-04-11 07:55:04+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""merge"", ""mergekit"", ""lazymergekit"", ""WizardLM/WizardCoder-33B-V1.1"", ""codefuse-ai/CodeFuse-DeepSeek-33B"", ""deepseek-ai/deepseek-coder-33b-instruct"", ""base_model:deepseek-ai/deepseek-coder-33b-instruct"", ""base_model:finetune:deepseek-ai/deepseek-coder-33b-instruct"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- deepseek-ai/deepseek-coder-33b-instruct\nlicense: apache-2.0\ntags:\n- merge\n- mergekit\n- lazymergekit\n- WizardLM/WizardCoder-33B-V1.1\n- codefuse-ai/CodeFuse-DeepSeek-33B\n- deepseek-ai/deepseek-coder-33b-instruct"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": 
false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""unk_token"": null}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 33342991360}, ""total"": 33342991360}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-16 02:06:40+00:00"", ""cardData"": ""base_model:\n- deepseek-ai/deepseek-coder-33b-instruct\nlicense: apache-2.0\ntags:\n- merge\n- mergekit\n- 
lazymergekit\n- WizardLM/WizardCoder-33B-V1.1\n- codefuse-ai/CodeFuse-DeepSeek-33B\n- deepseek-ai/deepseek-coder-33b-instruct"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66179758feb710dbf16d9a23"", ""modelId"": ""arvindanand/ValidateAI-2-33B-AT"", ""usedStorage"": 66686047680}",1,,0,,0,"https://huggingface.co/mradermacher/ValidateAI-2-33B-AT-GGUF, https://huggingface.co/mradermacher/ValidateAI-2-33B-AT-i1-GGUF",2,,0,huggingface/InferenceSupport/discussions/new?title=arvindanand/ValidateAI-2-33B-AT&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Barvindanand%2FValidateAI-2-33B-AT%5D(%2Farvindanand%2FValidateAI-2-33B-AT)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
135
+ arvindanand/ValidateAI-3-33B-Ties,"---
136
+ tags:
137
+ - merge
138
+ - mergekit
139
+ - lazymergekit
140
+ - WizardLM/WizardCoder-33B-V1.1
141
+ - codefuse-ai/CodeFuse-DeepSeek-33B
142
+ - deepseek-ai/deepseek-coder-33b-instruct
143
+
144
+ base_model:
145
+ - deepseek-ai/deepseek-coder-33b-instruct
146
+
147
+ license: apache-2.0
148
+ ---
149
+
150
+ # ValidateAI-2-33B-Ties
151
+
152
+ ValidateAI-2-33B-Ties is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
153
+ * [deepseek-ai/deepseek-coder-33b-instruct](https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct)
154
+ * [WizardLM/WizardCoder-33B-V1.1](https://huggingface.co/WizardLM/WizardCoder-33B-V1.1)
155
+ * [codefuse-ai/CodeFuse-DeepSeek-33B](https://huggingface.co/codefuse-ai/CodeFuse-DeepSeek-33B)
156
+ *
157
+
158
+ ## 🧩 Configuration
159
+
160
+ ```yaml
161
+ models:
162
+ - model: WizardLM_WizardCoder-33B-V1.1
163
+ parameters:
164
+ density: 1
165
+ weight: .5
166
+ - model: codefuse-ai_CodeFuse-DeepSeek-33B
167
+ parameters:
168
+ density: 1
169
+ weight: .5
170
+ merge_method: ties
171
+ base_model: deepseek-ai_deepseek-coder-33b-instruct
172
+ parameters:
173
+ normalize: true
174
+ int8_mask: true
175
+ dtype: float16
176
+ ```
177
+
178
+ ## 💻 Usage
179
+
180
+ ```python
181
+ !pip install -qU transformers accelerate
182
+
183
+ from transformers import AutoTokenizer
184
+ import transformers
185
+ import torch
186
+
187
+ model = ""arvindanand/ValidateAI-3-33B-Ties""
188
+ messages = [{""role"": ""user"", ""content"": ""What is a large language model?""}]
189
+
190
+ tokenizer = AutoTokenizer.from_pretrained(model)
191
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
192
+ pipeline = transformers.pipeline(
193
+ ""text-generation"",
194
+ model=model,
195
+ torch_dtype=torch.float16,
196
+ device_map=""auto"",
197
+ )
198
+
199
+ outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
200
+ print(outputs[0][""generated_text""])
201
+ ```","{""id"": ""arvindanand/ValidateAI-3-33B-Ties"", ""author"": ""arvindanand"", ""sha"": ""f556cfcf87f5ab1a88a296a9fa155be369c2f291"", ""last_modified"": ""2024-04-11 19:19:20+00:00"", ""created_at"": ""2024-04-11 19:18:25+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 10, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""merge"", ""mergekit"", ""lazymergekit"", ""WizardLM/WizardCoder-33B-V1.1"", ""codefuse-ai/CodeFuse-DeepSeek-33B"", ""deepseek-ai/deepseek-coder-33b-instruct"", ""conversational"", ""base_model:deepseek-ai/deepseek-coder-33b-instruct"", ""base_model:finetune:deepseek-ai/deepseek-coder-33b-instruct"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- deepseek-ai/deepseek-coder-33b-instruct\nlicense: apache-2.0\ntags:\n- merge\n- mergekit\n- lazymergekit\n- WizardLM/WizardCoder-33B-V1.1\n- codefuse-ai/CodeFuse-DeepSeek-33B\n- deepseek-ai/deepseek-coder-33b-instruct"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""<|EOT|>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, 
""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""unk_token"": null, ""chat_template"": ""{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00003-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F16"": 33342991360}, ""total"": 33342991360}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-11 19:19:20+00:00"", ""cardData"": ""base_model:\n- deepseek-ai/deepseek-coder-33b-instruct\nlicense: apache-2.0\ntags:\n- merge\n- mergekit\n- lazymergekit\n- WizardLM/WizardCoder-33B-V1.1\n- codefuse-ai/CodeFuse-DeepSeek-33B\n- deepseek-ai/deepseek-coder-33b-instruct"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""661837812a39149b64d577cc"", ""modelId"": ""arvindanand/ValidateAI-3-33B-Ties"", ""usedStorage"": 66686047680}",1,,0,,0,https://huggingface.co/mradermacher/ValidateAI-3-33B-Ties-GGUF,1,,0,huggingface/InferenceSupport/discussions/new?title=arvindanand/ValidateAI-3-33B-Ties&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Barvindanand%2FValidateAI-3-33B-Ties%5D(%2Farvindanand%2FValidateAI-3-33B-Ties)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
202
+ ai-agnisys/lora_model,"---
203
+ base_model: deepseek-ai/deepseek-coder-33b-instruct
204
+ tags:
205
+ - text-generation-inference
206
+ - transformers
207
+ - unsloth
208
+ - llama
209
+ - trl
210
+ license: apache-2.0
211
+ language:
212
+ - en
213
+ ---
214
+
215
+ # Uploaded model
216
+
217
+ - **Developed by:** ai-agnisys
218
+ - **License:** apache-2.0
219
+ - **Finetuned from model :** deepseek-ai/deepseek-coder-33b-instruct
220
+
221
+ This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
222
+
223
+ [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
224
+ ","{""id"": ""ai-agnisys/lora_model"", ""author"": ""ai-agnisys"", ""sha"": ""ef3123a3c600997ce9404fdb2ac646f0f67c48e3"", ""last_modified"": ""2025-02-21 05:22:20+00:00"", ""created_at"": ""2025-02-21 05:12:18+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""text-generation-inference"", ""unsloth"", ""llama"", ""trl"", ""en"", ""base_model:deepseek-ai/deepseek-coder-33b-instruct"", ""base_model:finetune:deepseek-ai/deepseek-coder-33b-instruct"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: deepseek-ai/deepseek-coder-33b-instruct\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if 'role' in messages[0] %}{% for message in messages %}{% if message['role'] == 'user' %}{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}{% else %}{% for message in messages %}{% if message['from'] == 'human' %}{{'<|im_start|>user\n' + message['value'] + '<|im_end|>\n'}}{% elif message['from'] == 'gpt' %}{{'<|im_start|>assistant\n' + message['value'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['value'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 
'<|im_start|>assistant\n' }}{% endif %}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-21 05:22:20+00:00"", ""cardData"": ""base_model: deepseek-ai/deepseek-coder-33b-instruct\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67b80b3266410e6ab8d48db9"", ""modelId"": ""ai-agnisys/lora_model"", ""usedStorage"": 492784136}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=ai-agnisys/lora_model&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bai-agnisys%2Flora_model%5D(%2Fai-agnisys%2Flora_model)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
225
+ ai-agnisys/ids-integrate-model,"---
226
+ base_model: deepseek-ai/deepseek-coder-33b-instruct
227
+ tags:
228
+ - text-generation-inference
229
+ - transformers
230
+ - unsloth
231
+ - llama
232
+ - trl
233
+ license: apache-2.0
234
+ language:
235
+ - en
236
+ ---
237
+
238
+ # Uploaded model
239
+
240
+ - **Developed by:** ai-agnisys
241
+ - **License:** apache-2.0
242
+ - **Finetuned from model :** deepseek-ai/deepseek-coder-33b-instruct
243
+
244
+ This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
245
+
246
+ [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
247
+ ","{""id"": ""ai-agnisys/ids-integrate-model"", ""author"": ""ai-agnisys"", ""sha"": ""113da3b775fb22825f102cb754ddd663c82a7913"", ""last_modified"": ""2025-04-08 08:49:43+00:00"", ""created_at"": ""2025-04-08 08:49:25+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""text-generation-inference"", ""unsloth"", ""llama"", ""trl"", ""en"", ""base_model:deepseek-ai/deepseek-coder-33b-instruct"", ""base_model:finetune:deepseek-ai/deepseek-coder-33b-instruct"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: deepseek-ai/deepseek-coder-33b-instruct\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""widget_data"": null, ""model_index"": null, ""config"": {""tokenizer_config"": {""bos_token"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""chat_template"": ""{% if 'role' in messages[0] %}{% for message in messages %}{% if message['role'] == 'user' %}{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}{% else %}{% for message in messages %}{% if message['from'] == 'human' %}{{'<|im_start|>user\n' + message['value'] + '<|im_end|>\n'}}{% elif message['from'] == 'gpt' %}{{'<|im_start|>assistant\n' + message['value'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['value'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 
'<|im_start|>assistant\n' }}{% endif %}{% endif %}"", ""eos_token"": ""<|im_end|>"", ""pad_token"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""unk_token"": null, ""use_default_system_prompt"": false}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-08 08:49:43+00:00"", ""cardData"": ""base_model: deepseek-ai/deepseek-coder-33b-instruct\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67f4e3157d59d3812fd314ed"", ""modelId"": ""ai-agnisys/ids-integrate-model"", ""usedStorage"": 492784136}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=ai-agnisys/ids-integrate-model&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bai-agnisys%2Fids-integrate-model%5D(%2Fai-agnisys%2Fids-integrate-model)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
emotion-english-distilroberta-base_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ j-hartmann/emotion-english-distilroberta-base,"---
3
+ language: ""en""
4
+ tags:
5
+ - distilroberta
6
+ - sentiment
7
+ - emotion
8
+ - twitter
9
+ - reddit
10
+
11
+ widget:
12
+ - text: ""Oh wow. I didn't know that.""
13
+ - text: ""This movie always makes me cry..""
14
+ - text: ""Oh Happy Day""
15
+
16
+ ---
17
+
18
+ # Emotion English DistilRoBERTa-base
19
+
20
+ # Description ℹ
21
+
22
+ With this model, you can classify emotions in English text data. The model was trained on 6 diverse datasets (see Appendix below) and predicts Ekman's 6 basic emotions, plus a neutral class:
23
+
24
+ 1) anger 🤬
25
+ 2) disgust 🤢
26
+ 3) fear 😨
27
+ 4) joy 😀
28
+ 5) neutral 😐
29
+ 6) sadness 😭
30
+ 7) surprise 😲
31
+
32
+ The model is a fine-tuned checkpoint of [DistilRoBERTa-base](https://huggingface.co/distilroberta-base). For a 'non-distilled' emotion model, please refer to the model card of the [RoBERTa-large](https://huggingface.co/j-hartmann/emotion-english-roberta-large) version.
33
+
34
+ # Application 🚀
35
+
36
+ a) Run emotion model with 3 lines of code on single text example using Hugging Face's pipeline command on Google Colab:
37
+
38
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/j-hartmann/emotion-english-distilroberta-base/blob/main/simple_emotion_pipeline.ipynb)
39
+
40
+ ```python
41
+ from transformers import pipeline
42
+ classifier = pipeline(""text-classification"", model=""j-hartmann/emotion-english-distilroberta-base"", return_all_scores=True)
43
+ classifier(""I love this!"")
44
+ ```
45
+
46
+ ```python
47
+ Output:
48
+ [[{'label': 'anger', 'score': 0.004419783595949411},
49
+ {'label': 'disgust', 'score': 0.0016119900392368436},
50
+ {'label': 'fear', 'score': 0.0004138521908316761},
51
+ {'label': 'joy', 'score': 0.9771687984466553},
52
+ {'label': 'neutral', 'score': 0.005764586851000786},
53
+ {'label': 'sadness', 'score': 0.002092392183840275},
54
+ {'label': 'surprise', 'score': 0.008528684265911579}]]
55
+ ```
56
+
57
+ b) Run emotion model on multiple examples and full datasets (e.g., .csv files) on Google Colab:
58
+
59
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/j-hartmann/emotion-english-distilroberta-base/blob/main/emotion_prediction_example.ipynb)
60
+
61
+ # Contact 💻
62
+
63
+ Please reach out to [jochen.hartmann@tum.de](mailto:jochen.hartmann@tum.de) if you have any questions or feedback.
64
+
65
+ Thanks to Samuel Domdey and [chrsiebert](https://huggingface.co/siebert) for their support in making this model available.
66
+
67
+ # Reference ✅
68
+
69
+ For attribution, please cite the following reference if you use this model. A working paper will be available soon.
70
+
71
+ ```
72
+ Jochen Hartmann, ""Emotion English DistilRoBERTa-base"". https://huggingface.co/j-hartmann/emotion-english-distilroberta-base/, 2022.
73
+ ```
74
+
75
+ BibTex citation:
76
+
77
+ ```
78
+ @misc{hartmann2022emotionenglish,
79
+ author={Hartmann, Jochen},
80
+ title={Emotion English DistilRoBERTa-base},
81
+ year={2022},
82
+ howpublished = {\url{https://huggingface.co/j-hartmann/emotion-english-distilroberta-base/}},
83
+ }
84
+ ```
85
+
86
+ # Appendix 📚
87
+
88
+ Please find an overview of the datasets used for training below. All datasets contain English text. The table summarizes which emotions are available in each of the datasets. The datasets represent a diverse collection of text types. Specifically, they contain emotion labels for texts from Twitter, Reddit, student self-reports, and utterances from TV dialogues. As MELD (Multimodal EmotionLines Dataset) extends the popular EmotionLines dataset, EmotionLines itself is not included here.
89
+
90
+ |Name|anger|disgust|fear|joy|neutral|sadness|surprise|
91
+ |---|---|---|---|---|---|---|---|
92
+ |Crowdflower (2016)|Yes|-|-|Yes|Yes|Yes|Yes|
93
+ |Emotion Dataset, Elvis et al. (2018)|Yes|-|Yes|Yes|-|Yes|Yes|
94
+ |GoEmotions, Demszky et al. (2020)|Yes|Yes|Yes|Yes|Yes|Yes|Yes|
95
+ |ISEAR, Vikash (2018)|Yes|Yes|Yes|Yes|-|Yes|-|
96
+ |MELD, Poria et al. (2019)|Yes|Yes|Yes|Yes|Yes|Yes|Yes|
97
+ |SemEval-2018, EI-reg, Mohammad et al. (2018) |Yes|-|Yes|Yes|-|Yes|-|
98
+
99
+ The model is trained on a balanced subset from the datasets listed above (2,811 observations per emotion, i.e., nearly 20k observations in total). 80% of this balanced subset is used for training and 20% for evaluation. The evaluation accuracy is 66% (vs. the random-chance baseline of 1/7 = 14%).
100
+
101
+ # Scientific Applications 📖
102
+
103
+ Below you can find a list of papers using ""Emotion English DistilRoBERTa-base"". If you would like your paper to be added to the list, please send me an email.
104
+
105
+ - Butt, S., Sharma, S., Sharma, R., Sidorov, G., & Gelbukh, A. (2022). What goes on inside rumour and non-rumour tweets and their reactions: A Psycholinguistic Analyses. Computers in Human Behavior, 107345.
106
+ - Kuang, Z., Zong, S., Zhang, J., Chen, J., & Liu, H. (2022). Music-to-Text Synaesthesia: Generating Descriptive Text from Music Recordings. arXiv preprint arXiv:2210.00434.
107
+ - Rozado, D., Hughes, R., & Halberstadt, J. (2022). Longitudinal analysis of sentiment and emotion in news media headlines using automated labelling with Transformer language models. Plos one, 17(10), e0276367.","{""id"": ""j-hartmann/emotion-english-distilroberta-base"", ""author"": ""j-hartmann"", ""sha"": ""0e1cd914e3d46199ed785853e12b57304e04178b"", ""last_modified"": ""2023-01-02 13:03:10+00:00"", ""created_at"": ""2022-03-02 23:29:05+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1093905, ""downloads_all_time"": null, ""likes"": 402, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""tf"", ""roberta"", ""text-classification"", ""distilroberta"", ""sentiment"", ""emotion"", ""twitter"", ""reddit"", ""en"", ""arxiv:2210.00434"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""language: en\ntags:\n- distilroberta\n- sentiment\n- emotion\n- twitter\n- reddit\nwidget:\n- text: Oh wow. I didn't know that.\n- text: This movie always makes me cry..\n- text: Oh Happy Day"", ""widget_data"": [{""text"": ""Oh wow. 
I didn't know that.""}, {""text"": ""This movie always makes me cry..""}, {""text"": ""Oh Happy Day""}], ""model_index"": null, ""config"": {""architectures"": [""RobertaForSequenceClassification""], ""model_type"": ""roberta"", ""tokenizer_config"": {""unk_token"": ""<unk>"", ""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""sep_token"": ""</s>"", ""cls_token"": ""<s>"", ""pad_token"": ""<pad>"", ""mask_token"": ""<mask>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tf_model.h5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ja-818/speech_and_text_emotion_recognition"", ""j-hartmann/emotion-classification-from-csv"", ""Roozeec/World-News-Analysis"", ""Pradheep1647/multi-modal-emotion-recognition"", ""j-hartmann/emotion-similarity"", ""gshotwell/multi-query-sentiment"", ""kparkhade/nlp-genius"", ""dbleek/cs-gy-6613-project-final"", ""Alesmikes/speech_and_text_emotion_recognition"", ""HEHEBOIOG/NewsDistil"", ""raphgonda/MultimodalEmotionAnalysis"", ""Pranav0111/Emotion_detector"", 
""invincible-jha/MentalHealthVocalBiomarkers"", ""jitesh/storytelling"", ""awacke1/CSVSentiment"", ""awacke1/CSV2ClassifyVisualization"", ""poiiii/clefourrier-graphormer-base-pcqm4mv1"", ""lRoz/j-hartmann-emotion-english-distilroberta-base"", ""elitecode/Detect_Emotions"", ""abrar-adnan/speech-analyzer"", ""awacke1/HEDIS.Dash.Component.Top.Clinical.Terminology.Vocabulary"", ""jpdiazpardo/jpdiazpardo-whisper-tiny-metal"", ""ananyachavan/Speechemo"", ""Soham98/EmotionDetection"", ""vitorcalvi/MMESA-CPU"", ""batlahiya/Urban_Sentiment_Analysis"", ""nehapasricha94/LLaVA-image-analysis"", ""nehapasricha94/llava-image-analysis-v2"", ""Anupam251272/AI-Speech-Analysis-App-RAG"", ""nayaaoun/nlp_yaya_antz"", ""krushna123/nlp-sentiment-app"", ""jaisun2004/atmasessionsummarizer"", ""safwansajad/Emotion_Detection_GPT"", ""Suzana/text_basic_emotions"", ""asingh6/patient-sentiment-analysis"", ""Puree/Lab_10"", ""samueldomdey/Emotion"", ""Guynaris/Lab10_620510564"", ""dbleek/cs-gy-6613-project"", ""jlaf01/sentiment-analysis-app"", ""dperales/ITACA_Insurance_Core_v4"", ""whitphx/streamlit-multi-query-sentiment-analysis"", ""n4th/nlp-reviews"", ""Pruthul/CS482-104-Milestone2"", ""Ozziey/emotion-classifier-from-string"", ""dk3156/sentiment_analysis_app"", ""xingxing12/j-hartmann-emotion-english-distilroberta-base"", ""kya5/CS482-Milestone2"", ""danielperales/ITACA_Insurace_NLP_v2"", ""JKJanosko/Toxicity-Analysis"", ""asdadaa/j-hartmann-emotion-english-distilroberta-base"", ""ShadowDominator/emotion-classification"", ""ShadowDominator/sentiment-analysis"", ""darthPanda/SentimentAnalysisTool"", ""kahvem/aisense"", ""mohanchinnappan/senti"", ""leolinardi/j-hartmann-emotion-english-distilroberta-base"", ""utkarsh95m/j-hartmann-emotion-english-distilroberta-base"", ""devanshsrivastav/GoEmotions"", ""After-the-Dark/sentiment-analysis"", ""SudhanshuBlaze/EmoDash"", ""qnbhd/dp-dialogs-21"", ""devanshsrivastav/goemotions2"", ""dynamicmortal/model_testing"", ""dynamicmortal/outlines"", 
""arabooke/j-hartmann-emotion-english-distilroberta-base"", ""miscjose/Data-Annotation-Tool"", ""AliXaidi/Emotion_Classifier"", ""AroojImtiaz/Emotions_Classification"", ""ganeshkamath89/World-News-Analysis"", ""KulsoomBibi/Emotion_Detection"", ""dawng88/sachacks2023"", ""kanra208/NewAgeAI"", ""GiladtheFixer/textClassification"", ""Pavani2704/tasks"", ""Manoj6304/Emotion_Dectection"", ""rafaldembski/World-News-Analysis"", ""mksaad/Emotion-detector"", ""mksaad/emotion-detection-eng"", ""ahmedElsaghir/Emotion-detector"", ""EsoCode/poem_analysis"", ""ghuman7/Depreesion"", ""Eesha123/Link"", ""Eesha123/eesha1"", ""mksaad/EmotionDetectionApp"", ""ANashaat143/emotion_detection_App"", ""balaji0810/genaifeatures"", ""ram2201/genaiutils"", ""vitorcalvi/mmesa-gpu-gitex"", ""Rafay17/sentiment_analysis"", ""mksaad/emotion-transfomer"", ""martella/emotion-transformer"", ""SaraDanaKablTalabani/j-hartmann-emotion-english-distilroberta-base"", ""pj164/dbp"", ""availe/ai-emotion-analyzer"", ""mars1198/proba_space"", ""izhan001/Virtual-Psychologist"", ""izhan001/personal-psychologist"", ""izhan001/Psych-Buddy"", ""JSenkCC/MovieRecommender""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-01-02 13:03:10+00:00"", ""cardData"": ""language: en\ntags:\n- distilroberta\n- sentiment\n- emotion\n- twitter\n- reddit\nwidget:\n- text: Oh wow. 
I didn't know that.\n- text: This movie always makes me cry..\n- text: Oh Happy Day"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""621ffdc136468d709f17c9d0"", ""modelId"": ""j-hartmann/emotion-english-distilroberta-base"", ""usedStorage"": 1642857094}",0,"https://huggingface.co/lunadebruyne/test_trainer, https://huggingface.co/zuriati/results, https://huggingface.co/shengqizhao0124/emotion_trainer",3,,0,,0,,0,"Pradheep1647/multi-modal-emotion-recognition, Pranav0111/Emotion_detector, Rafay17/sentiment_analysis, Roozeec/World-News-Analysis, batlahiya/Urban_Sentiment_Analysis, dbleek/cs-gy-6613-project-final, gshotwell/multi-query-sentiment, huggingface/InferenceSupport/discussions/new?title=j-hartmann/emotion-english-distilroberta-base&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bj-hartmann%2Femotion-english-distilroberta-base%5D(%2Fj-hartmann%2Femotion-english-distilroberta-base)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, j-hartmann/emotion-classification-from-csv, j-hartmann/emotion-similarity, ja-818/speech_and_text_emotion_recognition, kparkhade/nlp-genius, raphgonda/MultimodalEmotionAnalysis",13
108
+ lunadebruyne/test_trainer,"---
109
+ base_model: j-hartmann/emotion-english-distilroberta-base
110
+ tags:
111
+ - generated_from_trainer
112
+ model-index:
113
+ - name: test_trainer
114
+ results: []
115
+ ---
116
+
117
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
118
+ should probably proofread and complete it, then remove this comment. -->
119
+
120
+ # test_trainer
121
+
122
+ This model is a fine-tuned version of [j-hartmann/emotion-english-distilroberta-base](https://huggingface.co/j-hartmann/emotion-english-distilroberta-base) on an unknown dataset.
123
+
124
+ ## Model description
125
+
126
+ More information needed
127
+
128
+ ## Intended uses & limitations
129
+
130
+ More information needed
131
+
132
+ ## Training and evaluation data
133
+
134
+ More information needed
135
+
136
+ ## Training procedure
137
+
138
+ ### Training hyperparameters
139
+
140
+ The following hyperparameters were used during training:
141
+ - learning_rate: 5e-05
142
+ - train_batch_size: 8
143
+ - eval_batch_size: 8
144
+ - seed: 42
145
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
146
+ - lr_scheduler_type: linear
147
+ - num_epochs: 3.0
148
+
149
+ ### Training results
150
+
151
+
152
+
153
+ ### Framework versions
154
+
155
+ - Transformers 4.37.2
156
+ - Pytorch 2.1.0+cu121
157
+ - Tokenizers 0.15.2
158
+ ","{""id"": ""lunadebruyne/test_trainer"", ""author"": ""lunadebruyne"", ""sha"": ""23ca94b95d863acb6b2ffd48ec1e6a7c7e33368a"", ""last_modified"": ""2024-02-21 11:07:29+00:00"", ""created_at"": ""2024-02-21 11:07:14+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 11, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""roberta"", ""text-classification"", ""generated_from_trainer"", ""base_model:j-hartmann/emotion-english-distilroberta-base"", ""base_model:finetune:j-hartmann/emotion-english-distilroberta-base"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model: j-hartmann/emotion-english-distilroberta-base\ntags:\n- generated_from_trainer\nmodel-index:\n- name: test_trainer\n results: []"", ""widget_data"": [{""text"": ""I like you. 
I love you""}], ""model_index"": [{""name"": ""test_trainer"", ""results"": []}], ""config"": {""architectures"": [""RobertaForSequenceClassification""], ""model_type"": ""roberta""}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_10-30-12_385d7f6a99a0/events.out.tfevents.1708511413.385d7f6a99a0.950.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_10-33-10_385d7f6a99a0/events.out.tfevents.1708511591.385d7f6a99a0.950.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_10-34-02_385d7f6a99a0/events.out.tfevents.1708511642.385d7f6a99a0.950.2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_10-34-22_385d7f6a99a0/events.out.tfevents.1708511662.385d7f6a99a0.950.3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_10-38-01_385d7f6a99a0/events.out.tfevents.1708511882.385d7f6a99a0.950.4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_10-38-17_385d7f6a99a0/events.out.tfevents.1708511899.385d7f6a99a0.950.5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_10-42-33_385d7f6a99a0/events.out.tfevents.1708512154.385d7f6a99a0.950.6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_10-44-52_385d7f6a99a0/events.out.tfevents.1708512293.385d7f6a99a0.950.7', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_10-45-56_385d7f6a99a0/events.out.tfevents.1708512356.385d7f6a99a0.950.8', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_11-00-19_385d7f6a99a0/events.out.tfevents.1708513220.385d7f6a99a0.950.9', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb21_11-07-07_385d7f6a99a0/events.out.tfevents.1708513627.385d7f6a99a0.950.10', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 82124552}, ""total"": 82124552}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-21 11:07:29+00:00"", ""cardData"": ""base_model: j-hartmann/emotion-english-distilroberta-base\ntags:\n- generated_from_trainer\nmodel-index:\n- name: test_trainer\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""65d5d9624f9684c48f4dedea"", ""modelId"": ""lunadebruyne/test_trainer"", ""usedStorage"": 328569723}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=lunadebruyne/test_trainer&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Blunadebruyne%2Ftest_trainer%5D(%2Flunadebruyne%2Ftest_trainer)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
159
+ zuriati/results,"---
160
+ library_name: transformers
161
+ base_model: j-hartmann/emotion-english-distilroberta-base
162
+ tags:
163
+ - generated_from_trainer
164
+ metrics:
165
+ - accuracy
166
+ model-index:
167
+ - name: results
168
+ results: []
169
+ ---
170
+
171
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
172
+ should probably proofread and complete it, then remove this comment. -->
173
+
174
+ # results
175
+
176
+ This model is a fine-tuned version of [j-hartmann/emotion-english-distilroberta-base](https://huggingface.co/j-hartmann/emotion-english-distilroberta-base) on an unknown dataset.
177
+ It achieves the following results on the evaluation set:
178
+ - Loss: 0.3514
179
+ - Accuracy: 0.9365
180
+
181
+ ## Model description
182
+
183
+ More information needed
184
+
185
+ ## Intended uses & limitations
186
+
187
+ More information needed
188
+
189
+ ## Training and evaluation data
190
+
191
+ More information needed
192
+
193
+ ## Training procedure
194
+
195
+ ### Training hyperparameters
196
+
197
+ The following hyperparameters were used during training:
198
+ - learning_rate: 2e-05
199
+ - train_batch_size: 16
200
+ - eval_batch_size: 64
201
+ - seed: 42
202
+ - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
203
+ - lr_scheduler_type: linear
204
+ - num_epochs: 3
205
+
206
+ ### Training results
207
+
208
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
209
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
210
+ | 0.0542 | 1.0 | 1000 | 0.3514 | 0.9365 |
211
+ | 0.0515 | 2.0 | 2000 | 0.3438 | 0.935 |
212
+ | 0.0825 | 3.0 | 3000 | 0.3766 | 0.9315 |
213
+
214
+
215
+ ### Framework versions
216
+
217
+ - Transformers 4.48.3
218
+ - Pytorch 2.5.1+cu124
219
+ - Datasets 3.3.2
220
+ - Tokenizers 0.21.0
221
+ ","{""id"": ""zuriati/results"", ""author"": ""zuriati"", ""sha"": ""3da1b66781997b47a143da19c14be48639496c7c"", ""last_modified"": ""2025-03-03 09:43:18+00:00"", ""created_at"": ""2025-03-03 09:41:30+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""roberta"", ""text-classification"", ""generated_from_trainer"", ""base_model:j-hartmann/emotion-english-distilroberta-base"", ""base_model:finetune:j-hartmann/emotion-english-distilroberta-base"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model: j-hartmann/emotion-english-distilroberta-base\nlibrary_name: transformers\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: results\n results: []"", ""widget_data"": [{""text"": ""I like you. 
I love you""}], ""model_index"": [{""name"": ""results"", ""results"": []}], ""config"": {""architectures"": [""RobertaForSequenceClassification""], ""model_type"": ""roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Mar03_07-19-03_b19b2cc4aed0/events.out.tfevents.1740986345.b19b2cc4aed0.5627.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 82123783}, ""total"": 82123783}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-03 09:43:18+00:00"", ""cardData"": ""base_model: j-hartmann/emotion-english-distilroberta-base\nlibrary_name: transformers\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: results\n results: []"", ""transformersInfo"": {""auto_model"": 
""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67c5794a64747ab8d4504393"", ""modelId"": ""zuriati/results"", ""usedStorage"": 328521007}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=zuriati/results&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bzuriati%2Fresults%5D(%2Fzuriati%2Fresults)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
222
+ shengqizhao0124/emotion_trainer,"---
223
+ library_name: transformers
224
+ base_model: j-hartmann/emotion-english-distilroberta-base
225
+ tags:
226
+ - generated_from_trainer
227
+ metrics:
228
+ - accuracy
229
+ - precision
230
+ - recall
231
+ - f1
232
+ model-index:
233
+ - name: emotion_trainer
234
+ results: []
235
+ ---
236
+
237
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
238
+ should probably proofread and complete it, then remove this comment. -->
239
+
240
+ # emotion_trainer
241
+
242
+ This model is a fine-tuned version of [j-hartmann/emotion-english-distilroberta-base](https://huggingface.co/j-hartmann/emotion-english-distilroberta-base) on an unknown dataset.
243
+ It achieves the following results on the evaluation set:
244
+ - Loss: 0.3926
245
+ - Accuracy: 0.926
246
+ - Precision: 0.9268
247
+ - Recall: 0.926
248
+ - F1: 0.9257
249
+
250
+ ## Model description
251
+
252
+ More information needed
253
+
254
+ ## Intended uses & limitations
255
+
256
+ More information needed
257
+
258
+ ## Training and evaluation data
259
+
260
+ More information needed
261
+
262
+ ## Training procedure
263
+
264
+ ### Training hyperparameters
265
+
266
+ The following hyperparameters were used during training:
267
+ - learning_rate: 5e-05
268
+ - train_batch_size: 8
269
+ - eval_batch_size: 8
270
+ - seed: 42
271
+ - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
272
+ - lr_scheduler_type: linear
273
+ - num_epochs: 5
274
+
275
+ ### Training results
276
+
277
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 |
278
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
279
+ | 0.2788 | 1.0 | 625 | 0.3926 | 0.926 | 0.9268 | 0.926 | 0.9257 |
280
+ | 0.04 | 2.0 | 1250 | 0.4845 | 0.922 | 0.9232 | 0.922 | 0.9214 |
281
+ | 0.0834 | 3.0 | 1875 | 0.4531 | 0.93 | 0.9313 | 0.93 | 0.9303 |
282
+ | 0.0364 | 4.0 | 2500 | 0.5419 | 0.9225 | 0.9233 | 0.9225 | 0.9221 |
283
+ | 0.0252 | 5.0 | 3125 | 0.5278 | 0.9265 | 0.9261 | 0.9265 | 0.9260 |
284
+
285
+
286
+ ### Framework versions
287
+
288
+ - Transformers 4.50.0
289
+ - Pytorch 2.6.0+cu124
290
+ - Datasets 3.4.1
291
+ - Tokenizers 0.21.1
292
+ ","{""id"": ""shengqizhao0124/emotion_trainer"", ""author"": ""shengqizhao0124"", ""sha"": ""3421b9a09cd42c42a065b3490b26f3dab1780a62"", ""last_modified"": ""2025-03-27 11:53:08+00:00"", ""created_at"": ""2025-03-27 10:09:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 103, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""roberta"", ""text-classification"", ""generated_from_trainer"", ""base_model:j-hartmann/emotion-english-distilroberta-base"", ""base_model:finetune:j-hartmann/emotion-english-distilroberta-base"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": ""<mask>"", ""trending_score"": null, ""card_data"": ""base_model: j-hartmann/emotion-english-distilroberta-base\nlibrary_name: transformers\nmetrics:\n- accuracy\n- precision\n- recall\n- f1\ntags:\n- generated_from_trainer\nmodel-index:\n- name: emotion_trainer\n results: []"", ""widget_data"": [{""text"": ""I like you. 
I love you""}], ""model_index"": [{""name"": ""emotion_trainer"", ""results"": []}], ""config"": {""architectures"": [""RobertaForSequenceClassification""], ""model_type"": ""roberta"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Mar27_09-54-13_1954b9371c03/events.out.tfevents.1743069259.1954b9371c03.792.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Mar27_09-54-13_1954b9371c03/events.out.tfevents.1743069427.1954b9371c03.792.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Mar27_11-23-05_eea10b4bf426/events.out.tfevents.1743074588.eea10b4bf426.1567.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Mar27_11-23-05_eea10b4bf426/events.out.tfevents.1743074724.eea10b4bf426.1567.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Mar27_11-23-05_eea10b4bf426/events.out.tfevents.1743075301.eea10b4bf426.1567.2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""JoshuaZywoo/5240FInalProject""], ""safetensors"": {""parameters"": {""F32"": 82123783}, ""total"": 82123783}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-27 11:53:08+00:00"", ""cardData"": ""base_model: j-hartmann/emotion-english-distilroberta-base\nlibrary_name: transformers\nmetrics:\n- accuracy\n- precision\n- recall\n- f1\ntags:\n- generated_from_trainer\nmodel-index:\n- name: emotion_trainer\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": null, ""pipeline_tag"": ""text-classification"", ""processor"": ""AutoTokenizer""}, ""_id"": ""67e523f66fee086a04110bf4"", ""modelId"": ""shengqizhao0124/emotion_trainer"", ""usedStorage"": 657151206}",1,,0,,0,,0,,0,"JoshuaZywoo/5240FInalProject, huggingface/InferenceSupport/discussions/new?title=shengqizhao0124/emotion_trainer&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bshengqizhao0124%2Femotion_trainer%5D(%2Fshengqizhao0124%2Femotion_trainer)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
falcon-40b-instruct-GPTQ_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv ADDED
@@ -0,0 +1,396 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ TheBloke/falcon-40b-instruct-GPTQ,"---
3
+ datasets:
4
+ - tiiuae/falcon-refinedweb
5
+ license: apache-2.0
6
+ language:
7
+ - en
8
+ inference: false
9
+ ---
10
+
11
+ <!-- header start -->
12
+ <!-- 200823 -->
13
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
14
+ <img src=""https://i.imgur.com/EBdldam.jpg"" alt=""TheBlokeAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
15
+ </div>
16
+ <div style=""display: flex; justify-content: space-between; width: 100%;"">
17
+ <div style=""display: flex; flex-direction: column; align-items: flex-start;"">
18
+ <p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://discord.gg/theblokeai"">Chat & support: TheBloke's Discord server</a></p>
19
+ </div>
20
+ <div style=""display: flex; flex-direction: column; align-items: flex-end;"">
21
+ <p style=""margin-top: 0.5em; margin-bottom: 0em;""><a href=""https://www.patreon.com/TheBlokeAI"">Want to contribute? TheBloke's Patreon page</a></p>
22
+ </div>
23
+ </div>
24
+ <div style=""text-align:center; margin-top: 0em; margin-bottom: 0em""><p style=""margin-top: 0.25em; margin-bottom: 0em;"">TheBloke's LLM work is generously supported by a grant from <a href=""https://a16z.com"">andreessen horowitz (a16z)</a></p></div>
25
+ <hr style=""margin-top: 1.0em; margin-bottom: 1.0em;"">
26
+ <!-- header end -->
27
+
28
+ # Falcon-40B-Instruct 4bit GPTQ
29
+
30
+ This repo contains an experimental GPTQ 4bit model for [Falcon-40B-Instruct](https://huggingface.co/tiiuae/falcon-40b-instruct).
31
+
32
+ It is the result of quantising to 4bit using [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ).
33
+
34
+ ## Repositories available
35
+
36
+ * [4-bit GPTQ model for GPU inference](https://huggingface.co/TheBloke/falcon-40b-instruct-GPTQ)
37
+ * [3-bit GPTQ model for GPU inference](https://huggingface.co/TheBloke/falcon-40b-instruct-3bit-GPTQ)
38
+ * [2, 3, 4, 5, 6, 8-bit GGML models for CPU+GPU inference](https://huggingface.co/TheBloke/falcon-40b-instruct-GGML)
39
+ * [Unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/tiiuae/falcon-40b-instruct)
40
+
41
+ ## Prompt template
42
+
43
+ ```
44
+ A helpful assistant who helps the user with any questions asked.
45
+ User: prompt
46
+ Assistant:
47
+ ```
48
+
49
+ ## EXPERIMENTAL
50
+
51
+ Please note this is an experimental GPTQ model. Support for it is currently quite limited.
52
+
53
+ It is also expected to be **VERY SLOW**. This is currently unavoidable, but is being looked at.
54
+
55
+ This 4bit model requires at least 35GB VRAM to load. It can be used on 40GB or 48GB cards, but not less.
56
+
57
+ Please be aware that you should currently expect around 0.7 tokens/s on 40B Falcon GPTQ.
58
+
59
+ ## AutoGPTQ
60
+
61
+ AutoGPTQ is required: `pip install auto-gptq`
62
+
63
+ AutoGPTQ provides pre-compiled wheels for Windows and Linux, with CUDA toolkit 11.7 or 11.8.
64
+
65
+ If you are running CUDA toolkit 12.x, you will need to compile your own by following these instructions:
66
+
67
+ ```
68
+ git clone https://github.com/PanQiWei/AutoGPTQ
69
+ cd AutoGPTQ
70
+ pip install .
71
+ ```
72
+
73
+ These manual steps will require that you have the [Nvidia CUDA toolkit](https://developer.nvidia.com/cuda-12-0-1-download-archive) installed.
74
+
75
+ ## text-generation-webui
76
+
77
+ There is provisional AutoGPTQ support in text-generation-webui.
78
+
79
+ This requires text-generation-webui as of commit 204731952ae59d79ea3805a425c73dd171d943c3.
80
+
81
+ So please first update text-generation-webui to the latest version.
82
+
83
+ ## How to download and use this model in text-generation-webui
84
+
85
+ 1. Launch text-generation-webui
86
+ 2. Click the **Model tab**.
87
+ 3. Untick **Autoload model**
88
+ 4. Under **Download custom model or LoRA**, enter `TheBloke/falcon-40B-instruct-GPTQ`.
89
+ 5. Click **Download**.
90
+ 6. Wait until it says it's finished downloading.
91
+ 7. Click the **Refresh** icon next to **Model** in the top left.
92
+ 8. In the **Model drop-down**: choose the model you just downloaded, `falcon-40B-instruct-GPTQ`.
93
+ 9. Make sure **Loader** is set to **AutoGPTQ**. This model will not work with ExLlama or GPTQ-for-LLaMa.
94
+ 10. Tick **Trust Remote Code**, followed by **Save Settings**
95
+ 11. Click **Reload**.
96
+ 12. Once it says it's loaded, click the **Text Generation tab** and enter a prompt!
97
+
98
+ ## About `trust_remote_code`
99
+
100
+ Please be aware that this command line argument causes Python code provided by Falcon to be executed on your machine.
101
+
102
+ This code is required at the moment because Falcon is too new to be supported by Hugging Face transformers. At some point in the future transformers will support the model natively, and then `trust_remote_code` will no longer be needed.
103
+
104
+ In this repo you can see two `.py` files - these are the files that get executed. They are copied from the base repo at [Falcon-7B-Instruct](https://huggingface.co/tiiuae/falcon-7b-instruct).
105
+
106
+ ## Simple Python example code
107
+
108
+ To run this code you need to install AutoGPTQ and einops:
109
+ ```
110
+ pip install auto-gptq
111
+ pip install einops
112
+ ```
113
+
114
+ You can then run this example code:
115
+ ```python
116
+ from transformers import AutoTokenizer, pipeline, logging
117
+ from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
118
+ import argparse
119
+
120
+ model_name_or_path = ""TheBloke/falcon-40b-instruct-GPTQ""
121
+ # You could also download the model locally, and access it there
122
+ # model_name_or_path = ""/path/to/TheBloke_falcon-40b-instruct-GPTQ""
123
+
124
+ model_basename = ""model""
125
+
126
+ use_triton = False
127
+
128
+ tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
129
+
130
+ model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
131
+ model_basename=model_basename,
132
+ use_safetensors=True,
133
+ trust_remote_code=True,
134
+ device=""cuda:0"",
135
+ use_triton=use_triton,
136
+ quantize_config=None)
137
+
138
+ prompt = ""Tell me about AI""
139
+ prompt_template=f'''A helpful assistant who helps the user with any questions asked.
140
+ User: {prompt}
141
+ Assistant:'''
142
+
143
+ print(""\n\n*** Generate:"")
144
+
145
+ input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
146
+ output = model.generate(inputs=input_ids, temperature=0.7, max_new_tokens=512)
147
+ print(tokenizer.decode(output[0]))
148
+
149
+ # Inference can also be done using transformers' pipeline
150
+ # Note that if you use pipeline, you will see a spurious error message saying the model type is not supported
151
+ # This can be ignored! Or you can hide it with the following logging line:
152
+ # Prevent printing spurious transformers error when using pipeline with AutoGPTQ
153
+ logging.set_verbosity(logging.CRITICAL)
154
+
155
+ print(""*** Pipeline:"")
156
+ pipe = pipeline(
157
+ ""text-generation"",
158
+ model=model,
159
+ tokenizer=tokenizer,
160
+ max_new_tokens=512,
161
+ temperature=0.7,
162
+ top_p=0.95,
163
+ repetition_penalty=1.15
164
+ )
165
+
166
+ print(pipe(prompt_template)[0]['generated_text'])
167
+ ```
168
+
169
+ ## Provided files
170
+
171
+ **gptq_model-4bit--1g.safetensors**
172
+
173
+ This will work with AutoGPTQ 0.2.0 and later.
174
+
175
+ It was created without groupsize to reduce VRAM requirements, and with `desc_act` (act-order) to improve inference quality.
176
+
177
+ * `gptq_model-4bit--1g.safetensors`
178
+ * Works with AutoGPTQ 0.2.0 and later.
179
+ * At this time it does not work with AutoGPTQ Triton, but support will hopefully be added in time.
180
+ * Works with text-generation-webui using `--trust-remote-code`
181
+ * Does not work with any version of GPTQ-for-LLaMa
182
+ * Parameters: Groupsize = None. Act order (desc_act)
183
+
184
+ <!-- footer start -->
185
+ <!-- 200823 -->
186
+ ## Discord
187
+
188
+ For further support, and discussions on these models and AI in general, join us at:
189
+
190
+ [TheBloke AI's Discord server](https://discord.gg/theblokeai)
191
+
192
+ ## Thanks, and how to contribute.
193
+
194
+ Thanks to the [chirper.ai](https://chirper.ai) team!
195
+
196
+ I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
197
+
198
+ If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
199
+
200
+ Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
201
+
202
+ * Patreon: https://patreon.com/TheBlokeAI
203
+ * Ko-Fi: https://ko-fi.com/TheBlokeAI
204
+
205
+ **Special thanks to**: Aemon Algiz.
206
+
207
+ **Patreon special mentions**: Sam, theTransient, Jonathan Leane, Steven Wood, webtim, Johann-Peter Hartmann, Geoffrey Montalvo, Gabriel Tamborski, Willem Michiel, John Villwock, Derek Yates, Mesiah Bishop, Eugene Pentland, Pieter, Chadd, Stephen Murray, Daniel P. Andersen, terasurfer, Brandon Frisco, Thomas Belote, Sid, Nathan LeClaire, Magnesian, Alps Aficionado, Stanislav Ovsiannikov, Alex, Joseph William Delisle, Nikolai Manek, Michael Davis, Junyu Yang, K, J, Spencer Kim, Stefan Sabev, Olusegun Samson, transmissions 11, Michael Levine, Cory Kujawski, Rainer Wilmers, zynix, Kalila, Luke @flexchar, Ajan Kanaga, Mandus, vamX, Ai Maven, Mano Prime, Matthew Berman, subjectnull, Vitor Caleffi, Clay Pascal, biorpg, alfie_i, 阿明, Jeffrey Morgan, ya boyyy, Raymond Fosdick, knownsqashed, Olakabola, Leonard Tan, ReadyPlayerEmma, Enrico Ros, Dave, Talal Aujan, Illia Dulskyi, Sean Connelly, senxiiz, Artur Olbinski, Elle, Raven Klaugh, Fen Risland, Deep Realms, Imad Khwaja, Fred von Graf, Will Dee, usrbinkat, SuperWojo, Alexandros Triantafyllidis, Swaroop Kallakuri, Dan Guido, John Detwiler, Pedro Madruga, Iucharbius, Viktor Bowallius, Asp the Wyvern, Edmond Seymore, Trenton Dambrowitz, Space Cruiser, Spiking Neurons AB, Pyrater, LangChain4j, Tony Hughes, Kacper Wikieł, Rishabh Srivastava, David Ziegler, Luke Pendergrass, Andrey, Gabriel Puliatti, Lone Striker, Sebastain Graf, Pierre Kircher, Randy H, NimbleBox.ai, Vadim, danny, Deo Leter
208
+
209
+
210
+ Thank you to all my generous patrons and donaters!
211
+
212
+ And thank you again to a16z for their generous grant.
213
+
214
+ <!-- footer end -->
215
+
216
+ # ✨ Original model card: Falcon-40B-Instruct
217
+
218
+ # ✨ Falcon-40B-Instruct
219
+
220
+ **Falcon-40B-Instruct is a 40B parameters causal decoder-only model built by [TII](https://www.tii.ae) based on [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b) and finetuned on a mixture of [Baize](https://github.com/project-baize/baize-chatbot). It is made available under the [TII Falcon LLM License](https://huggingface.co/tiiuae/falcon-40b-instruct/blob/main/LICENSE.txt).**
221
+
222
+ *Paper coming soon 😊.*
223
+
224
+ ## Why use Falcon-40B-Instruct?
225
+
226
+ * **You are looking for a ready-to-use chat/instruct model based on [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b).**
227
+ * **Falcon-40B is the best open-source model available.** It outperforms [LLaMA](https://github.com/facebookresearch/llama), [StableLM](https://github.com/Stability-AI/StableLM), [RedPajama](https://huggingface.co/togethercomputer/RedPajama-INCITE-Base-7B-v0.1), [MPT](https://huggingface.co/mosaicml/mpt-7b), etc. See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
228
+ * **It features an architecture optimized for inference**, with FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)) and multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)).
229
+
230
+ 💬 **This is an instruct model, which may not be ideal for further finetuning.** If you are interested in building your own instruct/chat model, we recommend starting from [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b).
231
+
232
+ 💸 **Looking for a smaller, less expensive model?** [Falcon-7B-Instruct](https://huggingface.co/tiiuae/falcon-7b-instruct) is Falcon-40B-Instruct's small brother!
233
+
234
+ ```python
235
+ from transformers import AutoTokenizer, AutoModelForCausalLM
236
+ import transformers
237
+ import torch
238
+
239
+ model = ""tiiuae/falcon-40b-instruct""
240
+
241
+ tokenizer = AutoTokenizer.from_pretrained(model)
242
+ pipeline = transformers.pipeline(
243
+ ""text-generation"",
244
+ model=model,
245
+ tokenizer=tokenizer,
246
+ torch_dtype=torch.bfloat16,
247
+ trust_remote_code=True,
248
+ device_map=""auto"",
249
+ )
250
+ sequences = pipeline(
251
+ ""Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:"",
252
+ max_length=200,
253
+ do_sample=True,
254
+ top_k=10,
255
+ num_return_sequences=1,
256
+ eos_token_id=tokenizer.eos_token_id,
257
+ )
258
+ for seq in sequences:
259
+ print(f""Result: {seq['generated_text']}"")
260
+
261
+ ```
262
+
263
+ # Model Card for Falcon-40B-Instruct
264
+
265
+ ## Model Details
266
+
267
+ ### Model Description
268
+
269
+ - **Developed by:** [https://www.tii.ae](https://www.tii.ae);
270
+ - **Model type:** Causal decoder-only;
271
+ - **Language(s) (NLP):** English and French;
272
+ - **License:** [TII Falcon LLM License](https://huggingface.co/tiiuae/falcon-7b-instruct/blob/main/LICENSE.txt);
273
+ - **Finetuned from model:** [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b).
274
+
275
+ ### Model Source
276
+
277
+ - **Paper:** *coming soon*.
278
+
279
+ ## Uses
280
+
281
+ ### Direct Use
282
+
283
+ Falcon-40B-Instruct has been finetuned on a chat dataset.
284
+
285
+ ### Out-of-Scope Use
286
+
287
+ Production use without adequate assessment of risks and mitigation; any use cases which may be considered irresponsible or harmful.
288
+
289
+ ## Bias, Risks, and Limitations
290
+
291
+ Falcon-40B-Instruct is mostly trained on English data, and will not generalize appropriately to other languages. Furthermore, as it is trained on a large-scale corpora representative of the web, it will carry the stereotypes and biases commonly encountered online.
292
+
293
+ ### Recommendations
294
+
295
+ We recommend users of Falcon-40B-Instruct to develop guardrails and to take appropriate precautions for any production use.
296
+
297
+ ## How to Get Started with the Model
298
+
299
+
300
+ ```python
301
+ from transformers import AutoTokenizer, AutoModelForCausalLM
302
+ import transformers
303
+ import torch
304
+
305
+ model = ""tiiuae/falcon-40b-instruct""
306
+
307
+ tokenizer = AutoTokenizer.from_pretrained(model)
308
+ pipeline = transformers.pipeline(
309
+ ""text-generation"",
310
+ model=model,
311
+ tokenizer=tokenizer,
312
+ torch_dtype=torch.bfloat16,
313
+ trust_remote_code=True,
314
+ device_map=""auto"",
315
+ )
316
+ sequences = pipeline(
317
+ ""Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:"",
318
+ max_length=200,
319
+ do_sample=True,
320
+ top_k=10,
321
+ num_return_sequences=1,
322
+ eos_token_id=tokenizer.eos_token_id,
323
+ )
324
+ for seq in sequences:
325
+ print(f""Result: {seq['generated_text']}"")
326
+
327
+ ```
328
+
329
+ ## Training Details
330
+
331
+ ### Training Data
332
+
333
+ Falcon-40B-Instruct was finetuned on a 150M tokens from [Bai ze](https://github.com/project-baize/baize-chatbot) mixed with 5% of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) data.
334
+
335
+
336
+ The data was tokenized with the Falcon-[7B](https://huggingface.co/tiiuae/falcon-7b)/[40B](https://huggingface.co/tiiuae/falcon-40b) tokenizer.
337
+
338
+
339
+ ## Evaluation
340
+
341
+ *Paper coming soon.*
342
+
343
+ See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) for early results.
344
+
345
+
346
+ ## Technical Specifications
347
+
348
+ For more information about pretraining, see [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b).
349
+
350
+ ### Model Architecture and Objective
351
+
352
+ Falcon-40B is a causal decoder-only model trained on a causal language modeling task (i.e., predict the next token).
353
+
354
+ The architecture is broadly adapted from the GPT-3 paper ([Brown et al., 2020](https://arxiv.org/abs/2005.14165)), with the following differences:
355
+
356
+ * **Positional embeddings:** rotary ([Su et al., 2021](https://arxiv.org/abs/2104.09864));
357
+ * **Attention:** multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)) and FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135));
358
+ * **Decoder-block:** parallel attention/MLP with a single layer norm.
359
+
360
+ For multiquery, we are using an internal variant which uses independent key and values per tensor parallel degree.
361
+
362
+ | **Hyperparameter** | **Value** | **Comment** |
363
+ |--------------------|-----------|----------------------------------------|
364
+ | Layers | 60 | |
365
+ | `d_model` | 8192 | |
366
+ | `head_dim` | 64 | Reduced to optimise for FlashAttention |
367
+ | Vocabulary | 65024 | |
368
+ | Sequence length | 2048 | |
369
+
370
+ ### Compute Infrastructure
371
+
372
+ #### Hardware
373
+
374
+ Falcon-40B-Instruct was trained on AWS SageMaker, on 64 A100 40GB GPUs in P4d instances.
375
+
376
+ #### Software
377
+
378
+ Falcon-40B-Instruct was trained on a custom distributed training codebase, Gigatron. It uses a 3D parallelism approach combined with ZeRO and high-performance Triton kernels (FlashAttention, etc.)
379
+
380
+
381
+ ## Citation
382
+
383
+ *Paper coming soon 😊.*
384
+
385
+ ## License
386
+
387
+ Falcon-40B-Instruct is made available under the [TII Falcon LLM License](https://huggingface.co/tiiuae/falcon-40b-instruct/blob/main/LICENSE.txt). Broadly speaking,
388
+ * You can freely use our models for research and/or personal purpose;
389
+ * You are allowed to share and build derivatives of these models, but you are required to give attribution and to share-alike with the same license;
390
+ * For commercial use, you are exempt from royalties payment if the attributable revenues are inferior to $1M/year, otherwise you should enter in a commercial agreement with TII.
391
+
392
+
393
+ ## Contact
394
+ falconllm@tii.ae
395
+
396
+ ","{""id"": ""TheBloke/falcon-40b-instruct-GPTQ"", ""author"": ""TheBloke"", ""sha"": ""57ac6eae1469d42d37781df19576896490023ec2"", ""last_modified"": ""2023-08-21 11:20:50+00:00"", ""created_at"": ""2023-05-27 09:06:50+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 111, ""downloads_all_time"": null, ""likes"": 197, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""RefinedWeb"", ""text-generation"", ""custom_code"", ""en"", ""dataset:tiiuae/falcon-refinedweb"", ""arxiv:2205.14135"", ""arxiv:1911.02150"", ""arxiv:2005.14165"", ""arxiv:2104.09864"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""4-bit"", ""gptq"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- tiiuae/falcon-refinedweb\nlanguage:\n- en\nlicense: apache-2.0\ninference: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""RWForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_RW.RWConfig"", ""AutoModelForCausalLM"": ""modelling_RW.RWForCausalLM""}, ""model_type"": ""RefinedWeb"", ""quantization_config"": {""bits"": 4, ""quant_method"": ""gptq""}, ""tokenizer_config"": {""eos_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modelling_RW.RWForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, 
lfs=None)"", ""RepoSibling(rfilename='configuration_RW.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modelling_RW.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantize_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""I32"": 5099957760, ""BF16"": 1067335680, ""F16"": 7004160}, ""total"": 6174297600}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-08-21 11:20:50+00:00"", ""cardData"": ""datasets:\n- tiiuae/falcon-refinedweb\nlanguage:\n- en\nlicense: apache-2.0\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modelling_RW.RWForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""6471c82a97a75cc77aa6cb70"", ""modelId"": ""TheBloke/falcon-40b-instruct-GPTQ"", ""usedStorage"": 45097362760}",0,,0,,0,,0,,0,"HuggingFaceH4/open_llm_leaderboard, huggingface/InferenceSupport/discussions/new?title=TheBloke/falcon-40b-instruct-GPTQ&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BTheBloke%2Ffalcon-40b-instruct-GPTQ%5D(%2FTheBloke%2Ffalcon-40b-instruct-GPTQ)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
flan-t5-base_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
The diff for this file is too large to render. See raw diff
 
flux-lora-collection_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ XLabs-AI/flux-lora-collection,"---
3
+ license: other
4
+ license_name: flux-1-dev-non-commercial-license
5
+ license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.
6
+ language:
7
+ - en
8
+ pipeline_tag: text-to-image
9
+ tags:
10
+ - LoRA
11
+ - Stable Diffusion
12
+ - image-generation
13
+ - Flux
14
+ ---
15
+ ![FLUX LoRA Collections](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/light/flux-lora-collection-rev1.png?raw=true)
16
+ This repository provides a checkpoint with trained LoRAs for
17
+ [FLUX.1-dev model](https://huggingface.co/black-forest-labs/FLUX.1-dev) by Black Forest Labs
18
+ [<img src=""https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/light/join-our-discord-rev1.png?raw=true"">](https://discord.gg/FHY2guThfy)
19
+
20
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/furry4.png?raw=true)
21
+ # ComfyUI
22
+
23
+ [See our github](https://github.com/XLabs-AI/x-flux-comfyui) for comfy ui workflows.
24
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux-comfyui/blob/main/assets/image1.png?raw=true)
25
+ # Training details
26
+ [XLabs AI](https://github.com/XLabs-AI) team is happy to publish fine-tuning Flux scripts, including:
27
+
28
+ - **LoRA** 🔥
29
+ - **ControlNet** 🔥
30
+
31
+ [See our github](https://github.com/XLabs-AI/x-flux) for train script and train configs.
32
+
33
+ # Training Dataset
34
+ Dataset has the following format for the training process:
35
+
36
+ ```
37
+ ├── images/
38
+ │ ├── 1.png
39
+ │ ├── 1.json
40
+ │ ├── 2.png
41
+ │ ├── 2.json
42
+ │ ├── ...
43
+ ```
44
+ A .json file contains ""caption"" field with a text prompt.
45
+
46
+ Thank https://civitai.com/user/dobrosketchkun and https://civitai.com/user/sadxzero for datasets for loras
47
+
48
+ # Inference
49
+ ## furry_lora
50
+ ```bash
51
+ python3 main.py \
52
+ --prompt ""Female furry Pixie with text 'hello world'"" \
53
+ --lora_repo_id XLabs-AI/flux-furry-lora --lora_name furry_lora.safetensors --device cuda --offload --use_lora \
54
+ --model_type flux-dev-fp8 --width 1024 --height 1024 \
55
+ --timestep_to_start_cfg 1 --num_steps 25 --true_gs 3.5 --guidance 4
56
+
57
+ ```
58
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/furry4.png?raw=true)
59
+ ```bash
60
+ python3 main.py \
61
+ --prompt ""Male furry Lycanthrope with fur-covered body in ancient ruins, howling at the full moon, surrounded by eerie mist, werewolf transformation, elder scrolls, eslweyr, glitch aesthetic, anime-inspired, digital illustration, artstation, furry"" \
62
+ --lora_repo_id XLabs-AI/flux-furry-lora --lora_name furry_lora.safetensors --device cuda --offload --use_lora \
63
+ --model_type flux-dev-fp8 --width 1024 --height 1024 \
64
+ --timestep_to_start_cfg 1 --num_steps 25 --true_gs 3.5
65
+
66
+ ```
67
+
68
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/furry2.png?raw=true)
69
+ ## mjv6_lora
70
+ ```bash
71
+ python3 main.py \
72
+ --prompt ""A handsome man in a suit, 25 years old, cool, futuristic"" \
73
+ --lora_repo_id XLabs-AI/flux-lora-collection --lora_name mjv6_lora.safetensors \
74
+ --device cuda:4 --offload --use_lora --model_type flux-dev-fp8 --width 1024 --height 1024
75
+ ```
76
+
77
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_13.png?raw=true)
78
+
79
+ ```bash
80
+ python3 main.py \
81
+ --prompt ""A girl in a suit covered with bold tattoos and holding a vest pistol, beautiful woman, 25 years old, cool, future fantasy, turquoise & light orange ping curl hair"" \
82
+ --lora_repo_id XLabs-AI/flux-lora-collection --lora_name mjv6_lora.safetensors \
83
+ --device cuda --offload --use_lora --model_type flux-dev-fp8 --width 1024 --height 1024
84
+ ```
85
+
86
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_12.png?raw=true)
87
+ ## anime_lora
88
+ ```bash
89
+ python3 main.py \
90
+ --prompt ""A cute corgi lives in a house made out of sushi, anime"" \
91
+ --lora_repo_id XLabs-AI/flux-lora-collection --lora_name anime_lora.safetensors \
92
+ --device cuda --offload --use_lora --model_type flux-dev-fp8 --width 1024 --height 1024
93
+ ```
94
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_14.png?raw=true)
95
+ ```bash
96
+ python3 main.py \
97
+ --prompt ""a girl with orange hair, standing in a room with a window, looking out at a cityscape, anime"" \
98
+ --lora_repo_id XLabs-AI/flux-lora-collection --lora_name anime_lora.safetensors \
99
+ --device cuda --offload --use_lora --model_type flux-dev-fp8 --width 1024 --height 1024
100
+ ```
101
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_15.png?raw=true)
102
+
103
+ ## disney_lora
104
+ ```bash
105
+ python3 main.py \
106
+ --prompt ""An aerial view of beach with people on it, disney style"" \
107
+ --lora_repo_id XLabs-AI/flux-lora-collection --lora_name disney_lora.safetensors \
108
+ --device cuda --offload --use_lora --model_type flux-dev-fp8 --width 1024 --height 1024
109
+ ```
110
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_19.png?raw=true)
111
+
112
+ ```bash
113
+ python3 main.py \
114
+ --prompt ""A blue jay standing on a large basket of rainbow macarons, disney style"" \
115
+ --lora_repo_id XLabs-AI/flux-lora-collection --lora_name disney_lora.safetensors \
116
+ --device cuda --offload --use_lora --model_type flux-dev-fp8 --width 1024 --height 1024
117
+ ```
118
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_18.png?raw=true)
119
+
120
+ ## scenery_lora
121
+ ```bash
122
+ python3 main.py \
123
+ --prompt ""A fantasy cityscape with multiple buildings and skyscrapers all of which are covered in snow and ice, scenery style"" \
124
+ --lora_repo_id XLabs-AI/flux-lora-collection --lora_name scenery_lora.safetensors \
125
+ --device cuda --offload --use_lora --model_type flux-dev-fp8 --width 1024 --height 1024
126
+ ```
127
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_21.png?raw=true)
128
+
129
+ ```bash
130
+ python3 main.py \
131
+ --prompt ""A large ornate building with multiple levels and arches surrounded by trees and greenery. In front of it there are several statues and sculptures on pedestals with fire burning brightly in front of them, scenery style"" \
132
+ --lora_repo_id XLabs-AI/flux-lora-collection --lora_name scenery_lora.safetensors \
133
+ --device cuda --offload --use_lora --model_type flux-dev-fp8 --width 1024 --height 1024
134
+ ```
135
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_22.png?raw=true)
136
+ ## art_lora
137
+ ```bash
138
+ python3 main.py \
139
+ --prompt ""white rabbit in blue dress and hat holding bow and arrow, art"" \
140
+ --lora_repo_id XLabs-AI/flux-lora-collection --lora_name art_lora.safetensors \
141
+ --device cuda --offload --use_lora --model_type flux-dev-fp8 --width 1024 --height 1024
142
+ ```
143
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_23.png?raw=true)
144
+
145
+ ```bash
146
+ python3 main.py \
147
+ --prompt ""castle in the middle of forest at night, art"" \
148
+ --lora_repo_id XLabs-AI/flux-lora-collection --lora_name art_lora.safetensors \
149
+ --device cuda --offload --use_lora --model_type flux-dev-fp8 --width 1024 --height 1024
150
+ ```
151
+ ![Example Picture 1](https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_24.png?raw=true)
152
+ # License
153
+
154
+ lora.safetensors falls under the [FLUX.1 [dev]](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md) Non-Commercial License<br/>","{""id"": ""XLabs-AI/flux-lora-collection"", ""author"": ""XLabs-AI"", ""sha"": ""9ddb42ad970b5d31f50ac02d223adea81413ba4c"", ""last_modified"": ""2024-08-14 15:21:03+00:00"", ""created_at"": ""2024-08-08 16:57:58+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 544, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""LoRA"", ""Stable Diffusion"", ""image-generation"", ""Flux"", ""text-to-image"", ""en"", ""license:other"", ""region:us""], ""pipeline_tag"": ""text-to-image"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.\npipeline_tag: text-to-image\ntags:\n- LoRA\n- Stable Diffusion\n- image-generation\n- Flux"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime_lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='anime_lora_comfy_converted.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='art_lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='art_lora_comfy_converted.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='disney_lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='disney_lora_comfy_converted.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='furry_lora.safetensors', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mjv6_lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='mjv6_lora_comfy_converted.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='realism_lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='realism_lora_comfy_converted.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scenery_lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scenery_lora_comfy_converted.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""multimodalart/flux-lora-the-explorer"", ""multimodalart/flux-lora-lab"", ""openfree/GiniGEN"", ""guardiancc/flux-advanced-explorer"", ""fantaxy/flxloraexp"", ""ginipick/flxloraexp"", ""Nymbo/flux-lora-the-explorer"", ""seawolf2357/flxloraexp"", ""fantos/flxloraexp"", ""ginigen/Multi-LoRAgen"", ""joselobenitezg/obtu-ai"", ""stazizov/XFluxSpace"", ""mukaist/flux-lora-the-explorer"", ""Svngoku/flux-lora-the-explorer"", ""ginigen/Multi-LoRA-gen"", ""Surn/HexaGrid"", ""TheOneHong/flux-lora-the-explorer"", ""ameerazam08/flux-lora-the-explorer"", ""jaredaja88/flux-lora-the-explorer"", ""SplaatKlasky/flux-lora-the-explorer"", ""GaboChoropan/flux-lora-the-explorer"", ""BlackPlasma/flux-lora-the-explorer"", ""Mugiwara93/JuicyFluxLoras"", ""waloneai/WKflux-lora-the-explorer"", ""ZENLLC/flux-lora-the-explorer"", ""Dragunflie-420/flux-lora-the-explorer"", ""Raumkommander/flux-lora-the-explorer"", ""johndpark/flux-lora-the-explorer"", ""HuggingFaceSupport/flux-lora"", ""tenet/flux-lora-the-explorer"", ""erikbeltran/24labsimages"", ""John6666/Xlabs-Gradio-error"", ""BeingSuleman/flux-lora-lab"", ""annoyingpixel/flux-lora-lab"", ""jojosims4557/nananie"", ""cngsm/lrha"", ""aajunior43/flux-lora-lab"", ""boblemaz/flux-lora-lab"", ""Mizetto/flux-lora-lab"", ""khanhere/flux-lora-lab-duplicatedd"", ""reza74ii/flux-lora-the-explorer"", ""jkorstad/flux-lora-the-explorer"", 
""soiz1/flux-lora-the-explorer"", ""John6666/flux-lora-the-explorer-test"", ""oscar00oscar/flux-lora-the-explorer"", ""codermert/flux-lora-the-explorer"", ""jheansad/flux-lora-the-explorer""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-14 15:21:03+00:00"", ""cardData"": ""language:\n- en\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.\npipeline_tag: text-to-image\ntags:\n- LoRA\n- Stable Diffusion\n- image-generation\n- Flux"", ""transformersInfo"": null, ""_id"": ""66b4f9168de19c0bb325177a"", ""modelId"": ""XLabs-AI/flux-lora-collection"", ""usedStorage"": 1479610132}",0,https://huggingface.co/tpinville/lora1,1,"https://huggingface.co/futureaicorner/Flux_1, https://huggingface.co/TRPJ/0, https://huggingface.co/dennis-sleepytales/diseny_model, https://huggingface.co/dennis-sleepytales/art_style, https://huggingface.co/EVA787797/78787879zert, https://huggingface.co/EVA787797/45644hjk, https://huggingface.co/EVA787797/juuiu8988, https://huggingface.co/tompot11/THECAR, https://huggingface.co/EVA787797/tyu7787877",9,,0,,0,"Surn/HexaGrid, Svngoku/flux-lora-the-explorer, TheOneHong/flux-lora-the-explorer, fantaxy/flxloraexp, fantos/flxloraexp, ginigen/Multi-LoRA-gen, ginigen/Multi-LoRAgen, huggingface/InferenceSupport/discussions/new?title=XLabs-AI/flux-lora-collection&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BXLabs-AI%2Fflux-lora-collection%5D(%2FXLabs-AI%2Fflux-lora-collection)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jaredaja88/flux-lora-the-explorer, mukaist/flux-lora-the-explorer, multimodalart/flux-lora-lab, multimodalart/flux-lora-the-explorer, seawolf2357/flxloraexp",13
155
+ tpinville/lora1,"---
156
+ license: apache-2.0
157
+ language:
158
+ - en
159
+ base_model:
160
+ - XLabs-AI/flux-lora-collection
161
+ ---","{""id"": ""tpinville/lora1"", ""author"": ""tpinville"", ""sha"": ""bdc6b364b1405459a72d7adbccb83217a4aaf607"", ""last_modified"": ""2024-10-14 10:38:16+00:00"", ""created_at"": ""2024-10-14 10:01:02+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""en"", ""base_model:XLabs-AI/flux-lora-collection"", ""base_model:finetune:XLabs-AI/flux-lora-collection"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- XLabs-AI/flux-lora-collection\nlanguage:\n- en\nlicense: apache-2.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='stopperlora.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-14 10:38:16+00:00"", ""cardData"": ""base_model:\n- XLabs-AI/flux-lora-collection\nlanguage:\n- en\nlicense: apache-2.0"", ""transformersInfo"": null, ""_id"": ""670cebde34108ca89f5faabe"", ""modelId"": ""tpinville/lora1"", ""usedStorage"": 171969424}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=tpinville/lora1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Btpinville%2Flora1%5D(%2Ftpinville%2Flora1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
flux1-dev_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Comfy-Org/flux1-dev,"---
3
+ license: other
4
+ license_name: flux-1-dev-non-commercial-license
5
+ license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/LICENSE.md
6
+ ---
7
+
8
+ This is a smaller checkpoint for flux1-dev that will work better for ComfyUI users with less VRAM (under 24gb).
9
+
10
+ The two text encoders used by Flux are already included in this one safetensor.
11
+
12
+ Use it with the `Load Checkpoint` node in ComfyUI.","{""id"": ""Comfy-Org/flux1-dev"", ""author"": ""Comfy-Org"", ""sha"": ""f062db3fdcd6a91b73f99236a7dc9cac9f339964"", ""last_modified"": ""2024-09-01 00:37:54+00:00"", ""created_at"": ""2024-08-03 20:04:30+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 506, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""license:other"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/LICENSE.md"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='flux1-dev-fp8.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""ramimu/LoRa_Streamlit""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-09-01 00:37:54+00:00"", ""cardData"": ""license: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/LICENSE.md"", ""transformersInfo"": null, ""_id"": ""66ae8d4edb3168f9d1753958"", ""modelId"": ""Comfy-Org/flux1-dev"", ""usedStorage"": 
17246524772}",0,,0,,0,https://huggingface.co/gguf-org/flux-dev-gguf,1,,0,"huggingface/InferenceSupport/discussions/new?title=Comfy-Org/flux1-dev&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BComfy-Org%2Fflux1-dev%5D(%2FComfy-Org%2Fflux1-dev)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, ramimu/LoRa_Streamlit",2
gpt-neox-20b_finetunes_20250425_143346.csv_finetunes_20250425_143346.csv ADDED
@@ -0,0 +1,600 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ EleutherAI/gpt-neox-20b,"---
3
+ language:
4
+ - en
5
+ tags:
6
+ - pytorch
7
+ - causal-lm
8
+ license: apache-2.0
9
+ datasets:
10
+ - EleutherAI/pile
11
+ ---
12
+
13
+ GPT-NeoX-20B is a 20 billion parameter autoregressive language model trained
14
+ on [the Pile](https://pile.eleuther.ai/) using the [GPT-NeoX
15
+ library](https://github.com/EleutherAI/gpt-neox). Its architecture intentionally
16
+ resembles that of GPT-3, and is almost identical to that of [GPT-J-
17
+ 6B](https://huggingface.co/EleutherAI/gpt-j-6B). Its training dataset contains
18
+ a multitude of English-language texts, reflecting the general-purpose nature
19
+ of this model. See the [accompanying paper](https://arxiv.org/abs/2204.06745)
20
+ for details about model architecture (including how it differs from GPT-3),
21
+ training procedure, and additional evaluations.
22
+
23
+ ### Model details
24
+
25
+ - Developed by: [EleutherAI](http://eleuther.ai)
26
+ - Model type: Transformer-based Language Model
27
+ - Language: English
28
+ - Learn more: [GPT-NeoX-20B: An Open-Source Autoregressive Language
29
+ Model](https://arxiv.org/abs/2204.06745). For details about the training dataset,
30
+ see [the Pile paper](https://arxiv.org/abs/2101.00027), and [its data
31
+ sheet](https://arxiv.org/abs/2201.07311).
32
+ - License: Apache 2.0
33
+ - Contact: to ask questions about this model, join the [EleutherAI
34
+ Discord](https://discord.gg/zBGx3azzUn), and post them in `#release-discussion`.
35
+ Please read the existing GPT-NeoX-20B documentation before asking about the model
36
+ on Discord. For general correspondence: [contact@eleuther.
37
+ ai](mailto:contact@eleuther.ai).
38
+
39
+ <figure style=""width:30em"">
40
+
41
+ | Hyperparameter | Value |
42
+ | ---------------------- | ----------- |
43
+ | n<sub>parameters</sub> | 20554567680 |
44
+ | n<sub>layers</sub> | 44 |
45
+ | d<sub>model</sub> | 6144 |
46
+ | n<sub>heads</sub> | 64 |
47
+ | d<sub>head</sub> | 96 |
48
+ | n<sub>vocab</sub> | 50257 |
49
+ | Sequence Length | 2048 |
50
+ | Learning Rate | 0.97 x 10<sup>-5</sup> |
51
+ | Positional Encoding | [Rotary Position Embedding (RoPE)](https://arxiv.org/abs/2104.09864) |
52
+ </figure>
53
+
54
+ ### Uses and limitations
55
+
56
+ #### Intended use
57
+
58
+ GPT-NeoX-20B was developed primarily for research purposes. It learns an inner
59
+ representation of the English language that can be used to extract features
60
+ useful for downstream tasks.
61
+
62
+ In addition to scientific uses, you may also further fine-tune and adapt
63
+ GPT-NeoX-20B for deployment, as long as your use is in accordance with the
64
+ Apache 2.0 license. This model works with the [Transformers
65
+ Library](https://huggingface.co/docs/transformers/index). If you decide to use
66
+ pre-trained GPT-NeoX-20B as a basis for your fine-tuned model, please note that
67
+ you need to conduct your own risk and bias assessment.
68
+
69
+ #### Out-of-scope use
70
+
71
+ GPT-NeoX-20B is **not** intended for deployment as-is. It is not a product
72
+ and cannot be used for human-facing interactions without supervision.
73
+
74
+ GPT-NeoX-20B has not been fine-tuned for downstream tasks for which language
75
+ models are commonly deployed, such as writing genre prose, or commercial
76
+ chatbots. This means GPT-NeoX-20B will likely **not** respond to a given prompt
77
+ the way products such as ChatGPT do. This is because, unlike GPT-NeoX-20B,
78
+ ChatGPT was fine-tuned using methods such as Reinforcement Learning from Human
79
+ Feedback (RLHF) to better “understand” human instructions and dialogue.
80
+
81
+ This model is English-language only, and thus cannot be used for translation
82
+ or generating text in other languages.
83
+
84
+ #### Limitations and biases
85
+
86
+ The core functionality of GPT-NeoX-20B is to take a string of text and predict
87
+ the next token. Remember that the statistically most likely next token need
88
+ not result in the most “accurate” text. Never rely on GPT-NeoX-20B to produce
89
+ factually accurate output.
90
+
91
+ This model was trained on [the Pile](https://pile.eleuther.ai/), a dataset
92
+ known to contain profanity and texts that are lewd or otherwise offensive.
93
+ See [Section 6 of the Pile paper](https://arxiv.org/abs/2101.00027) for a
94
+ discussion of documented biases with regards to gender, religion, and race.
95
+ GPT-NeoX-20B may produce socially unacceptable or undesirable text, *even if*
96
+ the prompt itself does not include anything explicitly offensive.
97
+
98
+ We recommend curating the outputs of this model before presenting it to a human
99
+ reader. Please inform your audience that you are using artificially generated
100
+ text.
101
+
102
+ #### How to use
103
+ If you simply want to try out some prompts, check out [this
104
+ playground](https://20b.eleuther.ai/).
105
+
106
+ GPT-NeoX-20B can be loaded using the `AutoModelForCausalLM` functionality:
107
+ ```python
108
+ from transformers import AutoTokenizer, AutoModelForCausalLM
109
+
110
+ tokenizer = AutoTokenizer.from_pretrained(""EleutherAI/gpt-neox-20b"")
111
+ model = AutoModelForCausalLM.from_pretrained(""EleutherAI/gpt-neox-20b"")
112
+ ```
113
+
114
+ ### Training
115
+
116
+ #### Training dataset
117
+
118
+ The Pile is a 825GiB general-purpose dataset in English. It was created by
119
+ EleutherAI specifically for training large language models. It contains texts
120
+ from 22 diverse sources, roughly broken down into five categories: academic
121
+ writing (e.g. arXiv), internet (e.g. CommonCrawl), prose (e.g. Project
122
+ Gutenberg), dialogue (e.g. YouTube subtitles), and miscellaneous (e.g. GitHub,
123
+ Enron Emails). See [the Pile paper](https://arxiv.org/abs/2101.00027) for
124
+ a breakdown of all data sources, methodology, and a discussion of ethical
125
+ implications. Consult [the datasheet](https://arxiv.org/abs/2201.07311) for
126
+ more detailed documentation about the Pile and its component datasets. The
127
+ Pile can be downloaded from the [official website](https://pile.eleuther.ai/),
128
+ or from a [community mirror](https://the-eye.eu/public/AI/pile/).
129
+
130
+ The Pile was **not** deduplicated before being used to train GPT-NeoX-20B.
131
+
132
+ #### Training procedure
133
+
134
+ GPT-NeoX-20B was trained with a batch size of approximately 3.15M tokens
135
+ (1538 sequences of 2048 tokens each), for a total of 150,000 steps. Tensor
136
+ parallelism and pipeline parallelism were used to distribute the model across
137
+ GPUs. Additional details about the training procedure are in [Section 3 of
138
+ the accompanying paper](https://arxiv.org/abs/2204.06745).
139
+
140
+
141
+ ### Evaluations
142
+
143
+ <figure style=""width:55em"">
144
+
145
+ | Model | OpenAI’s LAMBADA | SciQ | PIQA | TriviaQA | ARC (Challenge) |
146
+ | ------------- | :--------------: | :-----------: | :-----------: | :-----------: | :-------------: |
147
+ | GPT-J-6B | 0.683 ± 0.006 | 0.910 ± 0.009 | 0.752 ± 0.010 | 0.170 ± 0.004 | 0.340 ± 0.014 |
148
+ | FairSeq 6.7B | 0.673 ± 0.007 | 0.895 ± 0.010 | 0.762 ± 0.010 | 0.221 ± 0.004 | 0.329 ± 0.014 |
149
+ | GPT-3 Curie | 0.693 ± 0.006 | 0.918 ± 0.009 | 0.767 ± 0.010 | 0.196 ± 0.004 | 0.334 ± 0.014 |
150
+ | FairSeq 13B | 0.709 ± 0.006 | 0.910 ± 0.009 | 0.769 ± 0.010 | 0.270 ± 0.004 | 0.345 ± 0.014 |
151
+ | GPT-NeoX-20B | 0.720 ± 0.006 | 0.928 ± 0.008 | 0.779 ± 0.010 | 0.259 ± 0.004 | 0.380 ± 0.014 |
152
+ | GPT-3 DaVinci | 0.752 ± 0.006 | 0.949 ± 0.007 | 0.791 ± 0.009 | 0.409 ± 0.005 | 0.435 ± 0.014 |
153
+ <figcaption>Zero-shot performance on selected natural language tasks.</figcaption>
154
+ </figure>
155
+
156
+ This is a heavily abridged version of the evaluation results. Appendix D of the
157
+ [GPT-NeoX-20B paper](https://arxiv.org/abs/2204.06745) compares more model
158
+ sizes, and contains additional evaluations, including on: zero and five-shot
159
+ natural language tasks, zero and five-shot Basic Arithmetic and MATH,
160
+ and zero-shot Hendrycks tasks.
161
+
162
+ ### BibTeX
163
+
164
+ To cite the GPT-NeoX-20B paper:
165
+
166
+ ```
167
+ @misc{https://doi.org/10.48550/arxiv.2204.06745,
168
+ doi = {10.48550/ARXIV.2204.06745},
169
+
170
+ url = {https://arxiv.org/abs/2204.06745},
171
+
172
+ author = {Black, Sid and Biderman, Stella and Hallahan, Eric and Anthony, Quentin and Gao, Leo and Golding, Laurence and He, Horace and Leahy, Connor and McDonell, Kyle and Phang, Jason and Pieler, Michael and Prashanth, USVSN Sai and Purohit, Shivanshu and Reynolds, Laria and Tow, Jonathan and Wang, Ben and Weinbach, Samuel},
173
+
174
+ keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
175
+
176
+ title = {GPT-NeoX-20B: An Open-Source Autoregressive Language Model},
177
+
178
+ publisher = {arXiv},
179
+
180
+ year = {2022},
181
+
182
+ copyright = {Creative Commons Attribution 4.0 International}
183
+ }
184
+ ```
185
+ # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
186
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_EleutherAI__gpt-neox-20b)
187
+
188
+ | Metric | Value |
189
+ |-----------------------|---------------------------|
190
+ | Avg. | 36.02 |
191
+ | ARC (25-shot) | 45.73 |
192
+ | HellaSwag (10-shot) | 73.45 |
193
+ | MMLU (5-shot) | 25.0 |
194
+ | TruthfulQA (0-shot) | 31.61 |
195
+ | Winogrande (5-shot) | 68.9 |
196
+ | GSM8K (5-shot) | 2.43 |
197
+ | DROP (3-shot) | 5.04 |
198
+ ","{""id"": ""EleutherAI/gpt-neox-20b"", ""author"": ""EleutherAI"", ""sha"": ""c292233c833e336628618a88a648727eb3dff0a7"", ""last_modified"": ""2024-01-31 20:30:35+00:00"", ""created_at"": ""2022-04-07 20:28:29+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 322738, ""downloads_all_time"": null, ""likes"": 559, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""safetensors"", ""gpt_neox"", ""text-generation"", ""causal-lm"", ""en"", ""dataset:EleutherAI/pile"", ""arxiv:2204.06745"", ""arxiv:2101.00027"", ""arxiv:2201.07311"", ""arxiv:2104.09864"", ""license:apache-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- EleutherAI/pile\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- pytorch\n- causal-lm"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""GPTNeoXForCausalLM""], ""model_type"": ""gpt_neox"", ""tokenizer_config"": {""unk_token"": ""<|endoftext|>"", ""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00001-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00021-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00039-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00040-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='model-00041-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00045-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-00046.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00008-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00009-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00010-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00011-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00012-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00013-of-00046.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='pytorch_model-00014-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00015-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00016-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00017-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00018-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00019-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00020-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00021-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00022-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00023-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00024-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00025-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00026-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00027-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00028-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00029-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00030-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00031-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00032-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00033-of-00046.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='pytorch_model-00034-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00035-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00036-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00037-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00038-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00039-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00040-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00041-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00042-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00043-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00044-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00045-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00046-of-00046.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""olivierdehaene/chat-llm-streaming"", ""h2oai/h2ogpt-chatbot"", ""eduagarcia/open_pt_llm_leaderboard"", ""h2oai/h2ogpt-chatbot2"", ""Intel/low_bit_open_llm_leaderboard"", ""BAAI/open_cn_llm_leaderboard"", ""tomg-group-umd/lm-watermarking"", 
""Sharathhebbar24/One-stop-for-Open-source-models"", ""qiantong-xu/toolbench-leaderboard"", ""monra/freegpt-webui"", ""gsaivinay/open_llm_leaderboard"", ""aimevzulari/Prompt_Uzmani"", ""reach-vb/mamba"", ""yenniejun/tokenizers-languages"", ""KBaba7/Quant"", ""justest/gpt4free"", ""ysharma/OSChatbots_ChatGPT_ToeToToe"", ""playgrdstar/compare-llms"", ""Gradio-Blocks/zero-and-few-shot-reasoning"", ""GTBench/GTBench"", ""Vikhrmodels/small-shlepa-lb"", ""NeuralInternet/ChatLLMs"", ""yhavinga/dutch-tokenizer-arena"", ""kastan/ai-teaching-assistant"", ""kz-transformers/kaz-llm-lb"", ""xzuyn/Token-Count-Comparison"", ""concedo/WebTokenizer"", ""felixz/open_llm_leaderboard"", ""bhaskartripathi/LLM_Quantization"", ""OPTML-Group/UnlearnCanvas-Benchmark"", ""5w4n/burmese-tokenizers"", ""totolook/Quant"", ""FallnAI/Quantize-HF-Models"", ""tekkonetes/Chatbots"", ""cloudqi/MultisourceChat"", ""g4f/freegpt-webui"", ""conceptofmind/PaLM_models"", ""BAAI/open_flageval_vlm_leaderboard"", ""CarperAI/pile-v2-eda"", ""knkarthick/chat-llm-streaming"", ""aegistudio/EleutherAI-gpt-neox-20b"", ""kastan/ai-teaching-assistant-beta"", ""b1sheng/kg_llm_leaderboard_test"", ""Fernando22/freegpt-webui"", ""g4f/g4f"", ""Zulelee/langchain-chatchat"", ""neubla/neubla-llm-evaluation-board"", ""lapsapking/h2ogpt-chatbot"", ""Msp/opensource_chat_assistants"", ""Chris4K/llms_compare"", ""inflaton/learn-ai"", ""DrBenjamin/AI_Demo"", ""Alfasign/chat-llm-streaming"", ""Alfasign/AchyuthGPT"", ""pikto/Elite-freegpt-webui"", ""andryMLOPS/ASTA-GPT-3.8_web_ui"", ""VickyKira/NASAGPT"", ""101-5/gpt4free"", ""rodrigomasini/data_only_open_llm_leaderboard"", ""Docfile/open_llm_leaderboard"", ""jijivski/FreshBench"", ""his0/h2ogpt-chatbot"", ""atimughal662/InfoFusion"", ""ArpitM/chat-llm-streaming"", ""RAHMAN00700/rahmans_watsonx"", ""ruslanmv/convert_to_gguf"", ""stanciu/EleutherAI-gpt-neox-20b"", ""henryr/EleutherAI-gpt-neox-20b"", ""trhacknon/h2ogpt-chatbot"", ""selvalogesh/chat-llm-streaming"", 
""vs4vijay/h2ogpt-chatbot"", ""anonauthors/SecretLanguage"", ""infinisoft/opensource_chat_assistants"", ""mrsteyk/rwkv-rs"", ""bempensato/EleutherAI-gpt-neox-20b"", ""akalife/EleutherAI-gpt-neox-20b"", ""c0d3r69/EleutherAI-gpt-neox-20b"", ""shraey/EleutherAI-gpt-neox-20b"", ""MrMeeep6711/EleutherAI-gpt-neox-20b"", ""spnq/EleutherAI-gpt-neox-20b"", ""luosaidage/EleutherAI-gpt-neox-20b"", ""AFischer1985/chat-llm-streaming"", ""shideqin/EleutherAI-gpt-neox-20b"", ""kastan/chatbot-llm-streaming"", ""Toaster496/EleutherAI-gpt-neox-20b"", ""anonymous-aardvark/submission-2841-demo"", ""EveryPizza/EleutherAI-gpt-neox-20b"", ""JonnySaver/OSChatbots_ChatGPT_ToeToToe"", ""Stevross/GPTmodels-vs-ChatGPT"", ""sinkaroid/chat-llm-streaming"", ""onimakivan/fg"", ""star-nox/chat-llm-streaming"", ""akashkj/H2OGPT"", ""xnetba/tesnjakai"", ""osiloke/gpt4free_demo_english"", ""closee23/EleutherAI-gpt-neox-20b"", ""EinfachOlder/AchyuPT"", ""NULLNode/LMW"", ""NebulaVortex/EleutherAI-gpt-neox-20b"", ""ariel0330/h2osiri""], ""safetensors"": {""parameters"": {""F16"": 20554568208, ""U8"": 184549376}, ""total"": 20739117584}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-01-31 20:30:35+00:00"", ""cardData"": ""datasets:\n- EleutherAI/pile\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- pytorch\n- causal-lm"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""624f496dbcbeb0d535f47bcf"", ""modelId"": ""EleutherAI/gpt-neox-20b"", ""usedStorage"": 123731496044}",0,"https://huggingface.co/shivarama23/outputs, https://huggingface.co/solanotodeschini/outputs, https://huggingface.co/paragon-analytics/outputs, https://huggingface.co/vineetsharma/qlora-gpt-neox-20b-english_quotes, https://huggingface.co/Electricarchmage/outputs, https://huggingface.co/OpenVINO/gpt-neox-20b-fp16-ov, https://huggingface.co/Mahler60/Prueba, 
https://huggingface.co/ChatBotExploit/NexusV3",8,"https://huggingface.co/hipnologo/GPT-Neox-20b-QLoRA-FineTune-english_quotes_dataset, https://huggingface.co/shuvom/gpt-neox-20B-FT_RD-service, https://huggingface.co/Shyaboi/fine-tuned-adapters, https://huggingface.co/Mohammed-Altaf/instruct-finetuned-medical-20b-adapters, https://huggingface.co/CarlBrendt/gpt-neox-20b, https://huggingface.co/CarlBrendt/gpt-neox-20b_new, https://huggingface.co/CarlBrendt/gpt-neox-20b-arif, https://huggingface.co/Ogrex/Test-gpt-neox-20b, https://huggingface.co/Electricarchmage/ApocryphaGenerator, https://huggingface.co/motazel/llama-2-7b-chat-guanaco-motaz, https://huggingface.co/singhamal1710/cryptocompass, https://huggingface.co/MinusV25/einsteinecho, https://huggingface.co/tank028/gpt-neox-20b-finet, https://huggingface.co/aidiary/gpt-neox-20b-finetuned-english-quotes, https://huggingface.co/nurke/PEFT-v1-alpaca, https://huggingface.co/asoria/EleutherAI-gpt-neox-20b-english_quotes, https://huggingface.co/MinMaxolotl/outputs, https://huggingface.co/IAyamina/gptneo20b_on_instruction_poems, https://huggingface.co/UltraWolf/UltraWolfmodel",19,"https://huggingface.co/zhentaoyu/gpt-neox-20b-Q4_0-GGUF, https://huggingface.co/OpenVINO/gpt-neox-20b-int8-ov, https://huggingface.co/mradermacher/gpt-neox-20b-GGUF, https://huggingface.co/mradermacher/gpt-neox-20b-i1-GGUF, https://huggingface.co/tensorblock/gpt-neox-20b-GGUF",5,,0,"BAAI/open_cn_llm_leaderboard, GTBench/GTBench, Gradio-Blocks/zero-and-few-shot-reasoning, Intel/low_bit_open_llm_leaderboard, KBaba7/Quant, Sharathhebbar24/One-stop-for-Open-source-models, Vikhrmodels/small-shlepa-lb, aimevzulari/Prompt_Uzmani, eduagarcia/open_pt_llm_leaderboard, 
huggingface/InferenceSupport/discussions/new?title=EleutherAI/gpt-neox-20b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BEleutherAI%2Fgpt-neox-20b%5D(%2FEleutherAI%2Fgpt-neox-20b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, monra/freegpt-webui, qiantong-xu/toolbench-leaderboard, reach-vb/mamba",13
199
+ shivarama23/outputs,"---
200
+ license: apache-2.0
201
+ base_model: EleutherAI/gpt-neox-20b
202
+ tags:
203
+ - generated_from_trainer
204
+ model-index:
205
+ - name: outputs
206
+ results: []
207
+ ---
208
+
209
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
210
+ should probably proofread and complete it, then remove this comment. -->
211
+
212
+ # outputs
213
+
214
+ This model is a fine-tuned version of [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) on an unknown dataset.
215
+
216
+ ## Model description
217
+
218
+ More information needed
219
+
220
+ ## Intended uses & limitations
221
+
222
+ More information needed
223
+
224
+ ## Training and evaluation data
225
+
226
+ More information needed
227
+
228
+ ## Training procedure
229
+
230
+ ### Training hyperparameters
231
+
232
+ The following hyperparameters were used during training:
233
+ - learning_rate: 0.0002
234
+ - train_batch_size: 1
235
+ - eval_batch_size: 8
236
+ - seed: 42
237
+ - gradient_accumulation_steps: 8
238
+ - total_train_batch_size: 8
239
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
240
+ - lr_scheduler_type: linear
241
+ - lr_scheduler_warmup_steps: 2
242
+ - training_steps: 20
243
+
244
+ ### Training results
245
+
246
+
247
+
248
+ ### Framework versions
249
+
250
+ - Transformers 4.32.0.dev0
251
+ - Pytorch 2.0.1+cu118
252
+ - Datasets 2.13.1
253
+ - Tokenizers 0.13.3
254
+ ","{""id"": ""shivarama23/outputs"", ""author"": ""shivarama23"", ""sha"": ""92fdd959b735c144e8adb58fae7b8fff389ccbb1"", ""last_modified"": ""2023-07-20 09:05:26+00:00"", ""created_at"": ""2023-07-20 08:59:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:EleutherAI/gpt-neox-20b"", ""base_model:finetune:EleutherAI/gpt-neox-20b"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: EleutherAI/gpt-neox-20b\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: outputs\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""outputs"", ""results"": []}], ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='.gitignore', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-07-20 09:05:26+00:00"", ""cardData"": ""base_model: EleutherAI/gpt-neox-20b\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: outputs\n results: []"", ""transformersInfo"": null, ""_id"": ""64b8f78af5311dd038284179"", ""modelId"": ""shivarama23/outputs"", ""usedStorage"": 
34639476}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=shivarama23/outputs&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bshivarama23%2Foutputs%5D(%2Fshivarama23%2Foutputs)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
255
+ solanotodeschini/outputs,"---
256
+ license: apache-2.0
257
+ base_model: EleutherAI/gpt-neox-20b
258
+ tags:
259
+ - generated_from_trainer
260
+ model-index:
261
+ - name: outputs
262
+ results: []
263
+ ---
264
+
265
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
266
+ should probably proofread and complete it, then remove this comment. -->
267
+
268
+ # outputs
269
+
270
+ This model is a fine-tuned version of [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) on the None dataset.
271
+
272
+ ## Model description
273
+
274
+ More information needed
275
+
276
+ ## Intended uses & limitations
277
+
278
+ More information needed
279
+
280
+ ## Training and evaluation data
281
+
282
+ More information needed
283
+
284
+ ## Training procedure
285
+
286
+ ### Training hyperparameters
287
+
288
+ The following hyperparameters were used during training:
289
+ - learning_rate: 0.0002
290
+ - train_batch_size: 1
291
+ - eval_batch_size: 8
292
+ - seed: 42
293
+ - gradient_accumulation_steps: 4
294
+ - total_train_batch_size: 4
295
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
296
+ - lr_scheduler_type: linear
297
+ - lr_scheduler_warmup_steps: 2
298
+ - training_steps: 50
299
+
300
+ ### Training results
301
+
302
+
303
+
304
+ ### Framework versions
305
+
306
+ - Transformers 4.32.1
307
+ - Pytorch 2.0.1+cu118
308
+ - Datasets 2.14.4
309
+ - Tokenizers 0.13.3
310
+ ","{""id"": ""solanotodeschini/outputs"", ""author"": ""solanotodeschini"", ""sha"": ""b0e7b98683f8b44eda6d2b2f5c3261808e5b49c8"", ""last_modified"": ""2023-08-31 14:49:52+00:00"", ""created_at"": ""2023-08-31 14:49:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:EleutherAI/gpt-neox-20b"", ""base_model:finetune:EleutherAI/gpt-neox-20b"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: EleutherAI/gpt-neox-20b\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: outputs\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""outputs"", ""results"": []}], ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-08-31 14:49:52+00:00"", ""cardData"": ""base_model: EleutherAI/gpt-neox-20b\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: outputs\n results: []"", ""transformersInfo"": null, ""_id"": ""64f0a88d31d8a032abbee70f"", ""modelId"": ""solanotodeschini/outputs"", ""usedStorage"": 
34639540}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=solanotodeschini/outputs&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsolanotodeschini%2Foutputs%5D(%2Fsolanotodeschini%2Foutputs)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
311
+ paragon-analytics/outputs,"---
312
+ license: apache-2.0
313
+ base_model: EleutherAI/gpt-neox-20b
314
+ tags:
315
+ - generated_from_trainer
316
+ model-index:
317
+ - name: outputs
318
+ results: []
319
+ ---
320
+
321
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
322
+ should probably proofread and complete it, then remove this comment. -->
323
+
324
+ # outputs
325
+
326
+ This model is a fine-tuned version of [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) on an unknown dataset.
327
+
328
+ ## Model description
329
+
330
+ More information needed
331
+
332
+ ## Intended uses & limitations
333
+
334
+ More information needed
335
+
336
+ ## Training and evaluation data
337
+
338
+ More information needed
339
+
340
+ ## Training procedure
341
+
342
+ ### Training hyperparameters
343
+
344
+ The following hyperparameters were used during training:
345
+ - learning_rate: 0.0002
346
+ - train_batch_size: 2
347
+ - eval_batch_size: 16
348
+ - seed: 42
349
+ - gradient_accumulation_steps: 4
350
+ - total_train_batch_size: 8
351
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
352
+ - lr_scheduler_type: linear
353
+ - lr_scheduler_warmup_steps: 2
354
+ - training_steps: 10
355
+
356
+ ### Training results
357
+
358
+
359
+
360
+ ### Framework versions
361
+
362
+ - Transformers 4.34.0.dev0
363
+ - Pytorch 2.0.1+cu117
364
+ - Datasets 2.14.5
365
+ - Tokenizers 0.13.3
366
+ ","{""id"": ""paragon-analytics/outputs"", ""author"": ""paragon-analytics"", ""sha"": ""d8e9e1da4e83fb240849f05ab57ff92d80ef737c"", ""last_modified"": ""2023-09-13 12:34:14+00:00"", ""created_at"": ""2023-09-13 12:34:12+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:EleutherAI/gpt-neox-20b"", ""base_model:finetune:EleutherAI/gpt-neox-20b"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: EleutherAI/gpt-neox-20b\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: outputs\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""outputs"", ""results"": []}], ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-13 12:34:14+00:00"", ""cardData"": ""base_model: EleutherAI/gpt-neox-20b\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: outputs\n results: []"", ""transformersInfo"": null, ""_id"": ""6501ac44e55d031001328210"", ""modelId"": ""paragon-analytics/outputs"", ""usedStorage"": 
34639540}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=paragon-analytics/outputs&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bparagon-analytics%2Foutputs%5D(%2Fparagon-analytics%2Foutputs)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
367
+ vineetsharma/qlora-gpt-neox-20b-english_quotes,"---
368
+ license: apache-2.0
369
+ base_model: EleutherAI/gpt-neox-20b
370
+ tags:
371
+ - generated_from_trainer
372
+ model-index:
373
+ - name: qlora-gpt-neox-20b-english_quotes
374
+ results: []
375
+ ---
376
+
377
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
378
+ should probably proofread and complete it, then remove this comment. -->
379
+
380
+ # qlora-gpt-neox-20b-english_quotes
381
+
382
+ This model is a fine-tuned version of [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) on an unknown dataset.
383
+
384
+ ## Model description
385
+
386
+ More information needed
387
+
388
+ ## Intended uses & limitations
389
+
390
+ More information needed
391
+
392
+ ## Training and evaluation data
393
+
394
+ More information needed
395
+
396
+ ## Training procedure
397
+
398
+ ### Training hyperparameters
399
+
400
+ The following hyperparameters were used during training:
401
+ - learning_rate: 0.0002
402
+ - train_batch_size: 1
403
+ - eval_batch_size: 8
404
+ - seed: 42
405
+ - gradient_accumulation_steps: 4
406
+ - total_train_batch_size: 4
407
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
408
+ - lr_scheduler_type: linear
409
+ - lr_scheduler_warmup_steps: 2
410
+ - training_steps: 10
411
+
412
+ ### Training results
413
+
414
+
415
+
416
+ ### Framework versions
417
+
418
+ - Transformers 4.34.0.dev0
419
+ - Pytorch 2.0.1+cu118
420
+ - Datasets 2.14.5
421
+ - Tokenizers 0.14.0
422
+ ","{""id"": ""vineetsharma/qlora-gpt-neox-20b-english_quotes"", ""author"": ""vineetsharma"", ""sha"": ""43a91b283185adbb0d0fd253edce1bf73a3d649e"", ""last_modified"": ""2023-09-30 06:57:25+00:00"", ""created_at"": ""2023-09-30 06:46:10+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""generated_from_trainer"", ""base_model:EleutherAI/gpt-neox-20b"", ""base_model:finetune:EleutherAI/gpt-neox-20b"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: EleutherAI/gpt-neox-20b\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: qlora-gpt-neox-20b-english_quotes\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""qlora-gpt-neox-20b-english_quotes"", ""results"": []}], ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-09-30 06:57:25+00:00"", ""cardData"": ""base_model: EleutherAI/gpt-neox-20b\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: qlora-gpt-neox-20b-english_quotes\n results: []"", ""transformersInfo"": null, ""_id"": ""6517c432a1a5e5d6178ba519"", ""modelId"": ""vineetsharma/qlora-gpt-neox-20b-english_quotes"", ""usedStorage"": 
69279208}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=vineetsharma/qlora-gpt-neox-20b-english_quotes&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bvineetsharma%2Fqlora-gpt-neox-20b-english_quotes%5D(%2Fvineetsharma%2Fqlora-gpt-neox-20b-english_quotes)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
423
+ Electricarchmage/outputs,"---
424
+ license: apache-2.0
425
+ base_model: EleutherAI/gpt-neox-20b
426
+ tags:
427
+ - generated_from_trainer
428
+ model-index:
429
+ - name: outputs
430
+ results: []
431
+ ---
432
+
433
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
434
+ should probably proofread and complete it, then remove this comment. -->
435
+
436
+ # outputs
437
+
438
+ This model is a fine-tuned version of [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) on an unknown dataset.
439
+
440
+ ## Model description
441
+
442
+ More information needed
443
+
444
+ ## Intended uses & limitations
445
+
446
+ More information needed
447
+
448
+ ## Training and evaluation data
449
+
450
+ More information needed
451
+
452
+ ## Training procedure
453
+
454
+ ### Training hyperparameters
455
+
456
+ The following hyperparameters were used during training:
457
+ - learning_rate: 0.0002
458
+ - train_batch_size: 1
459
+ - eval_batch_size: 8
460
+ - seed: 42
461
+ - gradient_accumulation_steps: 4
462
+ - total_train_batch_size: 4
463
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
464
+ - lr_scheduler_type: linear
465
+ - lr_scheduler_warmup_steps: 2
466
+ - training_steps: 10
467
+ - mixed_precision_training: Native AMP
468
+
469
+ ### Training results
470
+
471
+
472
+
473
+ ### Framework versions
474
+
475
+ - Transformers 4.36.0.dev0
476
+ - Pytorch 2.1.0+cu118
477
+ - Datasets 2.15.0
478
+ - Tokenizers 0.15.0
479
+ ","{""id"": ""Electricarchmage/outputs"", ""author"": ""Electricarchmage"", ""sha"": ""c896238d46f2f012e0bb3068296933268653f8e8"", ""last_modified"": ""2023-12-02 06:08:18+00:00"", ""created_at"": ""2023-12-02 06:01:49+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": null, ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""tensorboard"", ""safetensors"", ""generated_from_trainer"", ""base_model:EleutherAI/gpt-neox-20b"", ""base_model:finetune:EleutherAI/gpt-neox-20b"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: EleutherAI/gpt-neox-20b\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: outputs\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""outputs"", ""results"": []}], ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='adapter_model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Dec02_05-49-59_2be63012f009/events.out.tfevents.1701496202.2be63012f009.246.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-12-02 06:08:18+00:00"", ""cardData"": ""base_model: EleutherAI/gpt-neox-20b\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: outputs\n results: []"", ""transformersInfo"": null, ""_id"": ""656ac84d271c5c4e33ca66dd"", ""modelId"": ""Electricarchmage/outputs"", 
""usedStorage"": 34627242}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Electricarchmage/outputs&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BElectricarchmage%2Foutputs%5D(%2FElectricarchmage%2Foutputs)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
480
+ OpenVINO/gpt-neox-20b-fp16-ov,"---
481
+ license: apache-2.0
482
+ license_link: https://choosealicense.com/licenses/apache-2.0/
483
+ base_model:
484
+ - EleutherAI/gpt-neox-20b
485
+ ---
486
+ # gpt-neox-20b-fp16-ov
487
+ * Model creator: [EleutherAI](https://huggingface.co/EleutherAI)
488
+ * Original model: [gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b)
489
+
490
+ ## Description
491
+
492
+ This is [gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) model converted to the [OpenVINO™ IR](https://docs.openvino.ai/2024/documentation/openvino-ir-format.html) (Intermediate Representation) format.
493
+
494
+ ## Compatibility
495
+
496
+ The provided OpenVINO™ IR model is compatible with:
497
+
498
+ * OpenVINO version 2024.2.0 and higher
499
+ * Optimum Intel 1.19.0 and higher
500
+
501
+ # Running Model Inference with [Optimum Intel](https://huggingface.co/docs/optimum/intel/index)
502
+
503
+ <!-- Example model usage -->
504
+
505
+ 1. Install packages required for using [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) integration with the OpenVINO backend:
506
+
507
+ ```
508
+ pip install optimum[openvino]
509
+ ```
510
+
511
+ 2. Run model inference:
512
+
513
+ <!-- Usage example can be adopted from original model usage example -->
514
+
515
+ ```
516
+ from transformers import AutoTokenizer
517
+ from optimum.intel.openvino import OVModelForCausalLM
518
+
519
+ model_id = ""OpenVINO/gpt-neox-20b-fp16-ov""
520
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
521
+ model = OVModelForCausalLM.from_pretrained(model_id)
522
+
523
+ inputs = tokenizer(""What is OpenVINO?"", return_tensors=""pt"")
524
+
525
+ outputs = model.generate(**inputs, max_length=200)
526
+ text = tokenizer.batch_decode(outputs)[0]
527
+ print(text)
528
+ ```
529
+
530
+ For more examples and possible optimizations, refer to the [OpenVINO Large Language Model Inference Guide](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html).
531
+
532
+ <!-- Usage example with OpenVINO GenAI if applicable -->
533
+ ## Running Model Inference with [OpenVINO GenAI](https://github.com/openvinotoolkit/openvino.genai)
534
+
535
+ 1. Install packages required for using OpenVINO GenAI.
536
+ ```
537
+ pip install openvino-genai huggingface_hub
538
+ ```
539
+
540
+ 2. Download model from HuggingFace Hub
541
+
542
+ ```
543
+ import huggingface_hub as hf_hub
544
+
545
+ model_id = ""OpenVINO/gpt-neox-20b-fp16-ov""
546
+ model_path = ""gpt-neox-20b-fp16-ov""
547
+
548
+ hf_hub.snapshot_download(model_id, local_dir=model_path)
549
+
550
+ ```
551
+
552
+ 3. Run model inference:
553
+
554
+ ```
555
+ import openvino_genai as ov_genai
556
+
557
+ device = ""CPU""
558
+ pipe = ov_genai.LLMPipeline(model_path, device)
559
+ print(pipe.generate(""What is OpenVINO?"", max_length=200))
560
+ ```
561
+
562
+ More GenAI usage examples can be found in OpenVINO GenAI library [docs](https://github.com/openvinotoolkit/openvino.genai/blob/master/src/README.md) and [samples](https://github.com/openvinotoolkit/openvino.genai?tab=readme-ov-file#openvino-genai-samples)
563
+
564
+ ## Limitations
565
+
566
+ Check the original model card for [limitations](https://huggingface.co/EleutherAI/gpt-neox-20b#limitations-and-biases).
567
+
568
+ ## Legal information
569
+
570
+ The original model is distributed under [apache-2.0](https://choosealicense.com/licenses/apache-2.0/) license. More details can be found in [original model card](https://huggingface.co/EleutherAI/gpt-neox-20b).
571
+
572
+ ## Disclaimer
573
+
574
+ Intel is committed to respecting human rights and avoiding causing or contributing to adverse impacts on human rights. See [Intel’s Global Human Rights Principles](https://www.intel.com/content/dam/www/central-libraries/us/en/documents/policy-human-rights.pdf). Intel’s products and software are intended only to be used in applications that do not cause or contribute to adverse impacts on human rights.","{""id"": ""OpenVINO/gpt-neox-20b-fp16-ov"", ""author"": ""OpenVINO"", ""sha"": ""26f59f41ad3349558584cfc188b20e0b428e9db8"", ""last_modified"": ""2024-11-05 10:05:15+00:00"", ""created_at"": ""2024-06-21 06:39:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""openvino"", ""gpt_neox"", ""text-generation"", ""base_model:EleutherAI/gpt-neox-20b"", ""base_model:finetune:EleutherAI/gpt-neox-20b"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- EleutherAI/gpt-neox-20b\nlicense: apache-2.0\nlicense_link: https://choosealicense.com/licenses/apache-2.0/"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""GPTNeoXForCausalLM""], ""model_type"": ""gpt_neox"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": null, ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": 
[""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_detokenizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_detokenizer.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_model.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_tokenizer.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='openvino_tokenizer.xml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-05 10:05:15+00:00"", ""cardData"": ""base_model:\n- EleutherAI/gpt-neox-20b\nlicense: apache-2.0\nlicense_link: https://choosealicense.com/licenses/apache-2.0/"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66752017dbe75ba7861c77e3"", ""modelId"": ""OpenVINO/gpt-neox-20b-fp16-ov"", ""usedStorage"": 
41114735255}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=OpenVINO/gpt-neox-20b-fp16-ov&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BOpenVINO%2Fgpt-neox-20b-fp16-ov%5D(%2FOpenVINO%2Fgpt-neox-20b-fp16-ov)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
575
+ Mahler60/Prueba,"---
576
+ language:
577
+ - es
578
+ - en
579
+ base_model:
580
+ - EleutherAI/gpt-neox-20b
581
+ tags:
582
+ - neox
583
+ - chatbot
584
+ - español
585
+ metrics:
586
+ - bleu
587
+ - rouge
588
+ - bertscore
589
+ pipeline_tag: text-generation
590
+ library_name: transformers
591
+ ---","{""id"": ""Mahler60/Prueba"", ""author"": ""Mahler60"", ""sha"": ""85f42071a4b6b8e4e3c19101dea2b9d0c72c2a93"", ""last_modified"": ""2024-11-27 15:45:38+00:00"", ""created_at"": ""2024-11-26 17:32:35+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""gpt_neox"", ""neox"", ""chatbot"", ""espa\u00f1ol"", ""text-generation"", ""es"", ""en"", ""base_model:EleutherAI/gpt-neox-20b"", ""base_model:finetune:EleutherAI/gpt-neox-20b"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- EleutherAI/gpt-neox-20b\nlanguage:\n- es\n- en\nlibrary_name: transformers\nmetrics:\n- bleu\n- rouge\n- bertscore\npipeline_tag: text-generation\ntags:\n- neox\n- chatbot\n- espa\u00f1ol"", ""widget_data"": [{""text"": ""Me llamo Julien y me gusta""}, {""text"": ""Me llamo Thomas y mi principal""}, {""text"": ""Me llamo Manuel y trabajo en""}, {""text"": ""\u00c9rase una vez,""}, {""text"": ""Si t\u00fa me dices ven, ""}], ""model_index"": null, ""config"": {""architectures"": [""Mahler60/Prueba""], ""model_type"": ""gpt_neox""}, ""transformers_info"": {""auto_model"": ""Mahler60/Prueba"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='fine_tunnig_yuuka.py.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-27 15:45:38+00:00"", ""cardData"": 
""base_model:\n- EleutherAI/gpt-neox-20b\nlanguage:\n- es\n- en\nlibrary_name: transformers\nmetrics:\n- bleu\n- rouge\n- bertscore\npipeline_tag: text-generation\ntags:\n- neox\n- chatbot\n- espa\u00f1ol"", ""transformersInfo"": {""auto_model"": ""Mahler60/Prueba"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": ""AutoTokenizer""}, ""_id"": ""6746063308678f45e12b9d75"", ""modelId"": ""Mahler60/Prueba"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Mahler60/Prueba&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BMahler60%2FPrueba%5D(%2FMahler60%2FPrueba)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
592
+ ChatBotExploit/NexusV3,"---
593
+ base_model:
594
+ - EleutherAI/gpt-neox-20b
595
+ tags:
596
+ - code
597
+ datasets:
598
+ - Roblox/luau_corpus
599
+ library_name: flair
600
+ ---","{""id"": ""ChatBotExploit/NexusV3"", ""author"": ""ChatBotExploit"", ""sha"": ""79c2c5a6d2365b4f8099b5212fdd438e4b47c94d"", ""last_modified"": ""2024-11-29 14:20:41+00:00"", ""created_at"": ""2024-11-29 14:16:22+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""flair"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""flair"", ""code"", ""dataset:Roblox/luau_corpus"", ""base_model:EleutherAI/gpt-neox-20b"", ""base_model:finetune:EleutherAI/gpt-neox-20b"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- EleutherAI/gpt-neox-20b\ndatasets:\n- Roblox/luau_corpus\nlibrary_name: flair\ntags:\n- code"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-11-29 14:20:41+00:00"", ""cardData"": ""base_model:\n- EleutherAI/gpt-neox-20b\ndatasets:\n- Roblox/luau_corpus\nlibrary_name: flair\ntags:\n- code"", ""transformersInfo"": null, ""_id"": ""6749ccb6761588a1fdf2b93a"", ""modelId"": ""ChatBotExploit/NexusV3"", ""usedStorage"": 0}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=ChatBotExploit/NexusV3&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BChatBotExploit%2FNexusV3%5D(%2FChatBotExploit%2FNexusV3)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
gpt2-medium_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
The diff for this file is too large to render. See raw diff