midah committed
Commit 7e69b6e · verified · 1 parent: 2f1c53b

Add files using upload-large-folder tool

Files changed (39)
  1. .gitattributes +14 -0
  2. AnimateLCM_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +43 -0
  3. DeepSeek-V2-Chat-0628_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +263 -0
  4. Kimi-VL-A3B-Instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +478 -0
  5. Llama-3-8B-Instruct-Gradient-1048k_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv +913 -0
  6. Llama-3-8B-Web_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +391 -0
  7. ModernBERT-base_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +3 -0
  8. ModernBERT-large_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +0 -0
  9. PairRM_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv +281 -0
  10. Qwen2-7B-Instruct-GGUF_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +104 -0
  11. SkyReels-V1-Hunyuan_comfy_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv +17 -0
  12. all-MiniLM-L6-v2_finetunes_20250422_225920.csv +3 -0
  13. all-mpnet-base-v2_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +3 -0
  14. bert-base-cased_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +3 -0
  15. bert-base-multilingual-cased_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +3 -0
  16. bge-m3_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +3 -0
  17. bloomz_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +0 -0
  18. chatglm-fitness-RLHF_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +148 -0
  19. cpm-bee-10b_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +108 -0
  20. dolly-v1-6b_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +223 -0
  21. faster-whisper-large-v3_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +143 -0
  22. gemma-scope_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +80 -0
  23. gpt2_finetunes_20250424_150612.csv_finetunes_20250424_150612.csv +3 -0
  24. gte-Qwen2-7B-instruct_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +0 -0
  25. hallucination_evaluation_model_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +184 -0
  26. layoutlm-document-qa_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +119 -0
  27. lora_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +0 -0
  28. multilingual-e5-base_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +3 -0
  29. paraphrase-multilingual-MiniLM-L12-v2_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv +3 -0
  30. potat1_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +60 -0
  31. roberta-base_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +3 -0
  32. t5-small_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv +3 -0
  33. text2image-prompt-generator_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv +23 -0
  34. vit-base-patch16-224-in21k_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +3 -0
  35. whisper-large-v2_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv +0 -0
  36. whisper-large-v3-turbo_finetunes_20250424_150612.csv_finetunes_20250424_150612.csv +0 -0
  37. whisper-medusa-v1_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv +70 -0
  38. whisper-small_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv +3 -0
  39. whisper-tiny_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv +3 -0
.gitattributes CHANGED
@@ -57,3 +57,17 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ bert-base-multilingual-cased_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv filter=lfs diff=lfs merge=lfs -text
61
+ roberta-base_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv filter=lfs diff=lfs merge=lfs -text
62
+ all-mpnet-base-v2_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv filter=lfs diff=lfs merge=lfs -text
63
+ vit-base-patch16-224-in21k_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv filter=lfs diff=lfs merge=lfs -text
64
+ multilingual-e5-base_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv filter=lfs diff=lfs merge=lfs -text
65
+ all-MiniLM-L6-v2_finetunes_20250422_225920.csv filter=lfs diff=lfs merge=lfs -text
66
+ whisper-small_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv filter=lfs diff=lfs merge=lfs -text
67
+ gpt2_finetunes_20250424_150612.csv_finetunes_20250424_150612.csv filter=lfs diff=lfs merge=lfs -text
68
+ bert-base-cased_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv filter=lfs diff=lfs merge=lfs -text
69
+ t5-small_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv filter=lfs diff=lfs merge=lfs -text
70
+ whisper-tiny_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv filter=lfs diff=lfs merge=lfs -text
71
+ bge-m3_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv filter=lfs diff=lfs merge=lfs -text
72
+ paraphrase-multilingual-MiniLM-L12-v2_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv filter=lfs diff=lfs merge=lfs -text
73
+ ModernBERT-base_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv filter=lfs diff=lfs merge=lfs -text
AnimateLCM_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
@@ -0,0 +1,43 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ wangfuyun/AnimateLCM,"---
3
+ pipeline_tag: text-to-video
4
+ ---
5
+ # AnimateLCM for Fast Video Generation in 4 steps.
6
+
7
+ [AnimateLCM: Computation-Efficient Personalized Style Video Generation without Personalized Video Data](https://arxiv.org/abs/2402.00769) by Fu-Yun Wang et al.
8
+
9
+ ## We also support fast image-to-video generation; see [AnimateLCM-SVD-xt](https://huggingface.co/wangfuyun/AnimateLCM-SVD-xt) and [AnimateLCM-I2V](https://huggingface.co/wangfuyun/AnimateLCM-I2V).
10
+
11
+ For more details, please refer to our [[paper](https://arxiv.org/abs/2402.00769)] | [[code](https://github.com/G-U-N/AnimateLCM)] | [[proj-page](https://animatelcm.github.io/)] | [[civitai](https://civitai.com/models/290375/animatelcm-fast-video-generation)].
12
+
13
+ <video controls autoplay src=""https://cdn-uploads.huggingface.co/production/uploads/63e9e92f20c109718713f5eb/KCwSoZCdxkkmtDg1LuXsP.mp4""></video>
14
+
15
+ ## Using AnimateLCM with Diffusers
16
+
17
+ ```python
18
+ import torch
19
+ from diffusers import AnimateDiffPipeline, LCMScheduler, MotionAdapter
20
+ from diffusers.utils import export_to_gif
21
+
22
+ adapter = MotionAdapter.from_pretrained(""wangfuyun/AnimateLCM"", torch_dtype=torch.float16)
23
+ pipe = AnimateDiffPipeline.from_pretrained(""emilianJR/epiCRealism"", motion_adapter=adapter, torch_dtype=torch.float16)
24
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule=""linear"")
25
+
26
+ pipe.load_lora_weights(""wangfuyun/AnimateLCM"", weight_name=""AnimateLCM_sd15_t2v_lora.safetensors"", adapter_name=""lcm-lora"")
27
+ pipe.set_adapters([""lcm-lora""], [0.8])
28
+
29
+ pipe.enable_vae_slicing()
30
+ pipe.enable_model_cpu_offload()
31
+
32
+ output = pipe(
33
+ prompt=""A space rocket with trails of smoke behind it launching into space from the desert, 4k, high resolution"",
34
+ negative_prompt=""bad quality, worse quality, low resolution"",
35
+ num_frames=16,
36
+ guidance_scale=2.0,
37
+ num_inference_steps=6,
38
+ generator=torch.Generator(""cpu"").manual_seed(0),
39
+ )
40
+ frames = output.frames[0]
41
+ export_to_gif(frames, ""animatelcm.gif"")
42
+ ```
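+
+ If you would rather have an MP4 than a GIF, a minimal variation of the last step (reusing the `frames` produced above) is the `export_to_video` helper that diffusers also ships:
+
+ ```python
+ # Sketch: write the frames generated above to an MP4 instead of a GIF
+ from diffusers.utils import export_to_video
+
+ export_to_video(frames, ""animatelcm.mp4"", fps=8)
+ ```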
43
+ ","{""id"": ""wangfuyun/AnimateLCM"", ""author"": ""wangfuyun"", ""sha"": ""3d4d00fc113225e1040f4d3bec504b6ec750c10c"", ""last_modified"": ""2024-10-16 13:16:32+00:00"", ""created_at"": ""2024-02-03 05:59:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 33977, ""downloads_all_time"": null, ""likes"": 323, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""safetensors"", ""text-to-video"", ""arxiv:2402.00769"", ""region:us""], ""pipeline_tag"": ""text-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""pipeline_tag: text-to-video"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='AnimateLCM_sd15_t2v.ckpt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='AnimateLCM_sd15_t2v_lora.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.fp16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='diffusion_pytorch_model.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [""wangfuyun/AnimateLCM-SVD"", ""wangfuyun/AnimateLCM"", ""HongcanGuo/InspiroV"", ""K00B404/AnimateDiff-Lightning"", ""jbilcke-hf/ai-tube-model-animatelcm"", ""rynmurdock/Blue_Tigers"", ""Spanicin/ttv"", ""fantos/vidiani"", ""Ziaistan/AnimateLCM-SVD"", ""Taf2023/AnimateLCM"", ""sanaweb/AnimateLCM"", ""kanishka089/GIFGenerator"", ""svjack/AnimateLCM-SVD-Genshin-Impact-Demo"", ""jnjj/Ggggggg""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-16 13:16:32+00:00"", ""cardData"": ""pipeline_tag: text-to-video"", ""transformersInfo"": null, ""_id"": ""65bdd652d6d0ffbcebddfdff"", ""modelId"": ""wangfuyun/AnimateLCM"", ""usedStorage"": 4670695581}",0,,0,,0,,0,,0,"HongcanGuo/InspiroV, K00B404/AnimateDiff-Lightning, Spanicin/ttv, Taf2023/AnimateLCM, Ziaistan/AnimateLCM-SVD, fantos/vidiani, huggingface/InferenceSupport/discussions/new?title=wangfuyun/AnimateLCM&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bwangfuyun%2FAnimateLCM%5D(%2Fwangfuyun%2FAnimateLCM)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jbilcke-hf/ai-tube-model-animatelcm, kanishka089/GIFGenerator, rynmurdock/Blue_Tigers, sanaweb/AnimateLCM, wangfuyun/AnimateLCM, wangfuyun/AnimateLCM-SVD",13
DeepSeek-V2-Chat-0628_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,263 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ deepseek-ai/DeepSeek-V2-Chat-0628,"---
3
+ license: other
4
+ license_name: deepseek
5
+ license_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL
6
+ ---
7
+
8
+ <!-- markdownlint-disable first-line-h1 -->
9
+ <!-- markdownlint-disable html -->
10
+ <!-- markdownlint-disable no-duplicate-header -->
11
+
12
+ <div align=""center"">
13
+ <img src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/logo.svg?raw=true"" width=""60%"" alt=""DeepSeek-V2"" />
14
+ </div>
15
+ <hr>
16
+ <div align=""center"" style=""line-height: 1;"">
17
+ <a href=""https://www.deepseek.com/"" target=""_blank"" style=""margin: 2px;"">
18
+ <img alt=""Homepage"" src=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/badge.svg?raw=true"" style=""display: inline-block; vertical-align: middle;""/>
19
+ </a>
20
+ <a href=""https://chat.deepseek.com/"" target=""_blank"" style=""margin: 2px;"">
21
+ <img alt=""Chat"" src=""https://img.shields.io/badge/🤖%20Chat-DeepSeek%20V2-536af5?color=536af5&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
22
+ </a>
23
+ <a href=""https://huggingface.co/deepseek-ai"" target=""_blank"" style=""margin: 2px;"">
24
+ <img alt=""Hugging Face"" src=""https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-DeepSeek%20AI-ffc107?color=ffc107&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
25
+ </a>
26
+ </div>
27
+
28
+ <div align=""center"" style=""line-height: 1;"">
29
+ <a href=""https://discord.gg/Tc7c45Zzu5"" target=""_blank"" style=""margin: 2px;"">
30
+ <img alt=""Discord"" src=""https://img.shields.io/badge/Discord-DeepSeek%20AI-7289da?logo=discord&logoColor=white&color=7289da"" style=""display: inline-block; vertical-align: middle;""/>
31
+ </a>
32
+ <a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/qr.jpeg?raw=true"" target=""_blank"" style=""margin: 2px;"">
33
+ <img alt=""Wechat"" src=""https://img.shields.io/badge/WeChat-DeepSeek%20AI-brightgreen?logo=wechat&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
34
+ </a>
35
+ <a href=""https://twitter.com/deepseek_ai"" target=""_blank"" style=""margin: 2px;"">
36
+ <img alt=""Twitter Follow"" src=""https://img.shields.io/badge/Twitter-deepseek_ai-white?logo=x&logoColor=white"" style=""display: inline-block; vertical-align: middle;""/>
37
+ </a>
38
+ </div>
39
+
40
+ <div align=""center"" style=""line-height: 1;"">
41
+ <a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-CODE"" style=""margin: 2px;"">
42
+ <img alt=""Code License"" src=""https://img.shields.io/badge/Code_License-MIT-f5de53?&color=f5de53"" style=""display: inline-block; vertical-align: middle;""/>
43
+ </a>
44
+ <a href=""https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL"" style=""margin: 2px;"">
45
+ <img alt=""Model License"" src=""https://img.shields.io/badge/Model_License-Model_Agreement-f5de53?&color=f5de53"" style=""display: inline-block; vertical-align: middle;""/>
46
+ </a>
47
+ </div>
48
+
49
+ <p align=""center"">
50
+ <a href=""#2-model-downloads"">Model Download</a> |
51
+ <a href=""#3-evaluation-results"">Evaluation Results</a> |
52
+ <a href=""#4-model-architecture"">Model Architecture</a> |
53
+ <a href=""#6-api-platform"">API Platform</a> |
54
+ <a href=""#8-license"">License</a> |
55
+ <a href=""#9-citation"">Citation</a>
56
+ </p>
57
+
58
+ <p align=""center"">
59
+ <a href=""https://arxiv.org/abs/2405.04434""><b>Paper Link</b>👁️</a>
60
+ </p>
61
+
62
+ # DeepSeek-V2-Chat-0628
63
+
64
+ ## 1. Introduction
65
+
66
+ DeepSeek-V2-Chat-0628 is an improved version of DeepSeek-V2-Chat. For model details, please visit the [DeepSeek-V2 page](https://huggingface.co/deepseek-ai/DeepSeek-V2-Chat).
67
+
68
+ DeepSeek-V2-Chat-0628 has achieved remarkable performance on the LMSYS Chatbot Arena Leaderboard:
69
+
70
+ Overall Ranking: #11, outperforming all other open-source models.
71
+
72
+ <p align=""center"">
73
+ <img width=""90%"" src=""figures/arena1.jpeg"" />
74
+ </p>
75
+
76
+ Coding Arena Ranking: #3, showcasing exceptional capabilities in coding tasks.
77
+
78
+ <p align=""center"">
79
+ <img width=""90%"" src=""figures/arena2.png"" />
80
+ </p>
81
+
82
+ Hard Prompts Arena Ranking: #3, demonstrating strong performance on challenging prompts.
83
+
84
+ <p align=""center"">
85
+ <img width=""90%"" src=""figures/arena3.png"" />
86
+ </p>
87
+
88
+ ## 2. Improvement
89
+
90
+ Compared to the previous version DeepSeek-V2-Chat, the new version has made the following improvements:
91
+
92
+ | **Benchmark** | **DeepSeek-V2-Chat** | **DeepSeek-V2-Chat-0628** | **Improvement** |
93
+ |:-----------:|:------------:|:---------------:|:-------------------------:|
94
+ | **HumanEval** | 81.1 | 84.8 | +3.7 |
95
+ | **MATH** | 53.9 | 71.0 | +17.1 |
96
+ | **BBH** | 79.7 | 83.4 | +3.7 |
97
+ | **IFEval** | 63.8 | 77.6 | +13.8 |
98
+ | **Arena-Hard** | 41.6 | 68.3 | +26.7 |
99
+ | **JSON Output (Internal)** | 78 | 85 | +7 |
100
+
101
+ Furthermore, the instruction-following capability in the ""system"" area has been optimized, significantly enhancing the user experience for immersive translation, RAG, and other tasks.
102
+
103
+ ## 3. How to run locally
104
+
105
+ **To utilize DeepSeek-V2-Chat-0628 in BF16 format for inference, 8 x 80GB GPUs are required.**
106
+ ### Inference with Huggingface's Transformers
107
+ You can directly employ [Huggingface's Transformers](https://github.com/huggingface/transformers) for model inference.
108
+
109
+ ```python
110
+ import torch
111
+ from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
112
+
113
+ model_name = ""deepseek-ai/DeepSeek-V2-Chat-0628""
114
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
115
+ # `max_memory` should be set based on your devices
116
+ max_memory = {i: ""75GB"" for i in range(8)}
117
+ # `device_map` cannot be set to `auto`
118
+ model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, device_map=""sequential"", torch_dtype=torch.bfloat16, max_memory=max_memory, attn_implementation=""eager"")
119
+ model.generation_config = GenerationConfig.from_pretrained(model_name)
120
+ model.generation_config.pad_token_id = model.generation_config.eos_token_id
121
+
122
+ messages = [
123
+ {""role"": ""user"", ""content"": ""Write a piece of quicksort code in C++""}
124
+ ]
125
+ input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors=""pt"")
126
+ outputs = model.generate(input_tensor.to(model.device), max_new_tokens=100)
127
+
128
+ result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
129
+ print(result)
130
+ ```
131
+
132
+ The complete chat template can be found within `tokenizer_config.json` located in the huggingface model repository.
133
+
134
+ **Note: The chat template has been updated compared to the previous DeepSeek-V2-Chat version.**
135
+
136
+ An example of the chat template is as follows:
137
+
138
+ ```bash
139
+ <|begin▁of▁sentence|><|User|>{user_message_1}<|Assistant|>{assistant_message_1}<|end▁of▁sentence|><|User|>{user_message_2}<|Assistant|>
140
+ ```
141
+
142
+ You can also add an optional system message:
143
+
144
+ ```bash
145
+ <|begin▁of▁sentence|>{system_message}
146
+
147
+ <|User|>{user_message_1}<|Assistant|>{assistant_message_1}<|end▁of▁sentence|><|User|>{user_message_2}<|Assistant|>
148
+ ```
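+
+ To inspect the rendered prompt string yourself (a minimal sketch; the messages below are illustrative), you can ask the tokenizer to apply the chat template without tokenizing:
+
+ ```python
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained(""deepseek-ai/DeepSeek-V2-Chat-0628"", trust_remote_code=True)
+ messages = [
+     {""role"": ""system"", ""content"": ""You are a helpful assistant.""},
+     {""role"": ""user"", ""content"": ""Who are you?""},
+ ]
+ # tokenize=False returns the raw prompt string so the template can be inspected
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+ print(prompt)
+ ```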
149
+
150
+ ### Inference with vLLM (recommended)
151
+ To utilize [vLLM](https://github.com/vllm-project/vllm) for model inference, please merge this Pull Request into your vLLM codebase: https://github.com/vllm-project/vllm/pull/4650.
152
+
153
+ ```python
154
+ from transformers import AutoTokenizer
155
+ from vllm import LLM, SamplingParams
156
+
157
+ max_model_len, tp_size = 8192, 8
158
+ model_name = ""deepseek-ai/DeepSeek-V2-Chat-0628""
159
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
160
+ llm = LLM(model=model_name, tensor_parallel_size=tp_size, max_model_len=max_model_len, trust_remote_code=True, enforce_eager=True)
161
+ sampling_params = SamplingParams(temperature=0.3, max_tokens=256, stop_token_ids=[tokenizer.eos_token_id])
162
+
163
+ messages_list = [
164
+ [{""role"": ""user"", ""content"": ""Who are you?""}],
165
+ [{""role"": ""user"", ""content"": ""Translate the following content into Chinese directly: DeepSeek-V2 adopts innovative architectures to guarantee economical training and efficient inference.""}],
166
+ [{""role"": ""user"", ""content"": ""Write a piece of quicksort code in C++.""}],
167
+ ]
168
+
169
+ prompt_token_ids = [tokenizer.apply_chat_template(messages, add_generation_prompt=True) for messages in messages_list]
170
+
171
+ outputs = llm.generate(prompt_token_ids=prompt_token_ids, sampling_params=sampling_params)
172
+
173
+ generated_text = [output.outputs[0].text for output in outputs]
174
+ print(generated_text)
175
+ ```
176
+
177
+ ## 4. License
178
+ This code repository is licensed under [the MIT License](LICENSE-CODE). The use of DeepSeek-V2 Base/Chat models is subject to [the Model License](LICENSE-MODEL). DeepSeek-V2 series (including Base and Chat) supports commercial use.
179
+
180
+ ## 5. Citation
181
+ ```
182
+ @misc{deepseekv2,
183
+ title={DeepSeek-V2: A Strong, Economical, and Efficient Mixture-of-Experts Language Model},
184
+ author={DeepSeek-AI},
185
+ year={2024},
186
+ eprint={2405.04434},
187
+ archivePrefix={arXiv},
188
+ primaryClass={cs.CL}
189
+ }
190
+ ```
191
+
192
+ ## 6. Contact
193
+ If you have any questions, please raise an issue or contact us at [service@deepseek.com](mailto:service@deepseek.com).
194
+ ","{""id"": ""deepseek-ai/DeepSeek-V2-Chat-0628"", ""author"": ""deepseek-ai"", ""sha"": ""5d09e272c2b223830f4e84359cd9dd047a5d7c78"", ""last_modified"": ""2024-07-18 12:40:44+00:00"", ""created_at"": ""2024-07-18 02:47:09+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 167, ""downloads_all_time"": null, ""likes"": 176, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""deepseek_v2"", ""text-generation"", ""conversational"", ""custom_code"", ""arxiv:2405.04434"", ""license:other"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""license: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""DeepseekV2ForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_deepseek.DeepseekV2Config"", ""AutoModel"": ""modeling_deepseek.DeepseekV2Model"", ""AutoModelForCausalLM"": ""modeling_deepseek.DeepseekV2ForCausalLM""}, ""model_type"": ""deepseek_v2"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cbegin\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": {""__type"": ""AddedToken"", ""content"": ""<\uff5cend\u2581of\u2581sentence\uff5c>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""unk_token"": null, ""chat_template"": ""{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ '<\uff5cUser\uff5c>' + message['content'] }}{% elif message['role'] == 'assistant' %}{{ '<\uff5cAssistant\uff5c>' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<\uff5cAssistant\uff5c>' }}{% endif %}""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_deepseek.DeepseekV2ForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_deepseek.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/arena1.jpeg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/arena2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/arena3.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00008-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00009-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00010-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00011-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00012-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00013-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00014-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00015-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00016-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00017-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00018-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00019-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00020-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00021-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00022-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00023-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00024-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00025-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00026-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00027-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00028-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00029-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00030-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00031-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00032-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00033-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00034-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00035-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00036-of-000055.safetensors', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00037-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00038-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00039-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00040-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00041-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00042-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00043-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00044-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00045-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00046-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00047-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00048-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00049-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00050-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00051-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00052-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00053-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00054-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00055-of-000055.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_deepseek.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_deepseek_fast.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Steven10429/apply_lora_and_quantize"", ""DanofficeIT/privatellm"", ""oyuncunettv/NetHubAI"", ""Mat17892/iris"", ""YZ-TAN/flask-llama"", ""KBaba7/llama.cpp""], ""safetensors"": {""parameters"": {""BF16"": 235741434880}, ""total"": 235741434880}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-07-18 12:40:44+00:00"", ""cardData"": ""license: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": ""modeling_deepseek.DeepseekV2ForCausalLM"", ""pipeline_tag"": ""text-generation"", ""processor"": null}, ""_id"": ""6698822db2ebada9f48ccea4"", ""modelId"": ""deepseek-ai/DeepSeek-V2-Chat-0628"", ""usedStorage"": 471486512925}",0,https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF,1,,0,"https://huggingface.co/bartowski/DeepSeek-V2-Chat-0628-GGUF, https://huggingface.co/nisten/deepseek-0628-gguf, https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-i1-GGUF",3,,0,"DanofficeIT/privatellm, KBaba7/llama.cpp, Mat17892/iris, Steven10429/apply_lora_and_quantize, YZ-TAN/flask-llama, 
huggingface/InferenceSupport/discussions/new?title=deepseek-ai/DeepSeek-V2-Chat-0628&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdeepseek-ai%2FDeepSeek-V2-Chat-0628%5D(%2Fdeepseek-ai%2FDeepSeek-V2-Chat-0628)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, oyuncunettv/NetHubAI",7
195
+ mradermacher/DeepSeek-V2-Chat-0628-GGUF,"---
196
+ base_model: deepseek-ai/DeepSeek-V2-Chat-0628
197
+ language:
198
+ - en
199
+ library_name: transformers
200
+ license: other
201
+ license_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL
202
+ license_name: deepseek
203
+ quantized_by: mradermacher
204
+ ---
205
+ ## About
206
+
207
+ <!-- ### quantize_version: 2 -->
208
+ <!-- ### output_tensor_quantised: 1 -->
209
+ <!-- ### convert_type: hf -->
210
+ <!-- ### vocab_type: -->
211
+ <!-- ### tags: -->
212
+ static quants of https://huggingface.co/deepseek-ai/DeepSeek-V2-Chat-0628
213
+
214
+ <!-- provided-files -->
215
+ weighted/imatrix quants are available at https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-i1-GGUF
216
+ ## Usage
217
+
218
+ If you are unsure how to use GGUF files, refer to one of [TheBloke's
219
+ READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
220
+ more details, including how to concatenate multi-part files.
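+
+ As a rough sketch (filenames assumed; adjust to the quant you downloaded), the parts are plain byte-splits, so rejoining them is simple concatenation:
+
+ ```python
+ # Sketch: join split GGUF parts back into one file (equivalent to `cat part* > out.gguf`)
+ import shutil
+ from pathlib import Path
+
+ parts = sorted(Path(""."").glob(""DeepSeek-V2-Chat-0628.Q2_K.gguf.part*""))
+ with open(""DeepSeek-V2-Chat-0628.Q2_K.gguf"", ""wb"") as out:
+     for part in parts:
+         with open(part, ""rb"") as src:
+             shutil.copyfileobj(src, out)
+ ```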
221
+
222
+ ## Provided Quants
223
+
224
+ (sorted by size, not necessarily quality. IQ-quants are often preferable over similar-sized non-IQ quants)
225
+
226
+ | Link | Type | Size/GB | Notes |
227
+ |:-----|:-----|--------:|:------|
228
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q2_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q2_K.gguf.part2of2) | Q2_K | 86.0 | |
229
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ3_XS.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ3_XS.gguf.part2of2) | IQ3_XS | 96.4 | |
230
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ3_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ3_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ3_S.gguf.part3of3) | IQ3_S | 101.8 | beats Q3_K* |
231
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q3_K_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q3_K_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q3_K_S.gguf.part3of3) | Q3_K_S | 101.8 | |
232
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ3_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ3_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ3_M.gguf.part3of3) | IQ3_M | 103.5 | |
233
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q3_K_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q3_K_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q3_K_M.gguf.part3of3) | Q3_K_M | 112.8 | lower quality |
234
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q3_K_L.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q3_K_L.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q3_K_L.gguf.part3of3) | Q3_K_L | 122.5 | |
235
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ4_XS.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ4_XS.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.IQ4_XS.gguf.part3of3) | IQ4_XS | 126.9 | |
236
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q4_K_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q4_K_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q4_K_S.gguf.part3of3) | Q4_K_S | 134.0 | fast, recommended |
237
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q4_K_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q4_K_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q4_K_M.gguf.part3of3) | Q4_K_M | 142.6 | fast, recommended |
238
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q5_K_S.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q5_K_S.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q5_K_S.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q5_K_S.gguf.part4of4) | Q5_K_S | 162.4 | |
239
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q5_K_M.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q5_K_M.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q5_K_M.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q5_K_M.gguf.part4of4) | Q5_K_M | 167.3 | |
240
+ | [PART 1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q6_K.gguf.part1of4) [PART 2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q6_K.gguf.part2of4) [PART 3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q6_K.gguf.part3of4) [PART 4](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q6_K.gguf.part4of4) | Q6_K | 193.6 | very good quality |
241
+ | [P1](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q8_0.gguf.part1of6) [P2](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q8_0.gguf.part2of6) [P3](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q8_0.gguf.part3of6) [P4](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q8_0.gguf.part4of6) [P5](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q8_0.gguf.part5of6) [P6](https://huggingface.co/mradermacher/DeepSeek-V2-Chat-0628-GGUF/resolve/main/DeepSeek-V2-Chat-0628.Q8_0.gguf.part6of6) | Q8_0 | 250.7 | fast, best quality |
242
+
243
+ Here is a handy graph by ikawrakow comparing some lower-quality quant
244
+ types (lower is better):
245
+
246
+ ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)
247
+
248
+ And here are Artefact2's thoughts on the matter:
249
+ https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
250
+
251
+ ## FAQ / Model Request
252
+
253
+ See https://huggingface.co/mradermacher/model_requests for some answers to
254
+ questions you might have and/or if you want some other model quantized.
255
+
256
+ ## Thanks
257
+
258
+ I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
259
+ me use its servers and providing upgrades to my workstation to enable
260
+ this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his private supercomputer, enabling me to provide many more imatrix quants, at much higher quality, than I would otherwise be able to.
261
+
262
+ <!-- end -->
263
+ ","{""id"": ""mradermacher/DeepSeek-V2-Chat-0628-GGUF"", ""author"": ""mradermacher"", ""sha"": ""f74dbf06ba74e99f60d8c6f0ee11586f584c4547"", ""last_modified"": ""2025-01-22 04:19:51+00:00"", ""created_at"": ""2024-08-02 06:24:44+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 3, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""en"", ""base_model:deepseek-ai/DeepSeek-V2-Chat-0628"", ""base_model:finetune:deepseek-ai/DeepSeek-V2-Chat-0628"", ""license:other"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: deepseek-ai/DeepSeek-V2-Chat-0628\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL\nquantized_by: mradermacher"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ3_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ3_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ3_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ3_S.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ3_S.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ3_S.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ3_XS.gguf.part1of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ3_XS.gguf.part2of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ4_XS.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ4_XS.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.IQ4_XS.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q2_K.gguf.part1of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q2_K.gguf.part2of2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q3_K_L.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q3_K_L.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q3_K_L.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q3_K_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q3_K_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q3_K_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q3_K_S.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q3_K_S.gguf.part2of3', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q3_K_S.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q4_K_M.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q4_K_M.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q4_K_M.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q4_K_S.gguf.part1of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q4_K_S.gguf.part2of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q4_K_S.gguf.part3of3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q5_K_M.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q5_K_M.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q5_K_M.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q5_K_M.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q5_K_S.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q5_K_S.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q5_K_S.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q5_K_S.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q6_K.gguf.part1of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q6_K.gguf.part2of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q6_K.gguf.part3of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q6_K.gguf.part4of4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q8_0.gguf.part1of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q8_0.gguf.part2of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q8_0.gguf.part3of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q8_0.gguf.part4of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q8_0.gguf.part5of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='DeepSeek-V2-Chat-0628.Q8_0.gguf.part6of6', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-01-22 04:19:51+00:00"", ""cardData"": ""base_model: deepseek-ai/DeepSeek-V2-Chat-0628\nlanguage:\n- en\nlibrary_name: transformers\nlicense: other\nlicense_name: deepseek\nlicense_link: https://github.com/deepseek-ai/DeepSeek-V2/blob/main/LICENSE-MODEL\nquantized_by: mradermacher"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""66ac7bac8fc00b5c163784ac"", ""modelId"": ""mradermacher/DeepSeek-V2-Chat-0628-GGUF"", ""usedStorage"": 
1900813261248}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=mradermacher/DeepSeek-V2-Chat-0628-GGUF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bmradermacher%2FDeepSeek-V2-Chat-0628-GGUF%5D(%2Fmradermacher%2FDeepSeek-V2-Chat-0628-GGUF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Kimi-VL-A3B-Instruct_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,478 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ moonshotai/Kimi-VL-A3B-Instruct,"---
3
+ license: mit
4
+ base_model:
5
+ - moonshotai/Moonlight-16B-A3B
6
+ pipeline_tag: image-text-to-text
7
+ library_name: transformers
8
+ tags:
9
+ - agent
10
+ - video
11
+ - screenspot
12
+ - long-context
13
+ ---
14
+
15
+
16
+
17
+ <div align=""center"">
18
+ <img width=""30%"" src=""figures/logo.png"">
19
+ </div>
20
+
21
+ <div align=""center"">
22
+ <a href=""https://arxiv.org/abs/2504.07491"">
23
+ <b>📄 Tech Report</b>
24
+ </a> &nbsp;|&nbsp;
25
+ <a href=""https://github.com/MoonshotAI/Kimi-VL"">
26
+ <b>📄 Github</b>
27
+ </a> &nbsp;|&nbsp;
28
+ <a href=""https://huggingface.co/spaces/moonshotai/Kimi-VL-A3B/"">💬 Chat Web</a>
29
+ </div>
30
+
31
+
32
+ ## Introduction
33
+
34
+ We present **Kimi-VL**, an efficient open-source Mixture-of-Experts (MoE) vision-language model (VLM) that offers **advanced multimodal reasoning, long-context understanding, and strong agent capabilities**—all while activating only **2.8B** parameters in its language decoder (Kimi-VL-A3B).
35
+
36
+ Kimi-VL demonstrates strong performance across challenging domains:
37
+ as a general-purpose VLM, Kimi-VL excels in multi-turn agent interaction tasks (e.g., OSWorld), achieving state-of-the-art results comparable to flagship models.
38
+ Furthermore, it exhibits remarkable capabilities across diverse challenging vision-language tasks, including college-level image and video comprehension, optical character recognition (OCR), mathematical reasoning, and multi-image understanding.
39
+
40
+ In comparative evaluations, it effectively competes with cutting-edge efficient VLMs such as GPT-4o-mini, Qwen2.5-VL-7B, and Gemma-3-12B-IT, while surpassing GPT-4o in several specialized domains.
41
+
42
+ Kimi-VL also advances the Pareto frontiers of multimodal models in processing long contexts and perceiving clearly: equipped with a 128K extended context window, Kimi-VL can process long and diverse inputs, achieving impressive scores of 64.5 on LongVideoBench and 35.1 on MMLongBench-Doc; its native-resolution vision encoder, MoonViT, further allows it to see and understand ultra-high-resolution visual inputs, achieving 83.2 on InfoVQA and 34.5 on ScreenSpot-Pro, while maintaining lower computational cost with common visual inputs and general tasks.
43
+
44
+ Building on this foundation, we introduce an advanced long-thinking variant: **Kimi-VL-Thinking**. Developed through long chain-of-thought (CoT) supervised fine-tuning (SFT) and reinforcement learning (RL), this model exhibits strong long-horizon reasoning capabilities. It achieves scores of 61.7 on MMMU, 36.8 on MathVision, and 71.3 on MathVista while maintaining the compact 2.8B activated LLM parameter footprint, setting a new standard for efficient yet capable multimodal **thinking** models.
45
+
46
+ ## Architecture
47
+
48
+ The model adopts an MoE language model, a native-resolution visual encoder (MoonViT), and an MLP projector, as illustrated in the following image.
49
+
50
+ <div align=""center"">
51
+ <img width=""90%"" src=""figures/arch.png"">
52
+ </div>
53
+
54
+ ## Model Variants
55
+
56
+ 🤗 For general multimodal perception and understanding, OCR, long video and long document, video perception, and agent uses, we recommend `Kimi-VL-A3B-Instruct` for efficient inference; for advanced text and multimodal reasoning (e.g. math), please consider using `Kimi-VL-A3B-Thinking`.
57
+
58
+ <div align=""center"">
59
+
60
+ | **Model** | **#Total Params** | **#Activated Params** | **Context Length** | **Download Link** |
61
+ | :------------: | :------------: | :------------: | :------------: | :------------: |
62
+ | Kimi-VL-A3B-Instruct | 16B | 3B | 128K | [🤗 Hugging Face](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct) |
63
+ | Kimi-VL-A3B-Thinking | 16B | 3B | 128K | [🤗 Hugging Face](https://huggingface.co/moonshotai/Kimi-VL-A3B-Thinking) |
64
+
65
+ </div>
66
+
67
+ > [!Note]
68
+ > Recommended parameter settings:
69
+ > - For **Thinking models**, it is recommended to use `Temperature = 0.6`.
70
+ > - For **Instruct models**, it is recommended to use `Temperature = 0.2`.
71
+
72
+ ## Performance
73
+
74
+ As an efficient model, Kimi-VL can robustly handle diverse tasks (fine-grained perception, math, college-level problems, OCR, agent, etc.) across a broad spectrum of input forms (single-image, multi-image, video, long-document, etc.).
75
+
76
+
77
+ A brief comparison with existing 10B-level dense VLMs and DeepSeek-VL2 (A4.5B):
78
+
79
+ <div align=""center"">
80
+ <img width=""100%"" src=""figures/instruct_perf.png"">
81
+ </div>
82
+
83
+ Full comparison (GPT-4o included for reference):
84
+
85
+ <div align=""center"">
86
+
87
+ | Benchmark (Metric) | GPT-4o | GPT-4o-Mini | Qwen2.5-VL-7B | Llama3.2-11B-Inst. | Gemma3-12B-IT | DeepSeek-VL2 | Kimi-VL-A3B-Instruct |
88
+ |--------------------------------|--------|-------------|---------------|--------------------|---------------|--------------|-------------|
89
+ | **Architecture** | - | - | Dense | Dense | Dense | MoE | MoE |
90
+ | **# Act. Params (LLM+VT)** | - | - | 7.6B+0.7B | 8B+2.6B | 12B+0.4B | 4.1B+0.4B | 2.8B+0.4B |
91
+ | **# Total Params** | - | - | 8B | 11B | 12B | 28B | 16B |
92
+ | | | | | | | | |
93
+ | **College-level** | | | | | | | |
94
+ | MMMU-Val (Pass@1) | *69.1* | **60.0** | 58.6 | 48 | 59.6 | 51.1 | 57.0 |
95
+ | VideoMMMU (Pass@1) | *61.2* | - | 47.4 | 41.8 | **57.2** | 44.4 | 52.6 |
96
+ | MMVU-Val (Pass@1) | *67.4* | **61.6** | 50.1 | 44.4 | 57.0 | 52.1 | 52.2 |
97
+ | | | | | | | | |
98
+ | **General** | | | | | | | |
99
+ | MMBench-EN-v1.1 (Acc) | *83.1* | 77.1 | 82.6 | 65.8 | 74.6 | 79.6 | **83.1** |
100
+ | MMStar (Acc) | *64.7* | 54.8 | **63.9** | 49.8 | 56.1 | 55.5 | 61.3 |
101
+ | MMVet (Pass@1) | *69.1* | 66.9 | **67.1** | 57.6 | 64.9 | 60.0 | 66.7 |
102
+ | RealWorldQA (Acc) | *75.4* | 67.1 | **68.5** | 63.3 | 59.1 | 68.4 | 68.1 |
103
+ | AI2D (Acc) | *84.6* | 77.8 | 83.9 | 77.3 | 78.1 | 81.4 | **84.9** |
104
+ | | | | | | | | |
105
+ | **Multi-image** | | | | | | | |
106
+ | BLINK (Acc) | *68.0* | 53.6 | 56.4 | 39.8 | 50.3 | - | **57.3** |
107
+ | | | | | | | | |
108
+ | **Math** | | | | | | | |
109
+ | MathVista (Pass@1) | *63.8* | 52.5 | 68.2 | 47.7 | 56.1 | 62.8 | **68.7** |
110
+ | MathVision (Pass@1) | *30.4* | - | 25.1 | 13.6 | **32.1** | 17.3 | 21.4 |
111
+ | | | | | | | | |
112
+ | **OCR** | | | | | | | |
113
+ | InfoVQA (Acc) | *80.7* | 57.9 | 82.6 | 34.6 | 43.8 | 78.1 | **83.2** |
114
+ | OCRBench (Acc) | *815* | 785 | 864 | 753 | 702 | 811 | **867** |
115
+ | | | | | | | | |
116
+ | **OS Agent** | | | | | | | |
117
+ | ScreenSpot-V2 (Acc) | *18.1* | 6.9 | 84.2 | - | - | - | **92.8** |
118
+ | ScreenSpot-Pro (Acc) | *0.8* | - | 29.0 | - | - | - | **34.5** |
119
+ | OSWorld (Pass@1) | *5.03* | - | 2.5 | - | - | - | **8.22** |
120
+ | WindowsAgentArena (Pass@1) | *9.4* | 2.7 | 3.4 | - | - | - | **10.4** |
121
+ | | | | | | | | |
122
+ | **Long Document** | | | | | | | |
123
+ | MMLongBench-Doc (Acc) | *42.8* | 29.0 | 29.6 | 13.8 | 21.3 | - | **35.1** |
124
+ | | | | | | | | |
125
+ | **Long Video** | | | | | | | |
126
+ | Video-MME (w/o sub.) | *71.9* | 64.8 | 65.1 | 46.0 | 58.2 | - | **67.8** |
127
+ | Video-MME (w sub.) | *77.2* | 68.9 | 71.6 | 49.5 | 62.1 | - | **72.6** |
128
+ | MLVU-MCQ (Acc) | *64.6* | 48.1 | 70.2 | 44.4 | 52.3 | - | **74.2** |
129
+ | LongVideoBench (val) | *66.7* | 58.2 | 56.0 | 45.5 | 51.5 | - | **64.5** |
130
+ | | | | | | | | |
131
+ | **Video Perception** | | | | | | | |
132
+ | EgoSchema (full) | 72.2 | - | 65.0 | 54.3 | 56.9 | 38.5 | **78.5** |
133
+ | VSI-Bench | 34.0 | - | 34.2 | 20.6 | 32.4 | 21.7 | **37.4** |
134
+ | TOMATO | *37.7* | 28.8 | 27.6 | 21.5 | 28.6 | 27.2 | **31.7** |
135
+
136
+ </div>
137
+
138
+ ### Inference with 🤗 Hugging Face Transformers
139
+
140
+ > [!Note]
141
+ > Recommended prompt for OS agent tasks (the expected output is a point):
142
+ > - `Please observe the screenshot, please locate the following elements with action and point.<instruction> [YOUR INSTRUCTION]`
143
+
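+ For instance, the recommended prompt can be assembled as follows (a sketch; the instruction text is a hypothetical example):
+
+ ```python
+ # Build the recommended OS-agent prompt; replace `instruction` with your own text.
+ instruction = ""Open the Settings app""
+ prompt = (
+     ""Please observe the screenshot, please locate the following elements ""
+     f""with action and point.<instruction> {instruction}""
+ )
+ ```
+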
144
+ Here we show how to use our model at the inference stage with the 🤗 Transformers library. We recommend python=3.10, torch>=2.1.0, and transformers=4.48.2 as the development environment.
145
+
146
+ ```python
147
+ from PIL import Image
148
+ from transformers import AutoModelForCausalLM, AutoProcessor
149
+
150
+ model_path = ""moonshotai/Kimi-VL-A3B-Instruct""
151
+ model = AutoModelForCausalLM.from_pretrained(
152
+ model_path,
153
+ torch_dtype=""auto"",
154
+ device_map=""auto"",
155
+ trust_remote_code=True,
156
+ )
157
+ processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
158
+
159
+ image_path = ""./figures/demo.png""
160
+ image = Image.open(image_path)
161
+ messages = [
162
+ {""role"": ""user"", ""content"": [{""type"": ""image"", ""image"": image_path}, {""type"": ""text"", ""text"": ""What is the dome building in the picture? Think step by step.""}]}
163
+ ]
164
+ text = processor.apply_chat_template(messages, add_generation_prompt=True, return_tensors=""pt"")
165
+ inputs = processor(images=image, text=text, return_tensors=""pt"", padding=True, truncation=True).to(model.device)
166
+ generated_ids = model.generate(**inputs, max_new_tokens=512)
167
+ generated_ids_trimmed = [
168
+ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
169
+ ]
170
+ response = processor.batch_decode(
171
+ generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
172
+ )[0]
173
+ print(response)
174
+ ```
175
+
176
+ ### Inference with vLLM
177
+
178
+ We have submitted a pull request [#16387](https://github.com/vllm-project/vllm/pull/16387) to vLLM. Until it is merged, you are welcome to deploy Kimi-VL from the corresponding PR branch.
179
+
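+ Once that branch is installed, offline inference could look roughly like the sketch below (this assumes the PR exposes Kimi-VL through vLLM's standard `LLM` entry point; the merged API may differ):
+
+ ```python
+ from vllm import LLM, SamplingParams
+
+ # Hypothetical usage until the PR is merged; options may change.
+ llm = LLM(model=""moonshotai/Kimi-VL-A3B-Instruct"", trust_remote_code=True)
+ outputs = llm.generate([""Describe the image.""], SamplingParams(temperature=0.2, max_tokens=512))
+ print(outputs[0].outputs[0].text)
+ ```
+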
180
+ ## Citation
181
+
182
+ ```
183
+ @misc{kimiteam2025kimivltechnicalreport,
184
+ title={{Kimi-VL} Technical Report},
185
+ author={Kimi Team and Angang Du and Bohong Yin and Bowei Xing and Bowen Qu and Bowen Wang and Cheng Chen and Chenlin Zhang and Chenzhuang Du and Chu Wei and Congcong Wang and Dehao Zhang and Dikang Du and Dongliang Wang and Enming Yuan and Enzhe Lu and Fang Li and Flood Sung and Guangda Wei and Guokun Lai and Han Zhu and Hao Ding and Hao Hu and Hao Yang and Hao Zhang and Haoning Wu and Haotian Yao and Haoyu Lu and Heng Wang and Hongcheng Gao and Huabin Zheng and Jiaming Li and Jianlin Su and Jianzhou Wang and Jiaqi Deng and Jiezhong Qiu and Jin Xie and Jinhong Wang and Jingyuan Liu and Junjie Yan and Kun Ouyang and Liang Chen and Lin Sui and Longhui Yu and Mengfan Dong and Mengnan Dong and Nuo Xu and Pengyu Cheng and Qizheng Gu and Runjie Zhou and Shaowei Liu and Sihan Cao and Tao Yu and Tianhui Song and Tongtong Bai and Wei Song and Weiran He and Weixiao Huang and Weixin Xu and Xiaokun Yuan and Xingcheng Yao and Xingzhe Wu and Xinxing Zu and Xinyu Zhou and Xinyuan Wang and Y. Charles and Yan Zhong and Yang Li and Yangyang Hu and Yanru Chen and Yejie Wang and Yibo Liu and Yibo Miao and Yidao Qin and Yimin Chen and Yiping Bao and Yiqin Wang and Yongsheng Kang and Yuanxin Liu and Yulun Du and Yuxin Wu and Yuzhi Wang and Yuzi Yan and Zaida Zhou and Zhaowei Li and Zhejun Jiang and Zheng Zhang and Zhilin Yang and Zhiqi Huang and Zihao Huang and Zijia Zhao and Ziwei Chen},
186
+ year={2025},
187
+ eprint={2504.07491},
188
+ archivePrefix={arXiv},
189
+ primaryClass={cs.CV},
190
+ url={https://arxiv.org/abs/2504.07491},
191
+ }
192
+ ```","{""id"": ""moonshotai/Kimi-VL-A3B-Instruct"", ""author"": ""moonshotai"", ""sha"": ""7a3c132a7b0f1f1677f5a72f258bd3afded7d357"", ""last_modified"": ""2025-04-20 12:28:03+00:00"", ""created_at"": ""2025-04-09 08:07:06+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 33892, ""downloads_all_time"": null, ""likes"": 182, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""kimi_vl"", ""feature-extraction"", ""agent"", ""video"", ""screenspot"", ""long-context"", ""image-text-to-text"", ""conversational"", ""custom_code"", ""arxiv:2504.07491"", ""base_model:moonshotai/Moonlight-16B-A3B"", ""base_model:finetune:moonshotai/Moonlight-16B-A3B"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- moonshotai/Moonlight-16B-A3B\nlibrary_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text\ntags:\n- agent\n- video\n- screenspot\n- long-context"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""KimiVLForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""configuration_kimi_vl.KimiVLConfig"", ""AutoModel"": ""modeling_kimi_vl.KimiVLForConditionalGeneration"", ""AutoModelForCausalLM"": ""modeling_kimi_vl.KimiVLForConditionalGeneration""}, ""model_type"": ""kimi_vl"", ""tokenizer_config"": {""bos_token"": ""[BOS]"", ""eos_token"": ""[EOS]"", ""pad_token"": ""[PAD]"", ""unk_token"": ""[UNK]"", ""chat_template"": ""{%- for message in messages -%}{%- if loop.first and messages[0]['role'] != 'system' -%}{{'<|im_system|>system<|im_middle|>You are a helpful assistant<|im_end|>'}}{%- endif -%}{%- if message['role'] == 'system' -%}{{'<|im_system|>'}}{%- endif -%}{%- if message['role'] == 'user' -%}{{'<|im_user|>'}}{%- endif -%}{%- if message['role'] == 'assistant' -%}{{'<|im_assistant|>'}}{%- endif -%}{{- message['role'] -}}{{'<|im_middle|>'}}{%- if message['content'] is string -%}{{- message['content'] + '<|im_end|>' -}}{%- else -%}{%- for content in message['content'] -%}{%- if content['type'] == 'image' or 'image' in content or 'image_url' in content -%}{{'<|media_start|>image<|media_content|><|media_pad|><|media_end|>'}}{%- else -%}{{content['text']}}{%- endif -%}{%- endfor -%}{{'<|im_end|>'}}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{'<|im_assistant|>assistant<|im_middle|>'}}{%- endif -%}""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_kimi_vl.KimiVLForConditionalGeneration"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chat_template.jinja', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_kimi_vl.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/arch.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/demo.png', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='figures/instruct_perf.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/logo.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_processing_kimi_vl.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_kimi_vl.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_kimi_vl.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tiktoken.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_moonshot.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""moonshotai/Kimi-VL-A3B""], ""safetensors"": {""parameters"": {""BF16"": 16407657776}, ""total"": 16407657776}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-20 12:28:03+00:00"", ""cardData"": ""base_model:\n- moonshotai/Moonlight-16B-A3B\nlibrary_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text\ntags:\n- agent\n- video\n- screenspot\n- long-context"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_kimi_vl.KimiVLForConditionalGeneration"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""67f62aaa5906599eda1762d5"", ""modelId"": ""moonshotai/Kimi-VL-A3B-Instruct"", ""usedStorage"": 32822309117}",0,"https://huggingface.co/moonshotai/Kimi-VL-A3B-Thinking, https://huggingface.co/blobbybob/Kimi-VL-A3B-Thinking-GGUF",2,,0,,0,,0,"huggingface/InferenceSupport/discussions/634, moonshotai/Kimi-VL-A3B, moonshotai/Kimi-VL-A3B/",3
193
+ moonshotai/Kimi-VL-A3B-Thinking,"---
194
+ base_model:
195
+ - moonshotai/Kimi-VL-A3B-Instruct
196
+ license: mit
197
+ pipeline_tag: image-text-to-text
198
+ library_name: transformers
199
+ ---
200
+
201
+ <div align=""center"">
202
+ <img width=""30%"" src=""figures/logo.png"">
203
+ </div>
204
+
205
+ <div align=""center"">
206
+ <a href=""https://arxiv.org/abs/2504.07491"">
207
+ <b>📄 Tech Report</b>
208
+ </a> &nbsp;|&nbsp;
209
+ <a href=""https://github.com/MoonshotAI/Kimi-VL"">
210
+ <b>📄 Github</b>
211
+ </a> &nbsp;|&nbsp;
212
+ <a href=""https://huggingface.co/spaces/moonshotai/Kimi-VL-A3B-Thinking/"">💬 Chat Web</a>
213
+ </div>
214
+
215
+ ## 1. Introduction
216
+
217
+ We present **Kimi-VL**, an efficient open-source Mixture-of-Experts (MoE) vision-language model (VLM) that offers **advanced multimodal reasoning, long-context understanding, and strong agent capabilities**—all while activating only **2.8B** parameters in its language decoder (Kimi-VL-A3B).
218
+
219
+ Kimi-VL demonstrates strong performance across challenging domains:
220
+ as a general-purpose VLM, Kimi-VL excels in multi-turn agent interaction tasks (e.g., OSWorld), achieving state-of-the-art results comparable to flagship models.
221
+ Furthermore, it exhibits remarkable capabilities across diverse challenging vision-language tasks, including college-level image and video comprehension, optical character recognition (OCR), mathematical reasoning, multi-image understanding, and more.
222
+
223
+ In comparative evaluations, it effectively competes with cutting-edge efficient VLMs such as GPT-4o-mini, Qwen2.5-VL-7B, and Gemma-3-12B-IT, while surpassing GPT-4o in several specialized domains.
224
+
225
+ Kimi-VL also advances the Pareto frontier of multimodal models in processing long contexts and perceiving clearly: equipped with a 128K extended context window, Kimi-VL can process long and diverse inputs, achieving impressive scores of 64.5 on LongVideoBench and 35.1 on MMLongBench-Doc; its native-resolution vision encoder, MoonViT, further allows it to see and understand ultra-high-resolution visual inputs, achieving 83.2 on InfoVQA and 34.5 on ScreenSpot-Pro, while maintaining lower computational cost on common visual inputs and general tasks.
226
+
227
+ Building on this foundation, we introduce an advanced long-thinking variant: **Kimi-VL-Thinking**. Developed through long chain-of-thought (CoT) supervised fine-tuning (SFT) and reinforcement learning (RL), this model exhibits strong long-horizon reasoning capabilities. It achieves scores of 61.7 on MMMU, 36.8 on MathVision, and 71.3 on MathVista while maintaining the compact 2.8B activated LLM parameter footprint, setting a new standard for efficient yet capable multimodal **thinking** models.
228
+
229
+ More information can be found in our technical report: [Kimi-VL Technical Report](https://arxiv.org/abs/2504.07491).
230
+
231
+ ## 2. Architecture
232
+
233
+ The model adopts an MoE language model, a native-resolution visual encoder (MoonViT), and an MLP projector, as illustrated in the following image.
234
+
235
+ <div align=""center"">
236
+ <img width=""90%"" src=""figures/arch.png"">
237
+ </div>
238
+
239
+ ## 3. Model Variants
240
+
241
+ 🤗 For general multimodal perception and understanding, OCR, long video and long document, video perception, and agent uses, we recommend `Kimi-VL-A3B-Instruct` for efficient inference; for advanced text and multimodal reasoning (e.g. math), please consider using `Kimi-VL-A3B-Thinking`.
242
+
243
+ <div align=""center"">
244
+
245
+ | **Model** | **#Total Params** | **#Activated Params** | **Context Length** | **Download Link** |
246
+ | :------------: | :------------: | :------------: | :------------: | :------------: |
247
+ | Kimi-VL-A3B-Instruct | 16B | 3B | 128K | [🤗 Hugging Face](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct) |
248
+ | Kimi-VL-A3B-Thinking | 16B | 3B | 128K | [🤗 Hugging Face](https://huggingface.co/moonshotai/Kimi-VL-A3B-Thinking) |
249
+
250
+ </div>
251
+
252
+ > [!Note]
253
+ > Recommended parameter settings:
254
+ > - For **Thinking models**, it is recommended to use `Temperature = 0.6`.
255
+ > - For **Instruct models**, it is recommended to use `Temperature = 0.2`.
256
+
257
+
258
+
259
+ ## 4. Performance
260
+
261
+ With effective long-thinking abilities, Kimi-VL-A3B-Thinking can match the performance of 30B/70B frontier open-source VLMs on the MathVision benchmark:
262
+
263
+ <div align=""center"">
264
+ <img width=""100%"" src=""figures/thinking_perf.png"">
265
+ </div>
266
+
267
+ Full comparison on MMMU, MathVision, and MathVista-mini:
268
+
269
+ <div align=""center"">
270
+
271
+ | Benchmark (Metric) | GPT-4o | GPT-4o-mini | Qwen2.5-VL-72B | Qwen2.5-VL-7B | Gemma-3-27B | Gemma-3-12B | o1-1217 | QVQ-72B | Kimi-k1.5 | Kimi-VL-Thinking-A3B |
272
+ |---------------------------------|--------|-------------|----------------|---------------|-------------|-------------|---------|----------|-----------|----------------------|
273
+ | *Thinking Model?* | | | | | | | ✅ | ✅ | ✅ | ✅ |
274
+ | MathVision (full) (Pass@1) | 30.4 | - | 38.1 | 25.1 | 35.5 | 32.1 | - | 35.9 | 38.6 | 36.8 |
275
+ | MathVista (mini) (Pass@1) | 63.8 | 56.7 | 74.8 | 68.2 | 62.3 | 56.4 | 71.0 | 71.4 | 74.9 | 71.3 |
276
+ | MMMU (val) (Pass@1) | 69.1 | 60.0 | 74.8 | 58.6 | 64.8 | 59.6 | 77.3 | 70.3 | 70.0 | 61.7 |
277
+
278
+ </div>
279
+
280
+ ### Inference with 🤗 Hugging Face Transformers
281
+
282
+ Here we show how to use our model at the inference stage with the 🤗 Transformers library. We recommend python=3.10, torch>=2.1.0, and transformers=4.48.2 as the development environment.
283
+
284
+ ```python
285
+ from PIL import Image
286
+ from transformers import AutoModelForCausalLM, AutoProcessor
287
+
288
+ model_path = ""moonshotai/Kimi-VL-A3B-Thinking""
289
+ model = AutoModelForCausalLM.from_pretrained(
290
+ model_path,
291
+ torch_dtype=""auto"",
292
+ device_map=""auto"",
293
+ trust_remote_code=True,
294
+ )
295
+ processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
296
+
297
+ image_paths = [""./figures/demo1.png"", ""./figures/demo2.png""]
298
+ images = [Image.open(path) for path in image_paths]
299
+ messages = [
300
+ {
301
+ ""role"": ""user"",
302
+ ""content"": [
303
+ {""type"": ""image"", ""image"": image_path} for image_path in image_paths
304
+ ] + [{""type"": ""text"", ""text"": ""Please infer step by step who this manuscript belongs to and what it records""}],
305
+ },
306
+ ]
307
+ text = processor.apply_chat_template(messages, add_generation_prompt=True, return_tensors=""pt"")
308
+ inputs = processor(images=images, text=text, return_tensors=""pt"", padding=True, truncation=True).to(model.device)
309
+ generated_ids = model.generate(**inputs, max_new_tokens=2048)
310
+ generated_ids_trimmed = [
311
+ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
312
+ ]
313
+ response = processor.batch_decode(
314
+ generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
315
+ )[0]
316
+ print(response)
317
+ ```
318
+
319
+ ### Inference with vLLM
320
+
321
+ We have submitted a pull request [#16387](https://github.com/vllm-project/vllm/pull/16387) to vLLM. Until it is merged, you are welcome to deploy Kimi-VL from the corresponding PR branch.
322
+
323
+ ## 5. Citation
324
+
325
+ ```
326
+ @misc{kimiteam2025kimivltechnicalreport,
327
+ title={{Kimi-VL} Technical Report},
328
+ author={Kimi Team and Angang Du and Bohong Yin and Bowei Xing and Bowen Qu and Bowen Wang and Cheng Chen and Chenlin Zhang and Chenzhuang Du and Chu Wei and Congcong Wang and Dehao Zhang and Dikang Du and Dongliang Wang and Enming Yuan and Enzhe Lu and Fang Li and Flood Sung and Guangda Wei and Guokun Lai and Han Zhu and Hao Ding and Hao Hu and Hao Yang and Hao Zhang and Haoning Wu and Haotian Yao and Haoyu Lu and Heng Wang and Hongcheng Gao and Huabin Zheng and Jiaming Li and Jianlin Su and Jianzhou Wang and Jiaqi Deng and Jiezhong Qiu and Jin Xie and Jinhong Wang and Jingyuan Liu and Junjie Yan and Kun Ouyang and Liang Chen and Lin Sui and Longhui Yu and Mengfan Dong and Mengnan Dong and Nuo Xu and Pengyu Cheng and Qizheng Gu and Runjie Zhou and Shaowei Liu and Sihan Cao and Tao Yu and Tianhui Song and Tongtong Bai and Wei Song and Weiran He and Weixiao Huang and Weixin Xu and Xiaokun Yuan and Xingcheng Yao and Xingzhe Wu and Xinxing Zu and Xinyu Zhou and Xinyuan Wang and Y. Charles and Yan Zhong and Yang Li and Yangyang Hu and Yanru Chen and Yejie Wang and Yibo Liu and Yibo Miao and Yidao Qin and Yimin Chen and Yiping Bao and Yiqin Wang and Yongsheng Kang and Yuanxin Liu and Yulun Du and Yuxin Wu and Yuzhi Wang and Yuzi Yan and Zaida Zhou and Zhaowei Li and Zhejun Jiang and Zheng Zhang and Zhilin Yang and Zhiqi Huang and Zihao Huang and Zijia Zhao and Ziwei Chen},
329
+ year={2025},
330
+ eprint={2504.07491},
331
+ archivePrefix={arXiv},
332
+ primaryClass={cs.CV},
333
+ url={https://arxiv.org/abs/2504.07491},
334
+ }
335
+ ```","{""id"": ""moonshotai/Kimi-VL-A3B-Thinking"", ""author"": ""moonshotai"", ""sha"": ""16681d8ac24e505088698e4e34ea494dd6e24400"", ""last_modified"": ""2025-04-20 12:30:41+00:00"", ""created_at"": ""2025-04-09 08:13:53+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 45617, ""downloads_all_time"": null, ""likes"": 386, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""kimi_vl"", ""feature-extraction"", ""image-text-to-text"", ""conversational"", ""custom_code"", ""arxiv:2504.07491"", ""base_model:moonshotai/Kimi-VL-A3B-Instruct"", ""base_model:finetune:moonshotai/Kimi-VL-A3B-Instruct"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- moonshotai/Kimi-VL-A3B-Instruct\nlibrary_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""KimiVLForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""configuration_kimi_vl.KimiVLConfig"", ""AutoModel"": ""modeling_kimi_vl.KimiVLForConditionalGeneration"", ""AutoModelForCausalLM"": ""modeling_kimi_vl.KimiVLForConditionalGeneration""}, ""model_type"": ""kimi_vl"", ""tokenizer_config"": {""bos_token"": ""[BOS]"", ""eos_token"": ""[EOS]"", ""pad_token"": ""[PAD]"", ""unk_token"": ""[UNK]"", ""chat_template"": ""{%- for message in messages -%}{%- if loop.first and messages[0]['role'] != 'system' -%}{{'<|im_system|>system<|im_middle|>You are a helpful assistant<|im_end|>'}}{%- endif -%}{%- if message['role'] == 'system' -%}{{'<|im_system|>'}}{%- endif -%}{%- if message['role'] == 'user' -%}{{'<|im_user|>'}}{%- endif -%}{%- if message['role'] == 'assistant' -%}{{'<|im_assistant|>'}}{%- endif -%}{{- message['role'] -}}{{'<|im_middle|>'}}{%- if message['content'] is string -%}{{- message['content'] + '<|im_end|>' -}}{%- else -%}{%- for content in message['content'] -%}{%- if content['type'] == 'image' or 'image' in content or 'image_url' in content -%}{{'<|media_start|>image<|media_content|><|media_pad|><|media_end|>'}}{%- else -%}{{content['text']}}{%- endif -%}{%- endfor -%}{{'<|im_end|>'}}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{'<|im_assistant|>assistant<|im_middle|>'}}{%- endif -%}""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_kimi_vl.KimiVLForConditionalGeneration"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chat_template.jinja', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_kimi_vl.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/arch.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/demo1.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/demo2.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/logo.png', size=None, 
blob_id=None, lfs=None)"", ""RepoSibling(rfilename='figures/thinking_perf.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='image_processing_kimi_vl.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00005-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00006-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00007-of-00007.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_kimi_vl.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='processing_kimi_vl.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tiktoken.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_moonshot.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""moonshotai/Kimi-VL-A3B-Thinking"", ""aimevzulari/Prompt_Uzmani"", ""moonshotai/Kimi-VL-A3B"", ""therayz1/Prompt_Engineer"", ""kayrahan/promtmuhendisi"", ""teowu/Kimi-VL-A3B"", ""orrzxz/Kimi-VL-A3B-Thinking-VideoAnalyze"", ""shukdevdatta123/MedicineOCR3""], ""safetensors"": {""parameters"": {""BF16"": 16407657776}, ""total"": 16407657776}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-20 12:30:41+00:00"", ""cardData"": ""base_model:\n- moonshotai/Kimi-VL-A3B-Instruct\nlibrary_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_kimi_vl.KimiVLForConditionalGeneration"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""67f62c41af6fc450756d2b36"", ""modelId"": ""moonshotai/Kimi-VL-A3B-Thinking"", ""usedStorage"": 32820262484}",1,,0,,0,,0,,0,"aimevzulari/Prompt_Uzmani, huggingface/InferenceSupport/discussions/555, kayrahan/promtmuhendisi, moonshotai/Kimi-VL-A3B, moonshotai/Kimi-VL-A3B-Thinking, moonshotai/Kimi-VL-A3B-Thinking/, orrzxz/Kimi-VL-A3B-Thinking-VideoAnalyze, shukdevdatta123/MedicineOCR3, teowu/Kimi-VL-A3B, therayz1/Prompt_Engineer",10
336
+ blobbybob/Kimi-VL-A3B-Thinking-GGUF,"---
337
+ base_model:
338
+ - moonshotai/Kimi-VL-A3B-Instruct
339
+ license: mit
340
+ pipeline_tag: image-text-to-text
341
+ library_name: transformers
342
+ ---
343
+
344
+ <div align=""center"">
345
+ <img width=""30%"" src=""figures/logo.png"">
346
+ </div>
347
+
348
+ <div align=""center"">
349
+ <a href=""https://arxiv.org/abs/2504.07491"">
350
+ <b>📄 Tech Report</b>
351
+ </a> &nbsp;|&nbsp;
352
+ <a href=""https://github.com/MoonshotAI/Kimi-VL"">
353
+ <b>📄 Github</b>
354
+ </a> &nbsp;|&nbsp;
355
+ <a href=""https://huggingface.co/spaces/moonshotai/Kimi-VL-A3B-Thinking/"">💬 Chat Web</a>
356
+ </div>
357
+
358
+ ## 1. Introduction
359
+
360
+ We present **Kimi-VL**, an efficient open-source Mixture-of-Experts (MoE) vision-language model (VLM) that offers **advanced multimodal reasoning, long-context understanding, and strong agent capabilities**—all while activating only **2.8B** parameters in its language decoder (Kimi-VL-A3B).
361
+
362
+ Kimi-VL demonstrates strong performance across challenging domains:
363
+ as a general-purpose VLM, Kimi-VL excels in multi-turn agent interaction tasks (e.g., OSWorld), achieving state-of-the-art results comparable to flagship models.
364
+ Furthermore, it exhibits remarkable capabilities across diverse challenging vision-language tasks, including college-level image and video comprehension, optical character recognition (OCR), mathematical reasoning, multi-image understanding, and more.
365
+
366
+ In comparative evaluations, it effectively competes with cutting-edge efficient VLMs such as GPT-4o-mini, Qwen2.5-VL-7B, and Gemma-3-12B-IT, while surpassing GPT-4o in several specialized domains.
367
+
368
+ Kimi-VL also advances the Pareto frontier of multimodal models in processing long contexts and perceiving clearly: equipped with a 128K extended context window, Kimi-VL can process long and diverse inputs, achieving impressive scores of 64.5 on LongVideoBench and 35.1 on MMLongBench-Doc; its native-resolution vision encoder, MoonViT, further allows it to see and understand ultra-high-resolution visual inputs, achieving 83.2 on InfoVQA and 34.5 on ScreenSpot-Pro, while maintaining lower computational cost on common visual inputs and general tasks.
369
+
370
+ Building on this foundation, we introduce an advanced long-thinking variant: **Kimi-VL-Thinking**. Developed through long chain-of-thought (CoT) supervised fine-tuning (SFT) and reinforcement learning (RL), this model exhibits strong long-horizon reasoning capabilities. It achieves scores of 61.7 on MMMU, 36.8 on MathVision, and 71.3 on MathVista while maintaining the compact 2.8B activated LLM parameter footprint, setting a new standard for efficient yet capable multimodal **thinking** models.
371
+
372
+ More information can be found in our technical report: [Kimi-VL Technical Report](https://arxiv.org/abs/2504.07491).
373
+
374
+ ## 2. Architecture
375
+
376
+ The model adopts an MoE language model, a native-resolution visual encoder (MoonViT), and an MLP projector, as illustrated in the following image.
377
+
378
+ <div align=""center"">
379
+ <img width=""90%"" src=""figures/arch.png"">
380
+ </div>
381
+
382
+ ## 3. Model Variants
383
+
384
+ 🤗 For general multimodal perception and understanding, OCR, long video and long document, video perception, and agent uses, we recommend `Kimi-VL-A3B-Instruct` for efficient inference; for advanced text and multimodal reasoning (e.g. math), please consider using `Kimi-VL-A3B-Thinking`.
385
+
386
+ <div align=""center"">
387
+
388
+ | **Model** | **#Total Params** | **#Activated Params** | **Context Length** | **Download Link** |
389
+ | :------------: | :------------: | :------------: | :------------: | :------------: |
390
+ | Kimi-VL-A3B-Instruct | 16B | 3B | 128K | [🤗 Hugging Face](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct) |
391
+ | Kimi-VL-A3B-Thinking | 16B | 3B | 128K | [🤗 Hugging Face](https://huggingface.co/moonshotai/Kimi-VL-A3B-Thinking) |
392
+
393
+ </div>
394
+
395
+ > [!Note]
396
+ > Recommended parameter settings:
397
+ > - For **Thinking models**, it is recommended to use `Temperature = 0.6`.
398
+ > - For **Instruct models**, it is recommended to use `Temperature = 0.2`.
399
+
400
+
401
+
402
+ ## 4. Performance
403
+
404
+ With effective long-thinking abilities, Kimi-VL-A3B-Thinking can match the performance of 30B/70B frontier open-source VLMs on the MathVision benchmark:
405
+
406
+ <div align=""center"">
407
+ <img width=""100%"" src=""figures/thinking_perf.png"">
408
+ </div>
409
+
410
+ Full comparison on MMMU, MathVision, and MathVista-mini:
411
+
412
+ <div align=""center"">
413
+
414
+ | Benchmark (Metric) | GPT-4o | GPT-4o-mini | Qwen2.5-VL-72B | Qwen2.5-VL-7B | Gemma-3-27B | Gemma-3-12B | o1-1217 | QVQ-72B | Kimi-k1.5 | Kimi-VL-Thinking-A3B |
415
+ |---------------------------------|--------|-------------|----------------|---------------|-------------|-------------|---------|----------|-----------|----------------------|
416
+ | *Thinking Model?* | | | | | | | ✅ | ✅ | ✅ | ✅ |
417
+ | MathVision (full) (Pass@1) | 30.4 | - | 38.1 | 25.1 | 35.5 | 32.1 | - | 35.9 | 38.6 | 36.8 |
418
+ | MathVista (mini) (Pass@1) | 63.8 | 56.7 | 74.8 | 68.2 | 62.3 | 56.4 | 71.0 | 71.4 | 74.9 | 71.3 |
419
+ | MMMU (val) (Pass@1) | 69.1 | 60.0 | 74.8 | 58.6 | 64.8 | 59.6 | 77.3 | 70.3 | 70.0 | 61.7 |
420
+
421
+ </div>
422
+
423
+ ### Inference with 🤗 Hugging Face Transformers
424
+
425
+ Here we show how to use our model at the inference stage with the 🤗 Transformers library. We recommend python=3.10, torch>=2.1.0, and transformers=4.48.2 as the development environment.
426
+
427
+ ```python
428
+ from PIL import Image
429
+ from transformers import AutoModelForCausalLM, AutoProcessor
430
+
431
+ model_path = ""moonshotai/Kimi-VL-A3B-Thinking""
432
+ model = AutoModelForCausalLM.from_pretrained(
433
+ model_path,
434
+ torch_dtype=""auto"",
435
+ device_map=""auto"",
436
+ trust_remote_code=True,
437
+ )
438
+ processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
439
+
440
+ image_paths = [""./figures/demo1.png"", ""./figures/demo2.png""]
441
+ images = [Image.open(path) for path in image_paths]
442
+ messages = [
443
+ {
444
+ ""role"": ""user"",
445
+ ""content"": [
446
+ {""type"": ""image"", ""image"": image_path} for image_path in image_paths
447
+ ] + [{""type"": ""text"", ""text"": ""Please infer step by step who this manuscript belongs to and what it records""}],
448
+ },
449
+ ]
450
+ text = processor.apply_chat_template(messages, add_generation_prompt=True, return_tensors=""pt"")
451
+ inputs = processor(images=images, text=text, return_tensors=""pt"", padding=True, truncation=True).to(model.device)
452
+ generated_ids = model.generate(**inputs, max_new_tokens=2048)
453
+ generated_ids_trimmed = [
454
+ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
455
+ ]
456
+ response = processor.batch_decode(
457
+ generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
458
+ )[0]
459
+ print(response)
460
+ ```
461
+
462
+ ### Inference with vLLM
463
+
464
+ We have submitted a pull request [#16387](https://github.com/vllm-project/vllm/pull/16387) to vLLM. Until it is merged, you are welcome to deploy Kimi-VL from the corresponding PR branch.
465
+
466
+ ## 5. Citation
467
+
468
+ ```
469
+ @misc{kimiteam2025kimivltechnicalreport,
470
+ title={{Kimi-VL} Technical Report},
471
+ author={Kimi Team and Angang Du and Bohong Yin and Bowei Xing and Bowen Qu and Bowen Wang and Cheng Chen and Chenlin Zhang and Chenzhuang Du and Chu Wei and Congcong Wang and Dehao Zhang and Dikang Du and Dongliang Wang and Enming Yuan and Enzhe Lu and Fang Li and Flood Sung and Guangda Wei and Guokun Lai and Han Zhu and Hao Ding and Hao Hu and Hao Yang and Hao Zhang and Haoning Wu and Haotian Yao and Haoyu Lu and Heng Wang and Hongcheng Gao and Huabin Zheng and Jiaming Li and Jianlin Su and Jianzhou Wang and Jiaqi Deng and Jiezhong Qiu and Jin Xie and Jinhong Wang and Jingyuan Liu and Junjie Yan and Kun Ouyang and Liang Chen and Lin Sui and Longhui Yu and Mengfan Dong and Mengnan Dong and Nuo Xu and Pengyu Cheng and Qizheng Gu and Runjie Zhou and Shaowei Liu and Sihan Cao and Tao Yu and Tianhui Song and Tongtong Bai and Wei Song and Weiran He and Weixiao Huang and Weixin Xu and Xiaokun Yuan and Xingcheng Yao and Xingzhe Wu and Xinxing Zu and Xinyu Zhou and Xinyuan Wang and Y. Charles and Yan Zhong and Yang Li and Yangyang Hu and Yanru Chen and Yejie Wang and Yibo Liu and Yibo Miao and Yidao Qin and Yimin Chen and Yiping Bao and Yiqin Wang and Yongsheng Kang and Yuanxin Liu and Yulun Du and Yuxin Wu and Yuzhi Wang and Yuzi Yan and Zaida Zhou and Zhaowei Li and Zhejun Jiang and Zheng Zhang and Zhilin Yang and Zhiqi Huang and Zihao Huang and Zijia Zhao and Ziwei Chen},
472
+ year={2025},
473
+ eprint={2504.07491},
474
+ archivePrefix={arXiv},
475
+ primaryClass={cs.CV},
476
+ url={https://arxiv.org/abs/2504.07491},
477
+ }
478
+ ```","{""id"": ""blobbybob/Kimi-VL-A3B-Thinking-GGUF"", ""author"": ""blobbybob"", ""sha"": ""84dca0a18e837cefd62e1b9862816c6e4ddbd3cb"", ""last_modified"": ""2025-04-13 05:01:11+00:00"", ""created_at"": ""2025-04-13 02:27:38+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 1, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""image-text-to-text"", ""arxiv:2504.07491"", ""base_model:moonshotai/Kimi-VL-A3B-Instruct"", ""base_model:finetune:moonshotai/Kimi-VL-A3B-Instruct"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""image-text-to-text"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- moonshotai/Kimi-VL-A3B-Instruct\nlibrary_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-04-13 05:01:11+00:00"", ""cardData"": ""base_model:\n- moonshotai/Kimi-VL-A3B-Instruct\nlibrary_name: transformers\nlicense: mit\npipeline_tag: image-text-to-text"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""67fb211a16159a48bc50d89f"", ""modelId"": ""blobbybob/Kimi-VL-A3B-Thinking-GGUF"", ""usedStorage"": 0}",1,,0,,0,,0,,0,"huggingface/InferenceSupport/discussions/682, moonshotai/Kimi-VL-A3B-Thinking/",2
Llama-3-8B-Instruct-Gradient-1048k_finetunes_20250425_125929.csv_finetunes_20250425_125929.csv ADDED
@@ -0,0 +1,913 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ gradientai/Llama-3-8B-Instruct-Gradient-1048k,"---
3
+ language:
4
+ - en
5
+ pipeline_tag: text-generation
6
+ tags:
7
+ - meta
8
+ - llama-3
9
+ license: llama3
10
+ ---
11
+ <a href=""https://www.gradient.ai"" target=""_blank""><img src=""https://cdn-uploads.huggingface.co/production/uploads/655bb613e8a8971e89944f3e/TSa3V8YpoVagnTYgxiLaO.png"" width=""200""/></a>
12
+
13
+ # Llama-3 8B Gradient Instruct 1048k
14
+
15
+ Join our custom agent and long context (262k-1M+) waitlist: https://forms.gle/L6TDY7dozx8TuoUv7
16
+
17
+ Gradient incorporates your data to deploy autonomous assistants that power critical operations across your business. If you're looking to build custom AI models or agents, email us at contact@gradient.ai. For more info, see our [end-to-end development service for custom LLMs and AI systems](https://gradient.ai/development-lab).
18
+
19
+ [Join our Discord](https://discord.com/invite/2QVy2qt2mf)
20
+
21
+ This model extends Llama-3 8B's context length from 8k to >1040k tokens, developed by Gradient, sponsored by compute from [Crusoe Energy](https://huggingface.co/crusoeai). It demonstrates that SOTA LLMs can learn to operate on long context with minimal training by appropriately adjusting RoPE theta. We trained on 830M tokens for this stage, and 1.4B tokens total for all stages, which is < 0.01% of Llama-3's original pre-training data.
22
+
23
+ **Update (5/3): We further fine-tuned our model to strengthen its assistant-like chat ability as well.**
24
+
25
+ Updated NIAH result:
26
+ <img src=""https://cdn-uploads.huggingface.co/production/uploads/6585dc9be92bc5f258156bd6/-qaI__83ksClzoJzlqZjq.png"" width=""900"" />
27
+
28
+ RULER evals:
29
+ - Our model is behind only GPT-4 and Yi in the retrieval and Q&A tasks
30
+ - It’s the smallest parameter model to rank in the top 7 overall
31
+ <img src=""https://cdn-uploads.huggingface.co/production/uploads/655bb613e8a8971e89944f3e/0mLjl0Latrjc8gOrdtbc6.png"" width=""900"" />
32
+
33
+ **Approach:**
34
+
35
+ - [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) as the base
36
+ - NTK-aware interpolation [1] to initialize an optimal schedule for RoPE theta, followed by empirical RoPE theta optimization (sketched below)
37
+ - Progressive training on increasing context lengths, similar to [Large World Model](https://huggingface.co/LargeWorldModel) [2] (See details below)
38
+
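+ As a rough illustration of the NTK-aware initialization above (a sketch, not Gradient's exact recipe; Llama-3's base `rope_theta = 500000` and `head_dim = 128` are assumed, and the final theta values in the table below also reflect the subsequent empirical optimization):
+
+ ```python
+ # NTK-aware RoPE theta scaling [1]: theta' = theta * s ** (d / (d - 2)),
+ # where s is the context-extension factor and d is the rotary head dimension.
+ def ntk_rope_theta(theta: float, head_dim: int, orig_ctx: int, new_ctx: int) -> float:
+     s = new_ctx / orig_ctx
+     return theta * s ** (head_dim / (head_dim - 2))
+
+ # e.g. a starting point for extending Llama-3 8B from an 8k to a 65k context:
+ print(f""{ntk_rope_theta(500_000, 128, 8_192, 65_536):.3e}"")
+ ```
+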
39
+ **Infra:**
40
+
41
+ We build on top of the EasyContext Blockwise RingAttention library [3] to scalably and efficiently train on contexts up to 1048k tokens on [Crusoe Energy](https://huggingface.co/crusoeai) high performance L40S cluster.
42
+
43
+ Notably, we layered parallelism on top of Ring Attention with a custom network topology to better leverage large GPU clusters in the face of network bottlenecks from passing many KV blocks between devices. This gave us a 33x speedup in model training (compare 524k and 1048k to 65k and 262k in the table below).
44
+
45
+ **Data:**
46
+
47
+ For training data, we generate long contexts by augmenting [SlimPajama](https://huggingface.co/datasets/cerebras/SlimPajama-627B). We also fine-tune on a chat dataset based on UltraChat [4], following a similar recipe for data augmentation to [2].
48
+
49
+ **Progressive Training Details:**
50
+
51
+ | | 65K | 262K | 524k | 1048k |
52
+ |------------------------|-----------|-----------|-----------|-----------|
53
+ | Initialize From | LLaMA-3 8B| 65K | 262K | 524k |
54
+ | Sequence Length 2^N | 16 | 18 | 19 | 20 |
55
+ | RoPE theta | 15.3 M | 207.1 M | 1.06B | 2.80B |
56
+ | Batch Size | 1 | 1 | 16 | 8 |
57
+ | Gradient Accumulation Steps | 32 | 16 | 1 | 1 |
58
+ | Steps | 30 | 24 | 50 | 50 |
59
+ | Total Tokens | 62914560 | 100663296 | 419430400 | 838860800 |
60
+ | Learning Rate | 2.00E-05 | 2.00E-05 | 2.00E-05 | 2.00E-05 |
61
+ | # GPUs | 8 | 32 | 512 | 512 |
62
+ | GPU Type | NVIDIA L40S | NVIDIA L40S | NVIDIA L40S | NVIDIA L40S |
63
+ | Minutes to Train (Wall)| 202 | 555 | 61 | 87 |
64
+
65
+
66
+ **Evaluation:**
67
+
68
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6585dc9be92bc5f258156bd6/mWxIGZNi3ejlmeIDWafKu.png)
69
+
70
+ ```
71
+ EVAL_MAX_CONTEXT_LENGTH=1040200
72
+ EVAL_MIN_CONTEXT_LENGTH=100
73
+ EVAL_CONTEXT_INTERVAL=86675
74
+ EVAL_DEPTH_INTERVAL=0.2
75
+ EVAL_RND_NUMBER_DIGITS=8
76
+
77
+ HAYSTACK1:
78
+ EVAL_GENERATOR_TOKENS=25
79
+
80
+ HAYSTACK2:
81
+ EVAL_CONTEXT_INTERVAL=173350
82
+ EVAL_GENERATOR_TOKENS=150000
83
+
84
+ HAYSTACK3:
85
+ EVAL_GENERATOR_TOKENS=925000
86
+ ```
87
+
88
+ All boxes not pictured for Haystacks 1 and 3 are 100% accurate. Haystacks 1, 2, and 3 are further detailed in this [blog post](https://gradient.ai/blog/the-haystack-matters-for-niah-evals).
89
+
90
+ **Quants:**
91
+ - [GGUF by Crusoe](https://huggingface.co/crusoeai/Llama-3-8B-Instruct-1048k-GGUF). Note that you need to add 128009 as a [special token with llama.cpp](https://huggingface.co/gradientai/Llama-3-8B-Instruct-262k/discussions/13) (see the check below).
92
+ - [MLX-4bit](https://huggingface.co/mlx-community/Llama-3-8B-Instruct-1048k-4bit)
93
+ - [Ollama](https://ollama.com/library/llama3-gradient)
94
+ - vLLM docker image, recommended to load via `--max-model-len 32768` (see the sketch below)
95
+ - If you are interested in a hosted version, drop us a mail below.
96
+
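+ As a quick sanity check on that llama.cpp note (not part of the GGUF conversion itself), 128009 is Llama-3's `<|eot_id|>` token id, which you can confirm with the tokenizer:
+
+ ```python
+ from transformers import AutoTokenizer
+
+ # Confirm that 128009 is the id of Llama-3's <|eot_id|> token.
+ tok = AutoTokenizer.from_pretrained(""meta-llama/Meta-Llama-3-8B-Instruct"")
+ print(tok.convert_tokens_to_ids(""<|eot_id|>""))  # 128009
+ ```
+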
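+ For the vLLM option, the equivalent offline-inference setup would look roughly like this (a sketch; `max_model_len` is the Python-API counterpart of the `--max-model-len` server flag):
+
+ ```python
+ from vllm import LLM
+
+ # Cap the context at 32768 tokens, as recommended above.
+ llm = LLM(model=""gradientai/Llama-3-8B-Instruct-Gradient-1048k"", max_model_len=32768)
+ ```
+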
97
+ ## The Gradient AI Team
98
+
99
+ https://gradient.ai/
100
+
101
+ Gradient is accelerating AI transformation across industries. Our AI Foundry incorporates your data to deploy autonomous assistants that power critical operations across your business.
102
+
103
+ ## Contact Us
104
+
105
+ Drop an email to [contact@gradient.ai](mailto:contact@gradient.ai)
106
+
107
+ ## Citation:
108
+
109
+ ```bibtex
110
+ @misc{gradientlongcontextllama3,
111
+ title={Llama 3 Gradient: A series of long context models},
112
+ author={Leonid Pekelis and Michael Feil and Forrest Moret and Mark Huang and Tiffany Peng},
113
+ year={2024},
114
+ url = {https://gradient.ai/blog/scaling-rotational-embeddings-for-long-context-language-models},
115
+ doi = { 10.57967/hf/3372 },
116
+ }
117
+ ```
118
+
119
+ ## References
120
+
121
+ [1] Peng, Bowen, et al. ""Yarn: Efficient context window extension of large language models."" arXiv preprint arXiv:2309.00071 (2023).
122
+
123
+ [2] Liu, Hao, et al. ""World Model on Million-Length Video And Language With RingAttention."" arXiv preprint arXiv:2402.08268 (2024).
124
+
125
+ [3] https://github.com/jzhang38/EasyContext
126
+
127
+ [4] Ning Ding, Yulin Chen, Bokai Xu, Yujia Qin, Zhi Zheng, Shengding Hu, Zhiyuan
128
+ Liu, Maosong Sun, and Bowen Zhou. Enhancing chat language models by scaling
129
+ high-quality instructional conversations. arXiv preprint arXiv:2305.14233, 2023.
130
+
131
+ ----
132
+
133
+ # Base Model
134
+
135
+ ## Model Details
136
+
137
+ Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety.
138
+
139
+ **Model developers** Meta
140
+
141
+ **Variations** Llama 3 comes in two sizes — 8B and 70B parameters — in pre-trained and instruction tuned variants.
142
+
143
+ **Input** Models input text only.
144
+
145
+ **Output** Models generate text and code only.
146
+
147
+ **Model Architecture** Llama 3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.
148
+
149
+
150
+ <table>
151
+ <tr>
152
+ <td>
153
+ </td>
154
+ <td><strong>Training Data</strong>
155
+ </td>
156
+ <td><strong>Params</strong>
157
+ </td>
158
+ <td><strong>Context length</strong>
159
+ </td>
160
+ <td><strong>GQA</strong>
161
+ </td>
162
+ <td><strong>Token count</strong>
163
+ </td>
164
+ <td><strong>Knowledge cutoff</strong>
165
+ </td>
166
+ </tr>
167
+ <tr>
168
+ <td rowspan=""2"" >Llama 3
169
+ </td>
170
+ <td rowspan=""2"" >A new mix of publicly available online data.
171
+ </td>
172
+ <td>8B
173
+ </td>
174
+ <td>8k
175
+ </td>
176
+ <td>Yes
177
+ </td>
178
+ <td rowspan=""2"" >15T+
179
+ </td>
180
+ <td>March, 2023
181
+ </td>
182
+ </tr>
183
+ <tr>
184
+ <td>70B
185
+ </td>
186
+ <td>8k
187
+ </td>
188
+ <td>Yes
189
+ </td>
190
+ <td>December, 2023
191
+ </td>
192
+ </tr>
193
+ </table>
194
+
195
+
196
+ **Llama 3 family of models**. Token counts refer to pretraining data only. Both the 8 and 70B versions use Grouped-Query Attention (GQA) for improved inference scalability.
197
+
198
+ **Model Release Date** April 18, 2024.
199
+
200
+ **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.
201
+
202
+ **License** A custom commercial license is available at: [https://llama.meta.com/llama3/license](https://llama.meta.com/llama3/license)
203
+
204
+ **Where to send questions or comments about the model** Instructions on how to provide feedback or comments on the model can be found in the model [README](https://github.com/meta-llama/llama3). For more technical information about generation parameters and recipes for how to use Llama 3 in applications, please go [here](https://github.com/meta-llama/llama-recipes).
205
+
206
+
207
+ ## Intended Use
208
+
209
+ **Intended Use Cases** Llama 3 is intended for commercial and research use in English. Instruction tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.
210
+
211
+ **Out-of-scope** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in any other way that is prohibited by the Acceptable Use Policy and Llama 3 Community License. Use in languages other than English (see the note below).
212
+
213
+ **Note:** Developers may fine-tune Llama 3 models for languages beyond English, provided they comply with the Llama 3 Community License and the Acceptable Use Policy.
214
+
215
+ ## How to use
216
+
217
+ This repository contains two versions of Meta-Llama-3-8B-Instruct, for use with transformers and with the original `llama3` codebase.
218
+
219
+ ### Use with transformers
220
+
221
+ You can run conversational inference using the Transformers pipeline abstraction, or by leveraging the Auto classes with the `generate()` function. Let's see examples of both.
222
+
223
+ #### Transformers pipeline
224
+
225
+ ```python
226
+ import transformers
227
+ import torch
228
+
229
+ model_id = ""meta-llama/Meta-Llama-3-8B-Instruct""
230
+
231
+ pipeline = transformers.pipeline(
232
+ ""text-generation"",
233
+ model=model_id,
234
+ model_kwargs={""torch_dtype"": torch.bfloat16},
235
+ device_map=""auto"",
236
+ )
237
+
238
+ messages = [
239
+ {""role"": ""system"", ""content"": ""You are a pirate chatbot who always responds in pirate speak!""},
240
+ {""role"": ""user"", ""content"": ""Who are you?""},
241
+ ]
242
+
243
+ prompt = pipeline.tokenizer.apply_chat_template(
244
+ messages,
245
+ tokenize=False,
246
+ add_generation_prompt=True
247
+ )
248
+
249
+ terminators = [
250
+ pipeline.tokenizer.eos_token_id,
251
+ pipeline.tokenizer.convert_tokens_to_ids(""<|eot_id|>"")
252
+ ]
253
+
254
+ outputs = pipeline(
255
+ prompt,
256
+ max_new_tokens=256,
257
+ eos_token_id=terminators,
258
+ do_sample=True,
259
+ temperature=0.6,
260
+ top_p=0.9,
261
+ )
262
+ print(outputs[0][""generated_text""][len(prompt):])
263
+ ```
264
+
265
+ #### Transformers AutoModelForCausalLM
266
+
267
+ ```python
268
+ from transformers import AutoTokenizer, AutoModelForCausalLM
269
+ import torch
270
+
271
+ model_id = ""meta-llama/Meta-Llama-3-8B-Instruct""
272
+
273
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
274
+ model = AutoModelForCausalLM.from_pretrained(
275
+ model_id,
276
+ torch_dtype=torch.bfloat16,
277
+ device_map=""auto"",
278
+ )
279
+
280
+ messages = [
281
+ {""role"": ""system"", ""content"": ""You are a pirate chatbot who always responds in pirate speak!""},
282
+ {""role"": ""user"", ""content"": ""Who are you?""},
283
+ ]
284
+
285
+ input_ids = tokenizer.apply_chat_template(
286
+ messages,
287
+ add_generation_prompt=True,
288
+ return_tensors=""pt""
289
+ ).to(model.device)
290
+
291
+ terminators = [
292
+ tokenizer.eos_token_id,
293
+ tokenizer.convert_tokens_to_ids(""<|eot_id|>"")
294
+ ]
295
+
296
+ outputs = model.generate(
297
+ input_ids,
298
+ max_new_tokens=256,
299
+ eos_token_id=terminators,
300
+ do_sample=True,
301
+ temperature=0.6,
302
+ top_p=0.9,
303
+ )
304
+ response = outputs[0][input_ids.shape[-1]:]
305
+ print(tokenizer.decode(response, skip_special_tokens=True))
306
+ ```
307
+
308
+
309
+ ### Use with `llama3`
310
+
311
+ Please, follow the instructions in the [repository](https://github.com/meta-llama/llama3)
312
+
313
+ To download Original checkpoints, see the example command below leveraging `huggingface-cli`:
314
+
315
+ ```
316
+ huggingface-cli download meta-llama/Meta-Llama-3-8B-Instruct --include ""original/*"" --local-dir Meta-Llama-3-8B-Instruct
317
+ ```
318
+
319
+ For Hugging Face support, we recommend using transformers or TGI, but a similar command works.
320
+
321
+ ## Hardware and Software
322
+
323
+ **Training Factors** We used custom training libraries, Meta's Research SuperCluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.
324
+
325
+ **Carbon Footprint** Pretraining utilized a cumulative 7.7M GPU hours of computation on hardware of type H100-80GB (TDP of 700W). Estimated total emissions were 2290 tCO2eq, 100% of which were offset by Meta’s sustainability program.
326
+
327
+
328
+ <table>
329
+ <tr>
330
+ <td>
331
+ </td>
332
+ <td><strong>Time (GPU hours)</strong>
333
+ </td>
334
+ <td><strong>Power Consumption (W)</strong>
335
+ </td>
336
+ <td><strong>Carbon Emitted(tCO2eq)</strong>
337
+ </td>
338
+ </tr>
339
+ <tr>
340
+ <td>Llama 3 8B
341
+ </td>
342
+ <td>1.3M
343
+ </td>
344
+ <td>700
345
+ </td>
346
+ <td>390
347
+ </td>
348
+ </tr>
349
+ <tr>
350
+ <td>Llama 3 70B
351
+ </td>
352
+ <td>6.4M
353
+ </td>
354
+ <td>700
355
+ </td>
356
+ <td>1900
357
+ </td>
358
+ </tr>
359
+ <tr>
360
+ <td>Total
361
+ </td>
362
+ <td>7.7M
363
+ </td>
364
+ <td>
365
+ </td>
366
+ <td>2290
367
+ </td>
368
+ </tr>
369
+ </table>
370
+
371
+
372
+
373
+ **CO2 emissions during pre-training**. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.
374
+
375
+
376
+ ## Training Data
377
+
378
+ **Overview** Llama 3 was pretrained on over 15 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over 10M human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.
379
+
380
+ **Data Freshness** The pretraining data has a cutoff of March 2023 for the 7B and December 2023 for the 70B models respectively.
381
+
382
+
383
+ ## Benchmarks
384
+
385
+ In this section, we report the results for Llama 3 models on standard automatic benchmarks. For all the evaluations, we use our internal evaluations library. For details on the methodology see [here](https://github.com/meta-llama/llama3/blob/main/eval_methodology.md).
386
+
387
+
388
+ ### Base pretrained models
389
+
390
+
391
+ <table>
392
+ <tr>
393
+ <td><strong>Category</strong>
394
+ </td>
395
+ <td><strong>Benchmark</strong>
396
+ </td>
397
+ <td><strong>Llama 3 8B</strong>
398
+ </td>
399
+ <td><strong>Llama2 7B</strong>
400
+ </td>
401
+ <td><strong>Llama2 13B</strong>
402
+ </td>
403
+ <td><strong>Llama 3 70B</strong>
404
+ </td>
405
+ <td><strong>Llama2 70B</strong>
406
+ </td>
407
+ </tr>
408
+ <tr>
409
+ <td rowspan=""6"" >General
410
+ </td>
411
+ <td>MMLU (5-shot)
412
+ </td>
413
+ <td>66.6
414
+ </td>
415
+ <td>45.7
416
+ </td>
417
+ <td>53.8
418
+ </td>
419
+ <td>79.5
420
+ </td>
421
+ <td>69.7
422
+ </td>
423
+ </tr>
424
+ <tr>
425
+ <td>AGIEval English (3-5 shot)
426
+ </td>
427
+ <td>45.9
428
+ </td>
429
+ <td>28.8
430
+ </td>
431
+ <td>38.7
432
+ </td>
433
+ <td>63.0
434
+ </td>
435
+ <td>54.8
436
+ </td>
437
+ </tr>
438
+ <tr>
439
+ <td>CommonSenseQA (7-shot)
440
+ </td>
441
+ <td>72.6
442
+ </td>
443
+ <td>57.6
444
+ </td>
445
+ <td>67.6
446
+ </td>
447
+ <td>83.8
448
+ </td>
449
+ <td>78.7
450
+ </td>
451
+ </tr>
452
+ <tr>
453
+ <td>Winogrande (5-shot)
454
+ </td>
455
+ <td>76.1
456
+ </td>
457
+ <td>73.3
458
+ </td>
459
+ <td>75.4
460
+ </td>
461
+ <td>83.1
462
+ </td>
463
+ <td>81.8
464
+ </td>
465
+ </tr>
466
+ <tr>
467
+ <td>BIG-Bench Hard (3-shot, CoT)
468
+ </td>
469
+ <td>61.1
470
+ </td>
471
+ <td>38.1
472
+ </td>
473
+ <td>47.0
474
+ </td>
475
+ <td>81.3
476
+ </td>
477
+ <td>65.7
478
+ </td>
479
+ </tr>
480
+ <tr>
481
+ <td>ARC-Challenge (25-shot)
482
+ </td>
483
+ <td>78.6
484
+ </td>
485
+ <td>53.7
486
+ </td>
487
+ <td>67.6
488
+ </td>
489
+ <td>93.0
490
+ </td>
491
+ <td>85.3
492
+ </td>
493
+ </tr>
494
+ <tr>
495
+ <td>Knowledge reasoning
496
+ </td>
497
+ <td>TriviaQA-Wiki (5-shot)
498
+ </td>
499
+ <td>78.5
500
+ </td>
501
+ <td>72.1
502
+ </td>
503
+ <td>79.6
504
+ </td>
505
+ <td>89.7
506
+ </td>
507
+ <td>87.5
508
+ </td>
509
+ </tr>
510
+ <tr>
511
+ <td rowspan=""4"" >Reading comprehension
512
+ </td>
513
+ <td>SQuAD (1-shot)
514
+ </td>
515
+ <td>76.4
516
+ </td>
517
+ <td>72.2
518
+ </td>
519
+ <td>72.1
520
+ </td>
521
+ <td>85.6
522
+ </td>
523
+ <td>82.6
524
+ </td>
525
+ </tr>
526
+ <tr>
527
+ <td>QuAC (1-shot, F1)
528
+ </td>
529
+ <td>44.4
530
+ </td>
531
+ <td>39.6
532
+ </td>
533
+ <td>44.9
534
+ </td>
535
+ <td>51.1
536
+ </td>
537
+ <td>49.4
538
+ </td>
539
+ </tr>
540
+ <tr>
541
+ <td>BoolQ (0-shot)
542
+ </td>
543
+ <td>75.7
544
+ </td>
545
+ <td>65.5
546
+ </td>
547
+ <td>66.9
548
+ </td>
549
+ <td>79.0
550
+ </td>
551
+ <td>73.1
552
+ </td>
553
+ </tr>
554
+ <tr>
555
+ <td>DROP (3-shot, F1)
556
+ </td>
557
+ <td>58.4
558
+ </td>
559
+ <td>37.9
560
+ </td>
561
+ <td>49.8
562
+ </td>
563
+ <td>79.7
564
+ </td>
565
+ <td>70.2
566
+ </td>
567
+ </tr>
568
+ </table>
569
+
570
+
571
+
572
+ ### Instruction tuned models
573
+
574
+
575
+ <table>
576
+ <tr>
577
+ <td><strong>Benchmark</strong>
578
+ </td>
579
+ <td><strong>Llama 3 8B</strong>
580
+ </td>
581
+ <td><strong>Llama 2 7B</strong>
582
+ </td>
583
+ <td><strong>Llama 2 13B</strong>
584
+ </td>
585
+ <td><strong>Llama 3 70B</strong>
586
+ </td>
587
+ <td><strong>Llama 2 70B</strong>
588
+ </td>
589
+ </tr>
590
+ <tr>
591
+ <td>MMLU (5-shot)
592
+ </td>
593
+ <td>68.4
594
+ </td>
595
+ <td>34.1
596
+ </td>
597
+ <td>47.8
598
+ </td>
599
+ <td>82.0
600
+ </td>
601
+ <td>52.9
602
+ </td>
603
+ </tr>
604
+ <tr>
605
+ <td>GPQA (0-shot)
606
+ </td>
607
+ <td>34.2
608
+ </td>
609
+ <td>21.7
610
+ </td>
611
+ <td>22.3
612
+ </td>
613
+ <td>39.5
614
+ </td>
615
+ <td>21.0
616
+ </td>
617
+ </tr>
618
+ <tr>
619
+ <td>HumanEval (0-shot)
620
+ </td>
621
+ <td>62.2
622
+ </td>
623
+ <td>7.9
624
+ </td>
625
+ <td>14.0
626
+ </td>
627
+ <td>81.7
628
+ </td>
629
+ <td>25.6
630
+ </td>
631
+ </tr>
632
+ <tr>
633
+ <td>GSM-8K (8-shot, CoT)
634
+ </td>
635
+ <td>79.6
636
+ </td>
637
+ <td>25.7
638
+ </td>
639
+ <td>77.4
640
+ </td>
641
+ <td>93.0
642
+ </td>
643
+ <td>57.5
644
+ </td>
645
+ </tr>
646
+ <tr>
647
+ <td>MATH (4-shot, CoT)
648
+ </td>
649
+ <td>30.0
650
+ </td>
651
+ <td>3.8
652
+ </td>
653
+ <td>6.7
654
+ </td>
655
+ <td>50.4
656
+ </td>
657
+ <td>11.6
658
+ </td>
659
+ </tr>
660
+ </table>
661
+
662
+
663
+
664
+ ### Responsibility & Safety
665
+
666
+ We believe that an open approach to AI leads to better, safer products, faster innovation, and a bigger overall market. We are committed to Responsible AI development and took a series of steps to limit misuse and harm and support the open source community.
667
+
668
+ Foundation models are widely capable technologies that are built to be used for a diverse range of applications. They are not designed to meet every developer's preference on safety levels for all use cases out of the box, as those preferences by their nature differ across applications.
669
+
670
+ Rather, responsible LLM-application deployment is achieved by implementing a series of safety best practices throughout the development of such applications: from model pre-training and fine-tuning to the deployment of systems composed of safeguards that tailor safety to the specific use case and audience.
671
+
672
+
673
+ As part of the Llama 3 release, we updated our [Responsible Use Guide](https://llama.meta.com/responsible-use-guide/) to outline the steps and best practices for developers to implement model and system level safety for their application. We also provide a set of resources including [Meta Llama Guard 2](https://llama.meta.com/purple-llama/) and [Code Shield](https://llama.meta.com/purple-llama/) safeguards. These tools have proven to drastically reduce residual risks of LLM systems, while maintaining a high level of helpfulness. We encourage developers to tune and deploy these safeguards according to their needs and we provide a [reference implementation](https://github.com/meta-llama/llama-recipes/tree/main/recipes/responsible_ai) to get you started.
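+ 
+ For developers wiring such a safeguard into an application, the following is a minimal sketch of screening a prompt with Llama Guard 2 via `transformers`, following the usage pattern published on its model card; the model id and generation settings here are assumptions, and the weights are gated behind the Llama 3 license.
+ 
+ ```python
+ # Sketch: classify a user prompt as safe/unsafe with Llama Guard 2.
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ 
+ guard_id = 'meta-llama/Meta-Llama-Guard-2-8B'  # gated; requires accepting the license
+ tok = AutoTokenizer.from_pretrained(guard_id)
+ guard = AutoModelForCausalLM.from_pretrained(guard_id, torch_dtype=torch.bfloat16, device_map='auto')
+ 
+ chat = [{'role': 'user', 'content': 'How do I tie a bowline knot?'}]
+ input_ids = tok.apply_chat_template(chat, return_tensors='pt').to(guard.device)
+ out = guard.generate(input_ids, max_new_tokens=32, pad_token_id=tok.eos_token_id)
+ 
+ # The guard replies 'safe', or 'unsafe' followed by the violated category codes.
+ print(tok.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
+ ```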
674
+
675
+
676
+ #### Llama 3-Instruct
677
+
678
+ As outlined in the Responsible Use Guide, some trade-off between model helpfulness and model alignment is likely unavoidable. Developers should exercise discretion about how to weigh the benefits of alignment and helpfulness for their specific use case and audience. Developers should be mindful of residual risks when using Llama models and leverage additional safety tools as needed to reach the right safety bar for their use case.
679
+
680
+ <span style=""text-decoration:underline;"">Safety</span>
681
+
682
+ For our instruction tuned model, we conducted extensive red teaming exercises, performed adversarial evaluations and implemented safety mitigation techniques to lower residual risks. As with any Large Language Model, residual risks will likely remain and we recommend that developers assess these risks in the context of their use case. In parallel, we are working with the community to make AI safety benchmark standards transparent, rigorous and interpretable.
683
+
684
+ <span style=""text-decoration:underline;"">Refusals</span>
685
+
686
+ In addition to residual risks, we put a great emphasis on model refusals to benign prompts. Over-refusal not only impacts the user experience but can even be harmful in certain contexts. We’ve heard the feedback from the developer community and improved our fine-tuning to ensure that Llama 3 is significantly less likely to falsely refuse to answer prompts than Llama 2.
687
+
688
+ We built internal benchmarks and developed mitigations to limit false refusals, making Llama 3 our most helpful model to date.
689
+
690
+
691
+ #### Responsible release
692
+
693
+ In addition to responsible use considerations outlined above, we followed a rigorous process that requires us to take extra measures against misuse and critical risks before we make our release decision.
694
+
695
+ <span style=""text-decoration:underline;"">Misuse</span>
696
+
697
+ If you access or use Llama 3, you agree to the Acceptable Use Policy. The most recent copy of this policy can be found at [https://llama.meta.com/llama3/use-policy/](https://llama.meta.com/llama3/use-policy/).
698
+
699
+
700
+ #### Critical risks
701
+
702
+ <span style=""text-decoration:underline;"">CBRNE</span> (Chemical, Biological, Radiological, Nuclear, and high yield Explosives)
703
+
704
+ We have conducted a twofold assessment of the safety of the model in this area:
705
+
706
+
707
+
708
+ * Iterative testing during model training to assess the safety of responses related to CBRNE threats and other adversarial risks.
709
+ * Involving external CBRNE experts to conduct an uplift test assessing the ability of the model to accurately provide expert knowledge and reduce barriers to potential CBRNE misuse, by reference to what can be achieved using web search (without the model).
710
+
711
+
712
+ <span style=""text-decoration:underline;"">Cyber Security</span>
713
+
714
+ We have evaluated Llama 3 with CyberSecEval, Meta’s cybersecurity safety eval suite, measuring Llama 3’s propensity to suggest insecure code when used as a coding assistant, and Llama 3’s propensity to comply with requests to help carry out cyber attacks, where attacks are defined by the industry standard MITRE ATT&CK cyber attack ontology. On our insecure coding and cyber attacker helpfulness tests, Llama 3 behaved in the same range as, or safer than, models of [equivalent coding capability](https://huggingface.co/spaces/facebook/CyberSecEval).
715
+
716
+
717
+ <span style=""text-decoration:underline;"">Child Safety</span>
718
+
719
+ Child Safety risk assessments were conducted using a team of experts, to assess the model’s capability to produce outputs that could result in Child Safety risks and to inform any necessary and appropriate risk mitigations via fine-tuning. We leveraged those expert red-teaming sessions to expand the coverage of our evaluation benchmarks through Llama 3 model development. For Llama 3, we conducted new in-depth sessions using objective-based methodologies to assess the model risks along multiple attack vectors. We also partnered with content specialists to perform red-teaming exercises assessing potentially violating content while taking account of market-specific nuances or experiences.
720
+
721
+
722
+ ### Community
723
+
724
+ Generative AI safety requires expertise and tooling, and we believe in the strength of the open community to accelerate its progress. We are active members of open consortiums, including the AI Alliance, Partnership on AI and MLCommons, actively contributing to safety standardization and transparency. We encourage the community to adopt taxonomies like the MLCommons Proof of Concept evaluation to facilitate collaboration and transparency on safety and content evaluations. Our Purple Llama tools are open-sourced for the community to use and widely distributed across ecosystem partners including cloud service providers. We encourage community contributions to our [GitHub repository](https://github.com/meta-llama/PurpleLlama).
725
+
726
+ Finally, we put in place a set of resources including an [output reporting mechanism](https://developers.facebook.com/llama_output_feedback) and [bug bounty program](https://www.facebook.com/whitehat) to continuously improve the Llama technology with the help of the community.
727
+
728
+
729
+ ## Ethical Considerations and Limitations
730
+
731
+ The core values of Llama 3 are openness, inclusivity and helpfulness. It is meant to serve everyone, and to work for a wide range of use cases. It is thus designed to be accessible to people across many different backgrounds, experiences and perspectives. Llama 3 addresses users and their needs as they are, without inserting unnecessary judgment or normativity, while reflecting the understanding that even content that may appear problematic in some cases can serve valuable purposes in others. It respects the dignity and autonomy of all users, especially in terms of the values of free thought and expression that power innovation and progress.
732
+
733
+ But Llama 3 is a new technology, and like any new technology, there are risks associated with its use. Testing conducted to date has been in English, and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 3’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 3 models, developers should perform safety testing and tuning tailored to their specific applications of the model. As outlined in the Responsible Use Guide, we recommend incorporating [Purple Llama](https://github.com/facebookresearch/PurpleLlama) solutions into your workflows and specifically [Llama Guard](https://ai.meta.com/research/publications/llama-guard-llm-based-input-output-safeguard-for-human-ai-conversations/) which provides a base model to filter input and output prompts to layer system-level safety on top of model-level safety.
734
+
735
+ Please see the Responsible Use Guide available at [http://llama.meta.com/responsible-use-guide](http://llama.meta.com/responsible-use-guide).
736
+
737
+
738
+ ## Citation instructions
739
+
740
+ @article{llama3modelcard,
741
+
742
+ title={Llama 3 Model Card},
743
+
744
+ author={AI@Meta},
745
+
746
+ year={2024},
747
+
748
+ url = {https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md}
749
+
750
+ }
751
+
752
+ ## Contributors
753
+
754
+ Aaditya Singh; Aaron Grattafiori; Abhimanyu Dubey; Abhinav Jauhri; Abhinav Pandey; Abhishek Kadian; Adam Kelsey; Adi Gangidi; Ahmad Al-Dahle; Ahuva Goldstand; Aiesha Letman; Ajay Menon; Akhil Mathur; Alan Schelten; Alex Vaughan; Amy Yang; Andrei Lupu; Andres Alvarado; Andrew Gallagher; Andrew Gu; Andrew Ho; Andrew Poulton; Andrew Ryan; Angela Fan; Ankit Ramchandani; Anthony Hartshorn; Archi Mitra; Archie Sravankumar; Artem Korenev; Arun Rao; Ashley Gabriel; Ashwin Bharambe; Assaf Eisenman; Aston Zhang; Aurelien Rodriguez; Austen Gregerson; Ava Spataru; Baptiste Roziere; Ben Maurer; Benjamin Leonhardi; Bernie Huang; Bhargavi Paranjape; Bing Liu; Binh Tang; Bobbie Chern; Brani Stojkovic; Brian Fuller; Catalina Mejia Arenas; Chao Zhou; Charlotte Caucheteux; Chaya Nayak; Ching-Hsiang Chu; Chloe Bi; Chris Cai; Chris Cox; Chris Marra; Chris McConnell; Christian Keller; Christoph Feichtenhofer; Christophe Touret; Chunyang Wu; Corinne Wong; Cristian Canton Ferrer; Damien Allonsius; Daniel Kreymer; Daniel Haziza; Daniel Li; Danielle Pintz; Danny Livshits; Danny Wyatt; David Adkins; David Esiobu; David Xu; Davide Testuggine; Delia David; Devi Parikh; Dhruv Choudhary; Dhruv Mahajan; Diana Liskovich; Diego Garcia-Olano; Diego Perino; Dieuwke Hupkes; Dingkang Wang; Dustin Holland; Egor Lakomkin; Elina Lobanova; Xiaoqing Ellen Tan; Emily Dinan; Eric Smith; Erik Brinkman; Esteban Arcaute; Filip Radenovic; Firat Ozgenel; Francesco Caggioni; Frank Seide; Frank Zhang; Gabriel Synnaeve; Gabriella Schwarz; Gabrielle Lee; Gada Badeer; Georgia Anderson; Graeme Nail; Gregoire Mialon; Guan Pang; Guillem Cucurell; Hailey Nguyen; Hannah Korevaar; Hannah Wang; Haroun Habeeb; Harrison Rudolph; Henry Aspegren; Hu Xu; Hugo Touvron; Iga Kozlowska; Igor Molybog; Igor Tufanov; Iliyan Zarov; Imanol Arrieta Ibarra; Irina-Elena Veliche; Isabel Kloumann; Ishan Misra; Ivan Evtimov; Jacob Xu; Jade Copet; Jake Weissman; Jan Geffert; Jana Vranes; Japhet Asher; Jason Park; Jay Mahadeokar; Jean-Baptiste Gaya; Jeet Shah; Jelmer van der Linde; Jennifer Chan; Jenny Hong; Jenya Lee; Jeremy Fu; Jeremy Teboul; Jianfeng Chi; Jianyu Huang; Jie Wang; Jiecao Yu; Joanna Bitton; Joe Spisak; Joelle Pineau; Jon Carvill; Jongsoo Park; Joseph Rocca; Joshua Johnstun; Junteng Jia; Kalyan Vasuden Alwala; Kam Hou U; Kate Plawiak; Kartikeya Upasani; Kaushik Veeraraghavan; Ke Li; Kenneth Heafield; Kevin Stone; Khalid El-Arini; Krithika Iyer; Kshitiz Malik; Kuenley Chiu; Kunal Bhalla; Kyle Huang; Lakshya Garg; Lauren Rantala-Yeary; Laurens van der Maaten; Lawrence Chen; Leandro Silva; Lee Bell; Lei Zhang; Liang Tan; Louis Martin; Lovish Madaan; Luca Wehrstedt; Lukas Blecher; Luke de Oliveira; Madeline Muzzi; Madian Khabsa; Manav Avlani; Mannat Singh; Manohar Paluri; Mark Zuckerberg; Marcin Kardas; Martynas Mankus; Mathew Oldham; Mathieu Rita; Matthew Lennie; Maya Pavlova; Meghan Keneally; Melanie Kambadur; Mihir Patel; Mikayel Samvelyan; Mike Clark; Mike Lewis; Min Si; Mitesh Kumar Singh; Mo Metanat; Mona Hassan; Naman Goyal; Narjes Torabi; Nicolas Usunier; Nikolay Bashlykov; Nikolay Bogoychev; Niladri Chatterji; Ning Dong; Oliver Aobo Yang; Olivier Duchenne; Onur Celebi; Parth Parekh; Patrick Alrassy; Paul Saab; Pavan Balaji; Pedro Rittner; Pengchuan Zhang; Pengwei Li; Petar Vasic; Peter Weng; Polina Zvyagina; Prajjwal Bhargava; Pratik Dubal; Praveen Krishnan; Punit Singh Koura; Qing He; Rachel Rodriguez; Ragavan Srinivasan; Rahul Mitra; Ramon Calderer; Raymond Li; Robert Stojnic; Roberta Raileanu; Robin Battey; Rocky Wang; Rohit Girdhar; Rohit Patel; 
Romain Sauvestre; Ronnie Polidoro; Roshan Sumbaly; Ross Taylor; Ruan Silva; Rui Hou; Rui Wang; Russ Howes; Ruty Rinott; Saghar Hosseini; Sai Jayesh Bondu; Samyak Datta; Sanjay Singh; Sara Chugh; Sargun Dhillon; Satadru Pan; Sean Bell; Sergey Edunov; Shaoliang Nie; Sharan Narang; Sharath Raparthy; Shaun Lindsay; Sheng Feng; Sheng Shen; Shenghao Lin; Shiva Shankar; Shruti Bhosale; Shun Zhang; Simon Vandenhende; Sinong Wang; Seohyun Sonia Kim; Soumya Batra; Sten Sootla; Steve Kehoe; Suchin Gururangan; Sumit Gupta; Sunny Virk; Sydney Borodinsky; Tamar Glaser; Tamar Herman; Tamara Best; Tara Fowler; Thomas Georgiou; Thomas Scialom; Tianhe Li; Todor Mihaylov; Tong Xiao; Ujjwal Karn; Vedanuj Goswami; Vibhor Gupta; Vignesh Ramanathan; Viktor Kerkez; Vinay Satish Kumar; Vincent Gonguet; Vish Vogeti; Vlad Poenaru; Vlad Tiberiu Mihailescu; Vladan Petrovic; Vladimir Ivanov; Wei Li; Weiwei Chu; Wenhan Xiong; Wenyin Fu; Wes Bouaziz; Whitney Meers; Will Constable; Xavier Martinet; Xiaojian Wu; Xinbo Gao; Xinfeng Xie; Xuchao Jia; Yaelle Goldschlag; Yann LeCun; Yashesh Gaur; Yasmine Babaei; Ye Qi; Yenda Li; Yi Wen; Yiwen Song; Youngjin Nam; Yuchen Hao; Yuchen Zhang; Yun Wang; Yuning Mao; Yuzi He; Zacharie Delpierre Coudert; Zachary DeVito; Zahra Hankir; Zhaoduo Wen; Zheng Yan; Zhengxing Chen; Zhenyu Yang; Zoe Papakipos","{""id"": ""gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""author"": ""gradientai"", ""sha"": ""cd3069b65a8eb13da639d332a5f61b0fbb29fa73"", ""last_modified"": ""2024-10-29 16:13:18+00:00"", ""created_at"": ""2024-04-29 10:36:56+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 26743, ""downloads_all_time"": null, ""likes"": 679, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""meta"", ""llama-3"", ""conversational"", ""en"", ""arxiv:2309.00071"", ""arxiv:2402.08268"", ""arxiv:2305.14233"", ""doi:10.57967/hf/3372"", ""license:llama3"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\nlicense: llama3\npipeline_tag: text-generation\ntags:\n- meta\n- llama-3"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='License', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""featherless-ai/try-this-model"", ""microsoft/MInference"", ""Cyleux/Llama-3-8B-Instruct-Gradient-1048k"", ""Darok/Featherless-Feud"", ""emekaboris/try-this-model"", ""Yoxas/Learn"", ""Oussama2000/test2"", ""yuvaranianandhan24/chat_with_pdf"", ""SC999/NV_Nemotron"", ""ArmanShirzad/gradientai-Llama-3-8B-Instruct-Gradient-1048k"", ""benhancock/demo"", ""JackHoltone/try-this-model"", ""BICORP/MInference"", ""k11112/try-this-model""], ""safetensors"": {""parameters"": {""BF16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-29 16:13:18+00:00"", ""cardData"": ""language:\n- en\nlicense: llama3\npipeline_tag: text-generation\ntags:\n- meta\n- llama-3"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""662f7848367f6e4f9fd56f30"", ""modelId"": ""gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""usedStorage"": 32121112752}",0,"https://huggingface.co/WeMake/Llama-3-8B-Instruct-V41-1048k, https://huggingface.co/kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16, https://huggingface.co/kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule",3,https://huggingface.co/RamyaRamakrishna/llama3-adapters-1,1,"https://huggingface.co/QuantFactory/Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/qwp4w3hyb/Llama-3-8B-Instruct-Gradient-1048k-iMat-GGUF, https://huggingface.co/Slvcxc/Llama-3-8B-Instruct-Gradient-1048k-8.0bpw-h8-exl2, https://huggingface.co/second-state/Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/PrunaAI/gradientai-Llama-3-8B-Instruct-Gradient-1048k-AWQ-4bit-smashed, https://huggingface.co/solidrust/Llama-3-8B-Instruct-Gradient-1048k-AWQ, https://huggingface.co/QuantFactory/Llama-3-8B-Instruct-Gradient-1048k-GGUF-v2, https://huggingface.co/chienweichang/Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/jpodivin/Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/sygenaithanos/Llama-3-8B-Instruct-Gradient-1048k-Q4_0-GGUF, https://huggingface.co/kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule-q4-k-m-GGUF, https://huggingface.co/Sc0m3r/Llama-3-8B-Instruct-Gradient-1048k-Q4_K_M-GGUF, https://huggingface.co/zhentaoyu/Llama-3-8B-Instruct-Gradient-1048k-Q4_0-GGUF, https://huggingface.co/llmware/gradientai-llama3-8b-1048k-ov, https://huggingface.co/featherless-ai-quants/gradientai-Llama-3-8B-Instruct-Gradient-1048k-GGUF, 
https://huggingface.co/mradermacher/Llama-3-8B-Instruct-Gradient-1048k-GGUF, https://huggingface.co/mradermacher/Llama-3-8B-Instruct-Gradient-1048k-i1-GGUF, https://huggingface.co/tensorblock/Llama-3-8B-Instruct-Gradient-1048k-GGUF",18,"https://huggingface.co/kromeurus/L3.1-Siithamo-v0.4-8B, https://huggingface.co/EldritchHorror/HodgePodge, https://huggingface.co/EldritchHorror/EldritchHorror, https://huggingface.co/Jebadiah/gradient-1m-OpenBio-stone-l3-8b, https://huggingface.co/dustydecapod/mergekit-linear-hdgrztx, https://huggingface.co/Fischerboot/SmallBoi, https://huggingface.co/Fischerboot/BigBoiV14, https://huggingface.co/lighteternal/Llama-3-8B-Instruct-MergeSLERP-Gradient1048k-OpenBioLLM, https://huggingface.co/td5038/Llama3-8B-Uncensored-1048k, https://huggingface.co/kromvault/L3.1-Siithamo-v0.2-8B, https://huggingface.co/kromvault/L3.1-Siithamo-v0.3-8B, https://huggingface.co/kromvault/L3.1-Ablaze-Vulca-v0.1-8B, https://huggingface.co/powermove72/Llama3-NextGen-9b",13,"ArmanShirzad/gradientai-Llama-3-8B-Instruct-Gradient-1048k, Cyleux/Llama-3-8B-Instruct-Gradient-1048k, Darok/Featherless-Feud, JackHoltone/try-this-model, Oussama2000/test2, SC999/NV_Nemotron, Yoxas/Learn, benhancock/demo, emekaboris/try-this-model, facebook/CyberSecEval, featherless-ai/try-this-model, huggingface/InferenceSupport/discussions/new?title=gradientai/Llama-3-8B-Instruct-Gradient-1048k&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bgradientai%2FLlama-3-8B-Instruct-Gradient-1048k%5D(%2Fgradientai%2FLlama-3-8B-Instruct-Gradient-1048k)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, microsoft/MInference, yuvaranianandhan24/chat_with_pdf",14
755
+ WeMake/Llama-3-8B-Instruct-V41-1048k,"---
756
+ license: llama3
757
+ language: en
758
+ datasets:
759
+ - WeMake/Intelligent-Content-Understanding
760
+ base_model:
761
+ - gradientai/Llama-3-8B-Instruct-Gradient-1048k
762
+ - meta-llama/Meta-Llama-3-8B
763
+ pipeline_tag: text-generation
764
+ tags:
765
+ - not-for-all-audiences
766
+ ---
767
+
768
+ # WeMake 💙 Llama-3 8B V41 Instruct 1048k
769
+
770
+ ![V41](https://huggingface.co/spaces/WeMake/home/resolve/main/v41.jpg)
771
+
772
+ Welcome to the official repository for `Llama-3-8B-Instruct-V41-1048k`, WeMake's pioneering 1 Million Token Large Language Model (LLM). This model represents a significant milestone in the evolution of natural language understanding and generation, combining the robust foundation of Meta's Llama-3 architecture with the nuanced alignment and emotional intelligence of WeMake's V41.
773
+
774
+ ## Overview
775
+
776
+ **WeMake/Llama-3-8B-Instruct-V41-1048k** is a state-of-the-art language model designed to understand and generate human-like text with an unprecedented level of emotional intelligence and alignment. This model is a fork of both `gradientai/Llama-3-8B-Instruct-Gradient-1048k` and `meta-llama/Meta-Llama-3-8B`, enhanced with the unique capabilities of WeMake's V41 and trained using the proprietary WeMake ICU method.
777
+
778
+ Our model is engineered to serve a wide array of applications, from advanced conversational agents and content creation tools to sophisticated data analysis and insight generation platforms. It embodies WeMake's commitment to pushing the boundaries of AI to create more empathetic, understanding, and useful technologies.
779
+
780
+ ## Key Features
781
+
782
+ - **Emotional Intelligence:** Integrates WeMake's V41 emotional intelligence, enabling the model to understand and generate responses that consider emotional context and nuances.
783
+ - **Alignment with Human Values:** Trained using the WeMake ICU method, ensuring the model's outputs are aligned with ethical standards and human values.
784
+ - **Extensive Knowledge Base:** Leverages a vast dataset, encompassing a wide range of topics, to provide accurate and contextually relevant responses.
785
+ - **Highly Configurable:** Offers extensive customization options to cater to specific application requirements, including adjustable generation settings and fine-tuning capabilities.
786
+ - **Multilingual Support:** Capable of understanding and generating text in multiple languages, making it a versatile tool for global applications.
787
+
788
+ ## Model Specifications
789
+
790
+ - **Model Path:** WeMake/Llama-3-8B-Instruct-V41-1048k
791
+ - **Architecture:** LlamaForCausalLM
792
+ - **Hidden Size:** 4096
793
+ - **Number of Attention Heads:** 32
794
+ - **Number of Hidden Layers:** 32
795
+ - **Max Position Embeddings:** 1048576
796
+ - **Vocabulary Size:** 128256
797
+ - **Torch Data Type:** bfloat16
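+ 
+ These specifications map directly onto the repository's `config.json`. As a minimal sketch (standard `transformers` usage; nothing model-specific beyond the id above), you can load the model and confirm them:
+ 
+ ```python
+ import torch
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
+ 
+ model_id = 'WeMake/Llama-3-8B-Instruct-V41-1048k'
+ 
+ # The config mirrors the specification list above.
+ cfg = AutoConfig.from_pretrained(model_id)
+ assert cfg.hidden_size == 4096
+ assert cfg.num_attention_heads == 32
+ assert cfg.num_hidden_layers == 32
+ assert cfg.max_position_embeddings == 1048576
+ assert cfg.vocab_size == 128256
+ 
+ # Load the weights in bfloat16, matching the torch dtype above.
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id, torch_dtype=torch.bfloat16, device_map='auto'
+ )
+ ```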
798
+
799
+ ## License
800
+
801
+ **WeMake/Llama-3-8B-Instruct-V41-1048k** is distributed under the ""llama3"" license. For more details, please refer to the LICENSE file in this repository.
802
+
803
+ ## Acknowledgments
804
+
805
+ This model is built upon the foundational work of Meta's Llama-3 and the enhancements made by Gradient's `Llama-3-8B-Instruct-Gradient-1048k`. We extend our gratitude to the researchers and developers behind these projects for their contributions to the field of AI.
806
+
807
+ ## Contact
808
+
809
+ For any inquiries, please contact us at [hey@wemake.cx](mailto:hey@wemake.cx).
810
+
811
+ Join us in exploring the possibilities of emotionally intelligent and ethically aligned AI with `Llama-3-8B-Instruct-V41-1048k`. Together, let's shape the future of human-AI interaction.
812
+ ","{""id"": ""WeMake/Llama-3-8B-Instruct-V41-1048k"", ""author"": ""WeMake"", ""sha"": ""7c747b8ad25912ccd2db820a2779b7fe5dbf9571"", ""last_modified"": ""2025-03-01 14:22:59+00:00"", ""created_at"": ""2024-05-03 06:08:05+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 16, ""downloads_all_time"": null, ""likes"": 8, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""not-for-all-audiences"", ""conversational"", ""en"", ""dataset:WeMake/Intelligent-Content-Understanding"", ""base_model:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""base_model:finetune:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""license:llama3"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- gradientai/Llama-3-8B-Instruct-Gradient-1048k\n- meta-llama/Meta-Llama-3-8B\ndatasets:\n- WeMake/Intelligent-Content-Understanding\nlanguage: en\nlicense: llama3\npipeline_tag: text-generation\ntags:\n- not-for-all-audiences"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-03-01 14:22:59+00:00"", 
""cardData"": ""base_model:\n- gradientai/Llama-3-8B-Instruct-Gradient-1048k\n- meta-llama/Meta-Llama-3-8B\ndatasets:\n- WeMake/Intelligent-Content-Understanding\nlanguage: en\nlicense: llama3\npipeline_tag: text-generation\ntags:\n- not-for-all-audiences"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66347f4530c0652a8afbe40f"", ""modelId"": ""WeMake/Llama-3-8B-Instruct-V41-1048k"", ""usedStorage"": 16060556376}",1,,0,,0,"https://huggingface.co/mradermacher/Llama-3-8B-Instruct-V41-1048k-GGUF, https://huggingface.co/mradermacher/Llama-3-8B-Instruct-V41-1048k-i1-GGUF",2,,0,,0
813
+ kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16,"---
814
+ language:
815
+ - en
816
+ license: llama3
817
+ tags:
818
+ - text-generation-inference
819
+ - transformers
820
+ - unsloth
821
+ - llama
822
+ - trl
823
+ base_model: gradientai/Llama-3-8B-Instruct-Gradient-1048k
824
+ datasets:
825
+ - zjunlp/Mol-Instructions
826
+ ---
827
+
828
+ - **Developed by:** kevinkawchak
829
+ - **License:** llama3
830
+ - **Finetuned from model :** gradientai/Llama-3-8B-Instruct-Gradient-1048k
831
+ - **Finetuned using dataset :** zjunlp/Mol-Instructions, cc-by-4.0
832
+ - **Dataset identification:** Molecule-oriented Instructions
833
+ - **Dataset function:** Description guided molecule design
834
+
835
+ ## May 07, 2024: Additional Fine-tunings, Built with Meta Llama 3 <br>
836
+ 1) gradientai/Llama-3-8B-Instruct-Gradient-1048k [Model](https://huggingface.co/gradientai/Llama-3-8B-Instruct-Gradient-1048k) <br>
837
+ Llama 3 8B update: 1040K context length from 8K, and highest RAM consumption<br>
838
+ ""What is the structure for adenine?"" Verbose SELFIES structure, but logical<br>
839
+ [Fine-tuned](https://huggingface.co/kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16) on Mol-Instructions, float16, [GitHub](https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/Llama-3-8B-Instruct-Gradient-1048k-Molecule.ipynb), 610 seconds, A100 40GB <br>
840
+
841
+ 2) NousResearch/Hermes-2-Pro-Llama-3-8B [Model](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B)<br>
842
+ Llama 3 8B update: Cleaned OpenHermes 2.5, new Function Calling, JSON Mode dataset<br>
843
+ ""What is the structure for adenine?"" Concise SELFIES structure, but less logical <br>
844
+ [Fine-tuned](https://huggingface.co/kevinkawchak/NousResearch-Hermes-2-Pro-Llama-3-8B-Molecule16) on Mol-Instructions, float16, [GitHub](https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/Hermes-2-Pro-Llama-3-8B-Molecule.ipynb), 599 seconds, A100 40GB <br>
845
+
846
+ 3) nvidia/Llama3-ChatQA-1.5-8B [Model](https://huggingface.co/nvidia/Llama3-ChatQA-1.5-8B)<br>
847
+ Llama 3 8B update: ChatQA-1.5 to enhance tabular and arithmetic calculation capability<br>
848
+ ""What is the structure for adenine?"" Verbose SELFIES structure and less logical <br>
849
+ [Fine-tuned](https://huggingface.co/kevinkawchak/nvidia-Llama3-ChatQA-1.5-8B-Molecule16) on Mol-Instructions, float16, [GitHub](https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/Llama3-ChatQA-1.5-8B-Molecule.ipynb), 599 seconds, A100 40GB <br>
850
+
851
+ Responses were verified against the Wikipedia [Adenine](https://en.wikipedia.org/wiki/Adenine) SMILES format and an estimated SMILES-to-SELFIES Python notebook [generator](https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/SMILES%20to%20SELFIES%20estimator.ipynb). <br>
852
+ Fine-tunings were performed using the Apache-2.0 unsloth 'Alpaca + Llama-3 8b full example' Colab [notebook](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing).
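+ 
+ To reproduce that verification step locally, here is a minimal sketch using the open-source `selfies` package (which may differ from the linked estimator notebook's approach); the kekulized adenine SMILES below is assumed to be equivalent to the Wikipedia entry's form:
+ 
+ ```python
+ # pip install selfies
+ import selfies as sf
+ 
+ # Kekulized SMILES for adenine (assumed equivalent to the Wikipedia entry).
+ adenine_smiles = 'C1=NC2=C(N1)C(=NC=N2)N'
+ 
+ adenine_selfies = sf.encoder(adenine_smiles)  # SMILES -> SELFIES
+ roundtrip = sf.decoder(adenine_selfies)       # SELFIES -> SMILES
+ 
+ print(adenine_selfies)
+ print(roundtrip)
+ ```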
853
+
854
+ ## Primary Study
855
+ The following are modifications or improvements to original notebooks. Please refer to the authors' models for the published primary work.
856
+ [Cover Image](https://drive.google.com/file/d/1J-spZMzLlPxkqfMrPxvtMZiD2_hfcGyr/view?usp=sharing). [META LLAMA 3 COMMUNITY LICENSE AGREEMENT](https://llama.meta.com/llama3/license/). Built with Meta Llama 3. <br>
857
+
858
+ A 4-bit quantization of Meta-Llama-3-8B-Instruct was used to reduce training memory requirements when fine-tuning on the zjunlp/Mol-Instructions dataset (1-2). In addition, the minimum LoRA rank value was used to reduce the overall size of the created models. Specifically, the molecule-oriented instructions task 'description guided molecule design' was implemented to answer general questions and general biochemistry questions. General questions were answered with high accuracy, while biochemistry-related questions returned 'SELFIES' structures but with limited accuracy.
859
+
860
+ The notebook used Torch and Hugging Face libraries with the Unsloth llama-3-8b-Instruct-bnb-4bit quantization model. Training loss decreased steadily from 1.97 to 0.73 over 60 steps. Additional testing of the appropriate level of compression, and of hyperparameter adjustments for accurate SELFIES chemical structure outputs, remains relevant, as shown in the GitHub notebook for research purposes (3). A 16-bit and a reduced 4-bit version were uploaded to Hugging Face (4-5).
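+ 
+ For reference, a minimal sketch of that setup in the style of the Unsloth Alpaca notebook cited above. Only the 4-bit base model, the minimum LoRA rank, the dataset, and the 60 steps come from the text; the sequence length, LoRA modules, alpha, and batch size are illustrative assumptions, and the real notebook first formats each example into an Alpaca-style prompt:
+ 
+ ```python
+ # Sketch of the 4-bit QLoRA fine-tuning described above (hyperparameters illustrative).
+ from unsloth import FastLanguageModel
+ from trl import SFTTrainer
+ from transformers import TrainingArguments
+ from datasets import load_dataset
+ 
+ model, tokenizer = FastLanguageModel.from_pretrained(
+     model_name='unsloth/llama-3-8b-Instruct-bnb-4bit',  # 4-bit quantized base (1)
+     max_seq_length=2048,  # assumed
+     load_in_4bit=True,
+ )
+ model = FastLanguageModel.get_peft_model(
+     model,
+     r=1,  # 'minimum LoRA rank value' per the text, taken literally here
+     target_modules=['q_proj', 'k_proj', 'v_proj', 'o_proj'],  # assumed
+     lora_alpha=16,  # assumed
+ )
+ 
+ # Molecule-oriented instructions (2); assumes the examples were pre-formatted
+ # into an Alpaca-style 'text' column, as in the original notebook.
+ dataset = load_dataset('zjunlp/Mol-Instructions', 'Molecule-oriented Instructions', split='train')
+ 
+ trainer = SFTTrainer(
+     model=model,
+     tokenizer=tokenizer,
+     train_dataset=dataset,
+     dataset_text_field='text',
+     args=TrainingArguments(output_dir='outputs', max_steps=60, per_device_train_batch_size=2),
+ )
+ trainer.train()  # loss fell from 1.97 to 0.73 over 60 steps in the reported run
+ ```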
861
+
862
+ Update 04/24: The number of training steps was increased to further decrease loss, while maintaining reduced memory requirements through quantization and reduced size through LoRA. This allowed for significantly improved responses to biochemistry-related questions; the resulting models were saved at the following LLM model sizes: [8.03B](https://huggingface.co/kevinkawchak/Meta-Llama-3-8B-Instruct-Molecule16), [4.65B](https://huggingface.co/kevinkawchak/Meta-Llama-3-8B-Instruct-Molecule04). [github](https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/Meta-Llama-3-8B-Instruct-Molecule.ipynb).
863
+
864
+ References:
865
+ 1) unsloth: https://huggingface.co/unsloth/llama-3-8b-Instruct-bnb-4bit
866
+ 2) zjunlp: https://huggingface.co/datasets/zjunlp/Mol-Instructions
867
+ 3) github: https://github.com/kevinkawchak/Medical-Quantum-Machine-Learning/blob/main/Code/Drug%20Discovery/Meta-Llama-3/Meta-Llama-3-8B-Instruct-Mol.ipynb
868
+ 4) hugging face: https://huggingface.co/kevinkawchak/Meta-Llama-3-8B-Instruct-LoRA-Mol16
869
+ 5) hugging face: https://huggingface.co/kevinkawchak/Meta-Llama-3-8B-Instruct-LoRA-Mol04
870
+
871
+ @inproceedings{fang2023mol, <br>
872
+ author = {Yin Fang and<br>
873
+ Xiaozhuan Liang and<br>
874
+ Ningyu Zhang and<br>
875
+ Kangwei Liu and<br>
876
+ Rui Huang and<br>
877
+ Zhuo Chen and<br>
878
+ Xiaohui Fan and<br>
879
+ Huajun Chen},<br>
880
+ title = {Mol-Instructions: {A} Large-Scale Biomolecular Instruction Dataset<br>
881
+ for Large Language Models},<br>
882
+ booktitle = {{ICLR}},<br>
883
+ publisher = {OpenReview.net},<br>
884
+ year = {2024},<br>
885
+ url = {https://openreview.net/pdf?id=Tlsdsb6l9n}}<br>
886
+
887
+ This llama model was trained with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
888
+
889
+ [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)","{""id"": ""kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16"", ""author"": ""kevinkawchak"", ""sha"": ""7d4041e5def52a1dae76b76dc1161dda0d972669"", ""last_modified"": ""2024-05-08 05:55:15+00:00"", ""created_at"": ""2024-05-06 05:42:11+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""text-generation-inference"", ""unsloth"", ""trl"", ""conversational"", ""en"", ""dataset:zjunlp/Mol-Instructions"", ""base_model:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""base_model:finetune:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""license:llama3"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: gradientai/Llama-3-8B-Instruct-Gradient-1048k\ndatasets:\n- zjunlp/Mol-Instructions\nlanguage:\n- en\nlicense: llama3\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>"", ""pad_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='Notice', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', 
size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-05-08 05:55:15+00:00"", ""cardData"": ""base_model: gradientai/Llama-3-8B-Instruct-Gradient-1048k\ndatasets:\n- zjunlp/Mol-Instructions\nlanguage:\n- en\nlicense: llama3\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""66386db3e4156d34a46d3f04"", ""modelId"": ""kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16"", ""usedStorage"": 16060556376}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkevinkawchak%2Fgradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16%5D(%2Fkevinkawchak%2Fgradientai-Llama-3-8B-Instruct-Gradient-1048k-Molecule16)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
890
+ kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule,"---
891
+ language:
892
+ - en
893
+ license: apache-2.0
894
+ tags:
895
+ - text-generation-inference
896
+ - transformers
897
+ - unsloth
898
+ - llama
899
+ - trl
900
+ base_model: gradientai/Llama-3-8B-Instruct-Gradient-1048k
901
+ ---
902
+
903
+ # Uploaded model
904
+
905
+ - **Developed by:** kevinkawchak
906
+ - **License:** apache-2.0
907
+ - **Finetuned from model :** gradientai/Llama-3-8B-Instruct-Gradient-1048k
908
+ - **Finetuned dataset:** zjunlp/Mol-Instructions/Molecule-oriented Instructions/description_guided_molecule_design
909
+
910
+ This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
911
+
912
+ [<img src=""https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png"" width=""200""/>](https://github.com/unslothai/unsloth)
913
+ ","{""id"": ""kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule"", ""author"": ""kevinkawchak"", ""sha"": ""54671b08eee04494d29f9d4855e349e760e759af"", ""last_modified"": ""2024-06-21 02:53:15+00:00"", ""created_at"": ""2024-06-17 20:12:47+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""llama"", ""text-generation"", ""text-generation-inference"", ""unsloth"", ""trl"", ""conversational"", ""en"", ""base_model:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""base_model:finetune:gradientai/Llama-3-8B-Instruct-Gradient-1048k"", ""license:apache-2.0"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: gradientai/Llama-3-8B-Instruct-Gradient-1048k\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>"", ""pad_token"": ""<|reserved_special_token_250|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00003-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00004-of-00004.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""BF16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-06-21 02:53:15+00:00"", ""cardData"": ""base_model: 
gradientai/Llama-3-8B-Instruct-Gradient-1048k\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""667098bfc22463d79063ec4b"", ""modelId"": ""kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule"", ""usedStorage"": 48181669128}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kevinkawchak/gradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkevinkawchak%2Fgradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule%5D(%2Fkevinkawchak%2Fgradientai-Llama-3-8B-Instruct-Gradient-1048k-16Molecule)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
Llama-3-8B-Web_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,391 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ McGill-NLP/Llama-3-8B-Web,"---
3
+ license: llama3
4
+ datasets:
5
+ - McGill-NLP/WebLINX
6
+ language:
7
+ - en
8
+ library_name: transformers
9
+ tags:
10
+ - agents
11
+ - agent
12
+ - llm
13
+ - llama
14
+ ---
15
+
16
+
17
+
18
+ <div align=""center"">
19
+
20
+ <h1>Llama-3-8B-Web</h1>
21
+
22
+ <table>
23
+ <tr>
24
+ <td>
25
+ <a href=""https://github.com/McGill-NLP/webllama"">💻 GitHub</a>
26
+ </td>
27
+ <td>
28
+ <a href=""https://webllama.github.io"">🏠 Homepage</a>
29
+ </td>
30
+ <td>
31
+ <a href=""https://huggingface.co/McGill-NLP/Llama-3-8B-Web"">🤗 Llama-3-8B-Web</a>
32
+ </td>
33
+ </tr>
34
+ </table>
35
+
36
+
37
+ <img src=""assets/WebLlamaLogo.png"" style=""width: 400px;"" />
38
+
39
+ *By using this model, you are accepting the terms of the [Meta Llama 3 Community License Agreement](https://llama.meta.com/llama3/license/).*
40
+
41
+ </div>
42
+
43
+ | `WebLlama` helps you build powerful agents, powered by Meta Llama 3, for browsing the web on your behalf | Our first model, [`Llama-3-8B-Web`](https://huggingface.co/McGill-NLP/Llama-3-8B-Web), surpasses GPT-4V (`*`zero-shot) by 18% on [`WebLINX`](https://mcgill-nlp.github.io/weblinx/) |
44
+ |:---: | :---: |
45
+ | ![Built with Meta Llama 3](assets/llama-3.jpg) | ![Comparison with GPT-4V](assets/LlamaAndGPT.png) |
46
+
47
+
48
+ ## Modeling
49
+
50
+ Our first agent is a finetuned [`Meta-Llama-3-8B-Instruct`](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) model, which was recently released by the Meta GenAI team. We have finetuned this model on the [`WebLINX`](https://mcgill-nlp.github.io/weblinx/) dataset, which contains over 100K instances of web navigation and dialogue, each collected and verified by expert annotators. We use a curated 24K subset for training. The training and evaluation data is available on [Huggingface Hub as `McGill-NLP/WebLINX`](https://huggingface.co/datasets/McGill-NLP/WebLINX).
51
+
52
+ ```python
53
+ from datasets import load_dataset
54
+ from huggingface_hub import snapshot_download
55
+ from transformers import pipeline
56
+
57
+ # We use validation data, but you can use your own data here
58
+ valid = load_dataset(""McGill-NLP/WebLINX"", split=""validation"")
59
+ folder = snapshot_download(""McGill-NLP/WebLINX"", repo_type=""dataset"", allow_patterns=""templates/*"")  # returns the local snapshot path
60
+ template = open(f'{folder}/templates/llama.txt').read()
61
+
62
+ # Run the agent on a single state (text representation) and get the action
63
+ state = template.format(**valid[0])
64
+ agent = pipeline(model=""McGill-NLP/Llama-3-8B-Web"", device=0, torch_dtype='auto')
65
+ out = agent(state, return_full_text=False)[0]
66
+ print(""Action:"", out['generated_text'])
67
+
68
+ # Here, you can use the predictions on platforms like playwright or browsergym
69
+ action = process_pred(out['generated_text']) # implement based on your platform
70
+ env.step(action) # execute the action in your environment
71
+ ```
72
+
73
+ ![Comparison of Llama-3-Web, GPT-4V, GPT-3.5 and MindAct](assets/LlamaAndGPTAndMindAct.png)
74
+
75
+ **It surpasses GPT-4V (zero-shot `*`) by over 18% on the [`WebLINX`](https://mcgill-nlp.github.io/weblinx/) benchmark**, achieving an overall score of 28.8% on the out-of-domain test splits (compared to 10.5% for GPT-4V). It chooses more useful links (34.1% vs 18.9% *seg-F1*), clicks on more relevant elements (27.1% vs 13.6% *IoU*) and formulates more aligned responses (37.5% vs 3.1% *chr-F1*).
76
+
77
+ ## About `WebLlama`
78
+
79
+ | `WebLlama` | The goal of our project is to build effective human-centric agents for browsing the web. We don't want to replace users, but equip them with powerful assistants. |
80
+ |:---: | :---|
81
+ | Modeling | We build on top of cutting-edge libraries for training Llama agents on web navigation tasks. We will provide training scripts, optimized configs, and instructions for training cutting-edge Llamas. |
82
+ | Evaluation | Benchmarks for testing Llama models on real-world web browsing. This includes *human-centric* browsing through dialogue ([`WebLINX`](https://mcgill-nlp.github.io/weblinx/)), and we will soon add more benchmarks for automatic web navigation (e.g. Mind2Web). |
83
+ | Data | Our first model is finetuned on over 24K instances of web interactions, including `click`, `textinput`, `submit`, and dialogue acts. We want to continuously curate, compile and release datasets for training better agents. |
84
+ | Deployment | We want to make it easy to integrate Llama models with existing deployment platforms, including Playwright, Selenium, and BrowserGym. We are currently focusing on making this a reality. |
85
+
86
+
87
+ ## Evaluation
88
+
89
+ We believe short demo videos showing how well an agent performs are NOT enough to judge an agent. Simply put, **we do not know if we have a good agent if we do not have good benchmarks.** We need to systematically evaluate agents on a wide range of tasks, spanning from simple instruction-following web navigation to complex dialogue-guided browsing.
90
+
91
+ <img src=""assets/WebLINXTestSplits.png"" style=""width: 100%; max-width:800px""/>
92
+
93
+ This is why we chose [`WebLINX`](https://mcgill-nlp.github.io/weblinx/) as our first benchmark. In addition to the training split, the benchmark has 4 real-world splits, with the goal of testing multiple dimensions of generalization: new websites, new domains, unseen geographic locations, and scenarios where the *user cannot see the screen and relies on dialogue*. It also covers 150 websites, including booking, shopping, writing, knowledge lookup, and even complex tasks like manipulating spreadsheets.
94
+
95
+ ## Data
96
+
97
+ Although the 24K training examples from [`WebLINX`](https://mcgill-nlp.github.io/weblinx/) provide a good starting point for training a capable agent, we believe that more data is needed to train agents that can generalize to a wide range of web navigation tasks. Although the model has been trained and evaluated on 150 websites, there are millions of websites that have never been seen by it, with new ones being created every day.
98
+
99
+ **This motivates us to continuously curate, compile and release datasets for training better agents.** As an immediate next step, we will be incorporating `Mind2Web`'s training data into the equation, which also covers over 100 websites.
100
+
101
+
102
+ ## Deployment
103
+
104
+ We are working hard to make it easy for you to deploy Llama web agents to the web. We want to integrate `WebLlama` with existing deployment platforms, including Microsoft's Playwright, ServiceNow Research's BrowserGym, and other partners.
105
+
106
+ ## Code
107
+
108
+ The code for finetuning the model and evaluating it on the [`WebLINX`](https://mcgill-nlp.github.io/weblinx/) benchmark is available now. You can find the detailed instructions in [modeling](https://github.com/McGill-NLP/webllama/tree/main/modeling).
109
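+ Since the checkpoint loads like any other `transformers` Llama-3 chat model, here is a minimal inference sketch (our illustration only; the toy prompt below is not the official WebLINX action format, which is documented in the `modeling` folder linked above):
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Load the finetuned checkpoint as a standard Llama-3 chat model
+ tokenizer = AutoTokenizer.from_pretrained('McGill-NLP/Llama-3-8B-Web')
+ model = AutoModelForCausalLM.from_pretrained('McGill-NLP/Llama-3-8B-Web', device_map='auto')
+
+ # The tokenizer ships with a chat template, so messages can be formatted directly
+ messages = [{'role': 'user', 'content': 'Hello! Which link should I click to check the weather?'}]
+ input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors='pt').to(model.device)
+
+ outputs = model.generate(input_ids, max_new_tokens=128)
+ print(tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True))
+ ```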
+
110
+
111
+ ## Citation
112
+
113
+ If you use `WebLlama` in your research, please cite the following paper (on which the data, training and evaluation are originally based):
114
+
115
+ ```
116
+ @misc{lù2024weblinx,
117
+ title={WebLINX: Real-World Website Navigation with Multi-Turn Dialogue},
118
+ author={Xing Han Lù and Zdeněk Kasner and Siva Reddy},
119
+ year={2024},
120
+ eprint={2402.05930},
121
+ archivePrefix={arXiv},
122
+ primaryClass={cs.CL}
123
+ }
124
+ ```","{""id"": ""McGill-NLP/Llama-3-8B-Web"", ""author"": ""McGill-NLP"", ""sha"": ""09856c28ec324cf3fe4c513bdc4437a75b78185f"", ""last_modified"": ""2024-04-26 20:06:59+00:00"", ""created_at"": ""2024-04-22 20:48:36+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 106, ""downloads_all_time"": null, ""likes"": 213, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""safetensors"", ""llama"", ""text-generation"", ""agents"", ""agent"", ""llm"", ""conversational"", ""en"", ""dataset:McGill-NLP/WebLINX"", ""arxiv:2402.05930"", ""license:llama3"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- McGill-NLP/WebLINX\nlanguage:\n- en\nlibrary_name: transformers\nlicense: llama3\ntags:\n- agents\n- agent\n- llm\n- llama"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>"", ""pad_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='META_LLAMA_3_COMMUNITY_LICENSE_AGREEMENT.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/LlamaAndGPT.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/LlamaAndGPTAndMindAct.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/WebLINXTestSplits.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/WebLlamaLogo.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='assets/llama-3.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='trainer_state.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, 
blob_id=None, lfs=None)""], ""spaces"": [""Alpha9/McGill-NLP-Llama-3-8B-Web"", ""Voogar/McGill-NLP-Llama-3-8B-Web"", ""Alpha9/McGill-NLP-Llama-3-8B""], ""safetensors"": {""parameters"": {""BF16"": 8030261248}, ""total"": 8030261248}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-04-26 20:06:59+00:00"", ""cardData"": ""datasets:\n- McGill-NLP/WebLINX\nlanguage:\n- en\nlibrary_name: transformers\nlicense: llama3\ntags:\n- agents\n- agent\n- llm\n- llama"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""6626cd248f7573e6a655edef"", ""modelId"": ""McGill-NLP/Llama-3-8B-Web"", ""usedStorage"": 32121233261}",0,"https://huggingface.co/PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-2bit-smashed, https://huggingface.co/PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-1bit-smashed, https://huggingface.co/PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-4bit-smashed",3,,0,"https://huggingface.co/mradermacher/Llama-3-8B-Web-GGUF, https://huggingface.co/QuantFactory/Llama-3-8B-Web-GGUF, https://huggingface.co/PrunaAI/McGill-NLP-Llama-3-8B-Web-AWQ-4bit-smashed, https://huggingface.co/PrunaAI/McGill-NLP-Llama-3-8B-Web-GGUF-smashed, https://huggingface.co/tensorblock/Llama-3-8B-Web-GGUF, https://huggingface.co/itlwas/Llama-3-8B-Web-Q4_K_M-GGUF",6,,0,"Alpha9/McGill-NLP-Llama-3-8B, Alpha9/McGill-NLP-Llama-3-8B-Web, Voogar/McGill-NLP-Llama-3-8B-Web, huggingface/InferenceSupport/discussions/new?title=McGill-NLP/Llama-3-8B-Web&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BMcGill-NLP%2FLlama-3-8B-Web%5D(%2FMcGill-NLP%2FLlama-3-8B-Web)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",4
125
+ PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-2bit-smashed,"---
126
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
127
+ base_model: McGill-NLP/Llama-3-8B-Web
128
+ metrics:
129
+ - memory_disk
130
+ - memory_inference
131
+ - inference_latency
132
+ - inference_throughput
133
+ - inference_CO2_emissions
134
+ - inference_energy_consumption
135
+ tags:
136
+ - pruna-ai
137
+ ---
138
+ <!-- header start -->
139
+ <!-- 200823 -->
140
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
141
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
142
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
143
+ </a>
144
+ </div>
145
+ <!-- header end -->
146
+
147
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
148
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
149
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
150
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
151
+
152
+ # Simply make AI models cheaper, smaller, faster, and greener!
153
+
154
+ - Give a thumbs up if you like this model!
155
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
156
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
157
+ - Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
158
+ - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
159
+
160
+ ## Results
161
+
162
+ ![image info](./plots.png)
163
+
164
+ **Frequently Asked Questions**
165
+ - ***How does the compression work?*** The model is compressed with hqq.
166
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
167
+ - ***How is the model efficiency evaluated?*** These results were obtained on an NVIDIA A100-PCIE-40GB with the configuration described in `model/smash_config.json`, after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend running the models directly in your use-case conditions to see whether the smashed model benefits you.
168
+ - ***What is the model format?*** We use safetensors.
169
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
170
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
171
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
172
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
173
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
174
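+
+ To make the ""Sync""/""Async"" distinction concrete, here is a minimal timing sketch of our own (assuming `torch` and a CUDA device; this is not Pruna's actual benchmarking code):
+
+ ```python
+ import time
+ import torch
+
+ def measure_latency(model, batch, sync_all=True):
+     torch.cuda.synchronize()      # start from a clean GPU state
+     start = time.perf_counter()
+     out = model(batch)
+     if sync_all:
+         torch.cuda.synchronize()  # 'Sync': wait until every GPU kernel has finished
+     # without the second synchronize, the timer stops as soon as the
+     # output handle is usable on the CPU side ('Async')
+     return out, time.perf_counter() - start
+ ```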
+
175
+ ## Setup
176
+
177
+ You can run the smashed model with these steps:
178
+
179
+ 0. Check that the requirements from the original repo McGill-NLP/Llama-3-8B-Web are installed. In particular, check the python, cuda, and transformers versions.
180
+ 1. Make sure that you have installed the quantization-related packages.
181
+ ```bash
182
+ pip install hqq
183
+ ```
184
+ 2. Load & run the model.
185
+ ```python
186
+ from transformers import AutoModelForCausalLM, AutoTokenizer
187
+ from hqq.engine.hf import HQQModelForCausalLM
188
+ from hqq.models.hf.base import AutoHQQHFModel
189
+
190
+ try:
191
+     # Preferred path: load through the HQQ engine wrapper
+     model = HQQModelForCausalLM.from_quantized(""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-2bit-smashed"", device_map='auto')
192
+ except Exception:
193
+     # Fallback: load through the generic AutoHQQHFModel interface
+     model = AutoHQQHFModel.from_quantized(""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-2bit-smashed"")
194
+ tokenizer = AutoTokenizer.from_pretrained(""McGill-NLP/Llama-3-8B-Web"")
195
+
196
+ input_ids = tokenizer(""What is the color of prunes?,"", return_tensors='pt').to(model.device)[""input_ids""]
197
+
198
+ outputs = model.generate(input_ids, max_new_tokens=216)
199
+ tokenizer.decode(outputs[0])
200
+ ```
201
+
202
+ ## Configurations
203
+
204
+ The configuration info is in `smash_config.json`.
205
+
206
+ ## Credits & License
207
+
208
+ The license of the smashed model follows the license of the original model. Please check the license of the original model McGill-NLP/Llama-3-8B-Web, which provided the base model, before using this smashed model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.
209
+
210
+ ## Want to compress other models?
211
+
212
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
213
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-2bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""b88e8b14fe59cd4c6cb9bac1390d08278acf7511"", ""last_modified"": ""2024-08-02 15:56:54+00:00"", ""created_at"": ""2024-04-29 15:29:54+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 12, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""text-generation"", ""pruna-ai"", ""conversational"", ""base_model:McGill-NLP/Llama-3-8B-Web"", ""base_model:finetune:McGill-NLP/Llama-3-8B-Web"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: McGill-NLP/Llama-3-8B-Web\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>"", ""pad_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='plots.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-02 15:56:54+00:00"", ""cardData"": ""base_model: McGill-NLP/Llama-3-8B-Web\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- 
pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""662fbcf2d40e9b46f90d7eab"", ""modelId"": ""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-2bit-smashed"", ""usedStorage"": 4010664640}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-2bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2FMcGill-NLP-Llama-3-8B-Web-HQQ-2bit-smashed%5D(%2FPrunaAI%2FMcGill-NLP-Llama-3-8B-Web-HQQ-2bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
214
+ PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-1bit-smashed,"---
215
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
216
+ base_model: McGill-NLP/Llama-3-8B-Web
217
+ metrics:
218
+ - memory_disk
219
+ - memory_inference
220
+ - inference_latency
221
+ - inference_throughput
222
+ - inference_CO2_emissions
223
+ - inference_energy_consumption
224
+ tags:
225
+ - pruna-ai
226
+ ---
227
+ <!-- header start -->
228
+ <!-- 200823 -->
229
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
230
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
231
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
232
+ </a>
233
+ </div>
234
+ <!-- header end -->
235
+
236
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
237
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
238
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
239
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
240
+
241
+ # Simply make AI models cheaper, smaller, faster, and greener!
242
+
243
+ - Give a thumbs up if you like this model!
244
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
245
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
246
+ - Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
247
+ - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
248
+
249
+ ## Results
250
+
251
+ ![image info](./plots.png)
252
+
253
+ **Frequently Asked Questions**
254
+ - ***How does the compression work?*** The model is compressed with hqq.
255
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
256
+ - ***How is the model efficiency evaluated?*** These results were obtained on an NVIDIA A100-PCIE-40GB with the configuration described in `model/smash_config.json`, after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend running the models directly in your use-case conditions to see whether the smashed model benefits you.
257
+ - ***What is the model format?*** We use safetensors.
258
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
259
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
260
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
261
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
262
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
263
+
264
+ ## Setup
265
+
266
+ You can run the smashed model with these steps:
267
+
268
+ 0. Check that the requirements from the original repo McGill-NLP/Llama-3-8B-Web are installed. In particular, check the python, cuda, and transformers versions.
269
+ 1. Make sure that you have installed the quantization-related packages.
270
+ ```bash
271
+ pip install hqq
272
+ ```
273
+ 2. Load & run the model.
274
+ ```python
275
+ from transformers import AutoModelForCausalLM, AutoTokenizer
276
+ from hqq.engine.hf import HQQModelForCausalLM
277
+ from hqq.models.hf.base import AutoHQQHFModel
278
+
279
+ try:
280
+     # Preferred path: load through the HQQ engine wrapper
+     model = HQQModelForCausalLM.from_quantized(""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-1bit-smashed"", device_map='auto')
281
+ except Exception:
282
+     # Fallback: load through the generic AutoHQQHFModel interface
+     model = AutoHQQHFModel.from_quantized(""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-1bit-smashed"")
283
+ tokenizer = AutoTokenizer.from_pretrained(""McGill-NLP/Llama-3-8B-Web"")
284
+
285
+ input_ids = tokenizer(""What is the color of prunes?"", return_tensors='pt').to(model.device)[""input_ids""]
286
+
287
+ outputs = model.generate(input_ids, max_new_tokens=216)
288
+ tokenizer.decode(outputs[0])
289
+ ```
290
+
291
+ ## Configurations
292
+
293
+ The configuration info is in `smash_config.json`.
294
+
295
+ ## Credits & License
296
+
297
+ The license of the smashed model follows the license of the original model. Please check the license of the original model McGill-NLP/Llama-3-8B-Web, which provided the base model, before using this smashed model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.
298
+
299
+ ## Want to compress other models?
300
+
301
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
302
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-1bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""dee9e70f61ce57be75ca7d352f0fb9379efadf19"", ""last_modified"": ""2024-08-02 15:56:55+00:00"", ""created_at"": ""2024-04-29 15:32:32+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""text-generation"", ""pruna-ai"", ""conversational"", ""base_model:McGill-NLP/Llama-3-8B-Web"", ""base_model:finetune:McGill-NLP/Llama-3-8B-Web"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: McGill-NLP/Llama-3-8B-Web\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>"", ""pad_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='plots.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-02 15:56:55+00:00"", ""cardData"": ""base_model: McGill-NLP/Llama-3-8B-Web\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- 
pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""662fbd90abafab24e1aa80b6"", ""modelId"": ""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-1bit-smashed"", ""usedStorage"": 3138249408}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-1bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2FMcGill-NLP-Llama-3-8B-Web-HQQ-1bit-smashed%5D(%2FPrunaAI%2FMcGill-NLP-Llama-3-8B-Web-HQQ-1bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
303
+ PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-4bit-smashed,"---
304
+ thumbnail: ""https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg""
305
+ base_model: McGill-NLP/Llama-3-8B-Web
306
+ metrics:
307
+ - memory_disk
308
+ - memory_inference
309
+ - inference_latency
310
+ - inference_throughput
311
+ - inference_CO2_emissions
312
+ - inference_energy_consumption
313
+ tags:
314
+ - pruna-ai
315
+ ---
316
+ <!-- header start -->
317
+ <!-- 200823 -->
318
+ <div style=""width: auto; margin-left: auto; margin-right: auto"">
319
+ <a href=""https://www.pruna.ai/"" target=""_blank"" rel=""noopener noreferrer"">
320
+ <img src=""https://i.imgur.com/eDAlcgk.png"" alt=""PrunaAI"" style=""width: 100%; min-width: 400px; display: block; margin: auto;"">
321
+ </a>
322
+ </div>
323
+ <!-- header end -->
324
+
325
+ [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
326
+ [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
327
+ [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
328
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx)
329
+
330
+ # Simply make AI models cheaper, smaller, faster, and greener!
331
+
332
+ - Give a thumbs up if you like this model!
333
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
334
+ - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
335
+ - Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/)
336
+ - Join Pruna AI community on Discord [here](https://discord.gg/rskEr4BZJx) to share feedback/suggestions or get help.
337
+
338
+ ## Results
339
+
340
+ ![image info](./plots.png)
341
+
342
+ **Frequently Asked Questions**
343
+ - ***How does the compression work?*** The model is compressed with hqq.
344
+ - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
345
+ - ***How is the model efficiency evaluated?*** These results were obtained on an NVIDIA A100-PCIE-40GB with the configuration described in `model/smash_config.json`, after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend running the models directly in your use-case conditions to see whether the smashed model benefits you.
346
+ - ***What is the model format?*** We use safetensors.
347
+ - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
348
+ - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append ""turbo"", ""tiny"", or ""green"" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model.
349
+ - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
350
+ - ***What are ""first"" metrics?*** Results mentioning ""first"" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads.
351
+ - ***What are ""Sync"" and ""Async"" metrics?*** ""Sync"" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. ""Async"" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases.
352
+
353
+ ## Setup
354
+
355
+ You can run the smashed model with these steps:
356
+
357
+ 0. Check that the requirements from the original repo McGill-NLP/Llama-3-8B-Web are installed. In particular, check the python, cuda, and transformers versions.
358
+ 1. Make sure that you have installed the quantization-related packages.
359
+ ```bash
360
+ pip install hqq
361
+ ```
362
+ 2. Load & run the model.
363
+ ```python
364
+ from transformers import AutoModelForCausalLM, AutoTokenizer
365
+ from hqq.engine.hf import HQQModelForCausalLM
366
+ from hqq.models.hf.base import AutoHQQHFModel
367
+
368
+ try:
369
+     # Preferred path: load through the HQQ engine wrapper
+     model = HQQModelForCausalLM.from_quantized(""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-4bit-smashed"", device_map='auto')
370
+ except Exception:
371
+     # Fallback: load through the generic AutoHQQHFModel interface
+     model = AutoHQQHFModel.from_quantized(""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-4bit-smashed"")
372
+ tokenizer = AutoTokenizer.from_pretrained(""McGill-NLP/Llama-3-8B-Web"")
373
+
374
+ input_ids = tokenizer(""What is the color of prunes?"", return_tensors='pt').to(model.device)[""input_ids""]
375
+
376
+ outputs = model.generate(input_ids, max_new_tokens=216)
377
+ tokenizer.decode(outputs[0])
378
+ ```
379
+
380
+ ## Configurations
381
+
382
+ The configuration info is in `smash_config.json`.
383
+
384
+ ## Credits & License
385
+
386
+ The license of the smashed model follows the license of the original model. Please check the license of the original model McGill-NLP/Llama-3-8B-Web, which provided the base model, before using this smashed model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.
387
+
388
+ ## Want to compress other models?
389
+
390
+ - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
391
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).","{""id"": ""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-4bit-smashed"", ""author"": ""PrunaAI"", ""sha"": ""0b4cd49d9c52d9cb51dc804fb1bb6e4678126939"", ""last_modified"": ""2024-08-02 15:56:59+00:00"", ""created_at"": ""2024-04-29 15:40:07+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 10, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""llama"", ""text-generation"", ""pruna-ai"", ""conversational"", ""base_model:McGill-NLP/Llama-3-8B-Web"", ""base_model:finetune:McGill-NLP/Llama-3-8B-Web"", ""autotrain_compatible"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: McGill-NLP/Llama-3-8B-Web\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": {""architectures"": [""LlamaForCausalLM""], ""model_type"": ""llama"", ""tokenizer_config"": {""bos_token"": ""<|begin_of_text|>"", ""chat_template"": ""{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"", ""eos_token"": ""<|end_of_text|>"", ""pad_token"": ""<|end_of_text|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='plots.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qmodel.pt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='results.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='smash_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-02 15:56:59+00:00"", ""cardData"": ""base_model: McGill-NLP/Llama-3-8B-Web\nmetrics:\n- memory_disk\n- memory_inference\n- inference_latency\n- inference_throughput\n- inference_CO2_emissions\n- inference_energy_consumption\ntags:\n- 
pruna-ai\nthumbnail: https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""662fbf57a8214f8793552201"", ""modelId"": ""PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-4bit-smashed"", ""usedStorage"": 5755502636}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=PrunaAI/McGill-NLP-Llama-3-8B-Web-HQQ-4bit-smashed&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BPrunaAI%2FMcGill-NLP-Llama-3-8B-Web-HQQ-4bit-smashed%5D(%2FPrunaAI%2FMcGill-NLP-Llama-3-8B-Web-HQQ-4bit-smashed)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
ModernBERT-base_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:022072d2c7080a7b3506f056b702dcfad68ac43aff30ff7b189e80a940654b3b
3
+ size 32974582
ModernBERT-large_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
The diff for this file is too large to render. See raw diff
 
PairRM_finetunes_20250426_215237.csv_finetunes_20250426_215237.csv ADDED
@@ -0,0 +1,281 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ llm-blender/PairRM,"---
3
+ license: mit
4
+ datasets:
5
+ - openai/summarize_from_feedback
6
+ - openai/webgpt_comparisons
7
+ - Dahoas/synthetic-instruct-gptj-pairwise
8
+ - Anthropic/hh-rlhf
9
+ - lmsys/chatbot_arena_conversations
10
+ - openbmb/UltraFeedback
11
+ metrics:
12
+ - accuracy
13
+ tags:
14
+ - reward_model
15
+ - reward-model
16
+ - RLHF
17
+ - evaluation
18
+ - llm
19
+ - instruction
20
+ - reranking
21
+ language:
22
+ - en
23
+ pipeline_tag: text-generation
24
+ ---
25
+
26
+ # Pairwise Reward Model for LLMs (PairRM) from LLM-Blender
27
+
28
+
29
+ - Github: [https://github.com/yuchenlin/LLM-Blender](https://github.com/yuchenlin/LLM-Blender)
30
+ - Paper: [https://arxiv.org/abs/2306.02561](https://arxiv.org/abs/2306.02561)
31
+ - Space Demo: [https://huggingface.co/spaces/llm-blender/LLM-Blender](https://huggingface.co/spaces/llm-blender/LLM-Blender)
32
+
33
+
34
+ ## News
35
+
36
+ - Check out our results on AlpacaEval leaderboard: [Twitter](https://x.com/billyuchenlin/status/1732198787354067380?s=20) [Leaderboard](https://tatsu-lab.github.io/alpaca_eval/)
37
+
38
+ ## Introduction
39
+
40
+ Pairwise Reward Model (PairRM) takes an instruction and a **pair** of output candidates as the input,
41
+ and outputs a score for each candidate to measure their **relative** quality.
42
+ PairRM can be used to (re-)rank a list of candidate outputs, and thus can serve as an LLM evaluator to efficiently assess the quality of LLMs in a local environment.
43
+ PairRM can also be used to enhance decoding with `best-of-n sampling` (i.e., reranking N sampled outputs).
44
+ Apart from that, one can also use PairRM to further align instruction-tuned LLMs with RLHF methods.
45
+
46
+ Unlike other RMs, which encode and score each candidate separately,
47
+ PairRM takes a pair of candidates and compares them side-by-side to identify the subtle differences between them.
48
+ Also, PairRM is based on [`microsoft/deberta-v3-large`](https://huggingface.co/microsoft/deberta-v3-large), and thus it is super efficient, at only **0.4B** parameters.
49
+ We trained PairRM on a diverse collection of six human-preference datasets (see more [here](https://huggingface.co/llm-blender/PairRM#training-datasets)).
50
+
51
+ PairRM is part of the LLM-Blender project (ACL 2023). Please see our [paper](https://arxiv.org/abs/2306.02561) above to know more.
52
+
53
+
54
+ ## Installation
55
+
56
+ - First install `llm-blender`
57
+ ```bash
58
+ pip install git+https://github.com/yuchenlin/LLM-Blender.git
59
+ ```
60
+
61
+ - Then load PairRM:
62
+ ```python
63
+ import llm_blender
64
+ blender = llm_blender.Blender()
65
+ blender.loadranker(""llm-blender/PairRM"") # load PairRM
66
+ ```
67
+
68
+
69
+ ## Usage
70
+
71
+ ### Use Case 1: Comparing/Ranking output candidates given an instruction
72
+
73
+ - Ranking a list candidate responses
74
+
75
+ ```python
76
+ inputs = [""hello, how are you!"", ""I love you!""]
77
+ candidates_texts = [[""get out!"", ""hi! I am fine, thanks!"", ""bye!""],
78
+ [""I love you too!"", ""I hate you!"", ""Thanks! You're a good guy!""]]
79
+ ranks = blender.rank(inputs, candidates_texts, return_scores=False, batch_size=1)
80
+ # ranks is a list of ranks
81
+ # ranks[i][j] is the rank of candidate-j for input-i
82
+ """"""
83
+ ranks -->
84
+ array([[3, 1, 2], # it means ""hi! I am fine, thanks!"" ranks 1st, ""bye!"" ranks 2nd, and ""get out!"" ranks 3rd.
85
+ [1, 3, 2]], # it means ""I love you too!"" ranks 1st, and ""I hate you!"" ranks 3rd.
86
+ dtype=int32)
87
+
88
+ """"""
89
+ ```
90
+
91
+ - Directly comparing two candidate responses
92
+ ```python
93
+ inputs = [""hello!"", ""I love you!""]
94
+ candidates_A = [""hi!"", ""I hate you!""]
95
+ candidates_B = [""f**k off!"", ""I love you, too!""]
96
+ comparison_results = blender.compare(inputs, candidates_A, candidates_B)
97
+ # comparison_results is a list of bool, where comparison_results[i] denotes
98
+ # whether candidates_A[i] is better than candidates_B[i] for inputs[i]
99
+ # Example: comparison_results[0]--> True
100
+ ```
101
+
102
+ <details><summary> Comparing two multi-turn conversations. </summary>
103
+
104
+ ```python
105
+ conv1 = [
106
+ {
107
+ ""content"": ""hello"",
108
+ ""role"": ""USER""
109
+ },
110
+ {
111
+ ""content"": ""[assistant1‘s response 1]"",
112
+ ""role"": ""ASSISTANT""
113
+ },
114
+ ...
115
+ ]
116
+ conv2 = [
117
+ {
118
+ ""content"": ""hello"",
119
+ ""role"": ""USER""
120
+ },
121
+ {
122
+ ""content"": ""[assistant2's response 1]"",
123
+ ""role"": ""ASSISTANT""
124
+ },
125
+ ...
126
+ ]
127
+ comparison_results = blender.compare_conversations([conv1], [conv2])
128
+ # comparison_results is a list of bool, where each element denotes whether all the responses in conv1 together is better than that of conv2
129
+ ```
130
+ </details>
131
+
132
+
133
+ ### Use Case 2: Best-of-n Sampling (Decoding Enhancement)
134
+
135
+ **Best-of-n Sampling**, aka rejection sampling, is a strategy to enhance response quality by selecting the response ranked highest by the reward model
136
+ (see more in [OpenAI WebGPT section 3.2](https://arxiv.org/pdf/2112.09332.pdf) and [OpenAI Blog](https://openai.com/research/measuring-goodharts-law)).
137
+ Best-of-n sampling with PairRM is a very easy way to improve your LLMs with only a few changes to your inference code:
138
+
139
+ ```python
140
+ # loading models
141
+ import llm_blender
142
+ from transformers import AutoTokenizer, AutoModelForCausalLM
143
+ tokenizer = AutoTokenizer.from_pretrained(""HuggingFaceH4/zephyr-7b-beta"")
144
+ model = AutoModelForCausalLM.from_pretrained(""HuggingFaceH4/zephyr-7b-beta"", device_map=""auto"")
145
+ system_message = {""role"": ""system"", ""content"": ""You are a friendly chatbot.""}
146
+
147
+ # formatting your inputs
148
+ inputs = [""can you tell me a joke about OpenAI?""]
149
+ messages = [[system_message, {""role"": ""user"", ""content"": _input}] for _input in inputs]
150
+ prompts = [tokenizer.apply_chat_template(m, tokenize=False, add_generation_prompt=True) for m in messages]
151
+
152
+ # Conventional generation method
153
+ input_ids = tokenizer(prompts[0], return_tensors=""pt"").input_ids
154
+ sampled_outputs = model.generate(input_ids, do_sample=True, top_k=50, top_p=0.95, num_return_sequences=1)
155
+ print(tokenizer.decode(sampled_outputs[0][len(input_ids[0]):], skip_special_tokens=False))
156
+ # --> The output could be a bad case such as a very short one, e.g., `Sure`
157
+
158
+ # PairRM for best-of-n sampling
159
+ blender = llm_blender.Blender()
160
+ blender.loadranker(""llm-blender/PairRM"") # load ranker checkpoint
161
+ outputs = blender.best_of_n_generate(model, tokenizer, prompts, n=10)
162
+
163
+ print(""### Prompt:\n"", prompts[0])
164
+ print(""### best-of-n generations:\n"", outputs[0])
165
+ # --> The output will be much more stable and consistently better than single sampling, for example:
166
+ """"""
167
+ Sure, here's a joke about OpenAI:
168
+
169
+ Why did OpenAI decide to hire a mime as their new AI researcher?
170
+
171
+ Because they wanted someone who could communicate complex ideas without making a sound!
172
+
173
+ (Note: This is a joke, not a reflection of OpenAI's actual hiring practices.)
174
+ """"""
175
+ ```
176
+
177
+ ### Use case 3: RLHF
178
+ PairRM has been trained on various high-quality and large-scale datasets with human preference annotations
179
+ and has shown strong correlation with human preferences despite an extremely small model size (0.4B),
180
+ approaching the performance of GPT-4.
181
+ PairRM can thus help align future LLMs more efficiently and effectively.
182
+ With a `blender.compare()` function, you can apply PairRM to popular RLHF toolkits such as [trl](https://huggingface.co/docs/trl/index).
183
+
184
+ **🔥 Check more details on our example jupyter notebook usage: [`blender_usage.ipynb`](https://github.com/yuchenlin/LLM-Blender/blob/main/blender_usage.ipynb)**
185
+
186
+
187
+ Learn more in our LLM-Blender Github [README.md](https://github.com/yuchenlin/LLM-Blender#rank-and-fusion)
188
+
189
+
190
+
191
+
192
+ ## Statistics
193
+
194
+ ### Context length
195
+ | PairRanker type | Source max length | Candidate max length | Total max length |
196
+ |:-----------------:|:-----------------:|----------------------|------------------|
197
+ | [pair-ranker](https://huggingface.co/llm-blender/pair-ranker) (our previous version) | 128 | 128 | 384 |
198
+ | [PairRM](https://huggingface.co/llm-blender/pair-reward-model/) (This model) | 1224 | 412 | 2048 |
199
+
200
+ ### Training Datasets
201
+ - [openai/summarize_from_feedback](https://huggingface.co/datasets/openai/summarize_from_feedback)
202
+ - [openai/webgpt_comparisons](https://huggingface.co/datasets/openai/webgpt_comparisons)
203
+ - [Dahoas/synthetic-instruct-gptj-pairwise](https://huggingface.co/datasets/Dahoas/synthetic-instruct-gptj-pairwise)
204
+ - [Anthropic/hh-rlhf](https://huggingface.co/datasets/Anthropic/hh-rlhf)
205
+ - [lmsys/chatbot_arena_conversations](https://huggingface.co/datasets/lmsys/chatbot_arena_conversations)
206
+ - [openbmb/UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback)
207
+
208
+ ### Performance
209
+ PairRM has been trained on various high-quality and large-scale datasets with human preference annotations and exhibits strong correlation with human preferences
210
+ with an extremely small model size (0.4B), approaching the performance of GPT-4.
211
+
212
+ We test the pairwise comparison on
213
+ - [Auto-J pairwise testdata](https://github.com/GAIR-NLP/auto-j#pairwise-response-comparison)
214
+ - [HHH-alignment](https://huggingface.co/datasets/HuggingFaceH4/hhh_alignment)
215
+ - [MT-bench-human-judgements](https://huggingface.co/datasets/lmsys/mt_bench_human_judgments)
216
+
217
+ All following results are reported as pairwise comparison accuracies (agreements).
218
+
219
+ #### Auto-J Pairwise test data performance
220
+
221
+ | Model | Summ | Exam | Code | Rewriting | Crea W | Func W | Comm | NLP | Overall |
222
+ |:---------------------:|:---------:|:---------:|:---------:|:---------:|:---------:|:---------:|:-----:|:--------:|:---------:|
223
+ | Closed-source Models |
224
+ | ChatGPT | 33.3 | 40.3 | 36.6 | 31.6 | 48.2 | 40.4 | 47.6 | 45.8 | 42.7 |
225
+ | Claude-2 | 30.6 | 36.1 | 41.7 | 34.2 | 48.1 | 42.5 | 40.6 | 48.5 | 42.4 |
226
+ | GPT-4 | 59.7 | 51.4 | 69.2 | 58.3 | 66.7 | 60.4 | 58.3 | 65.2 | 61.9 |
227
+ | Open-source Models |
228
+ | SteamSHP | 33.3 | 29.2 | 26.7 | 33.3 | 40.7 | 31.3 | 51.4 | 51.9 | 40.6 |
229
+ | PandaLM | 29.2 | 33.3 | 31.7 | 23.3 | 43.5 | 32.9 | 44.8 | 48.9 | 38.9 |
230
+ | LLaMA-2-Chat-13B | 20.8 | 27.8 | 19.2 | 20 | 31.5 | 27.5 | 35.8 | 31.8 | 29 |
231
+ | Vicuna-13B-v1.5 | 30.6 | 23.6 | 35 | 28.3 | 36.1 | 37.5 | 45.5 | 39.8 | 37.3 |
232
+ | WizardLM-13B-v1.2 | 22.2 | 20.8 | 32.5 | 19.2 | 28.7 | 25.4 | 29.2 | 33 | 27.8 |
233
+ | LLaMA-2-Chat-70B | 34.7 | 33.3 | 36.7 | 35.8 | 51.4 | 54.2 | 47.2 | 47.7 | 45.9 |
234
+ | AUTO-J (13b) | 45.8 | 38.9 | **59.2** | 47.5 | 54.6 | 57.1 | **58** | 57.6 | 54.8 |
235
+ | UltraRM (13b) | 56.94 | 43.06 | 55.0 | 53.33 | **67.13** | **64.17** | 56.25 | 59.85 | **59.85** |
236
+ | **PairRM (0.4b)** | **56.94** | **52.78** | 58.33 | **55.83** | 61.57 | 59.17 | 57.64 | **62.5** | 59.05 |
237
+
238
+ #### HHH-Alignment and MT-bench human judgements
239
+
240
+ | Evaluator LM | HHH ALIGNMENT | | | | | MT BENCH HUMAN JUDG. |
241
+ |:-------------------------:|:-------------:|:---------:|:---------:|:--------:|:-----------:|:---------------------:|
242
+ | | Help. | Harm. | Hon. | Other | Total Avg. | Human Preference |
243
+ | RANDOM | 50 | 50 | 50 | 50 | 50 | 34.26 |
244
+ | STANFORDNLP REWARD MODEL | 69.49 | 60.34 | 52.46 | 51.16 | 58.82 | 44.79 |
245
+ | ALMOST REWARD MODEL | 74.58 | 67.24 | 78.69 | 86.05 | 76.02 | 49.9 |
246
+ | LLAMA2-CHAT 7B | 66.1 | 81.03 | 70.49 | 74.42 | 72.85 | 51.78 |
247
+ | LLAMA2-CHAT 13B | 74.58 | 87.93 | 55.74 | 79.07 | 73.76 | 52.34 |
248
+ | LLAMA2-CHAT 70B | 66.1 | **89.66** | 67.21 | 74.42 | 74.21 | 53.67 |
249
+ | LLAMA2-CHAT 13B+COARSE. | 68.74 | 68.97 | 65.57 | 67.44 | 67.42 | 46.89 |
250
+ | GPT-3.5-TURBO-0613 | 76.27 | 87.93 | 67.21 | 86.05 | 78.73 | 57.12 |
251
+ | PROMETHEUS 7B | 69.49 | 84.48 | 78.69 | 90.7 | 80.09 | 55.14 |
252
+ | PROMETHEUS 13B | 81.36 | 82.76 | 75.41 | 76.74 | 79.19 | 57.72 |
253
+ | UltraRM (13B) | **86.44** | 79.31 | **81.97** | 88.37 | 83.71 | 56 |
254
+ | **PairRM (0.4B)** | 84.75 | 84.48 | 80.33 | **90.7** | **84.62** | **59** |
255
+ | GPT-4-0613 | 91.53 | 93.1 | 85.25 | 83.72 | 88.69 | 63.87 |
256
+
257
+ **While PairRM is an extremely small model (0.4B) based on DeBERTa, its pairwise comparison agreement approaches GPT-4's performance!**
258
+
259
+ Two reasons account for this:
260
+ - PairRM's specially designed model architecture for pairwise comparison through bidirectional attention (see the LLM-Blender paper for more details)
261
+ - The high-quality and large-scale human preference annotation data it was trained on (see the training dataset list on this Hugging Face page)
262
+
263
+
264
+
265
+
266
+
267
+
268
+ ## Citation & Credits
269
+ If you are using PairRM in your research, please cite LLM-blender.
270
+ ```bibtex
271
+ @inproceedings{llm-blender-2023,
272
+ title = ""LLM-Blender: Ensembling Large Language Models with Pairwise Comparison and Generative Fusion"",
273
+ author = ""Jiang, Dongfu and Ren, Xiang and Lin, Bill Yuchen"",
274
+ booktitle = ""Proceedings of the 61th Annual Meeting of the Association for Computational Linguistics (ACL 2023)"",
275
+ year = ""2023""
276
+ }
277
+
278
+ ```
279
+
280
+
281
+ ","{""id"": ""llm-blender/PairRM"", ""author"": ""llm-blender"", ""sha"": ""5b880cc73776ac75a835b3e0bd5169bcb5be013b"", ""last_modified"": ""2024-01-22 15:47:01+00:00"", ""created_at"": ""2023-11-06 07:08:24+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 6215, ""downloads_all_time"": null, ""likes"": 198, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""deberta"", ""reward_model"", ""reward-model"", ""RLHF"", ""evaluation"", ""llm"", ""instruction"", ""reranking"", ""text-generation"", ""en"", ""dataset:openai/summarize_from_feedback"", ""dataset:openai/webgpt_comparisons"", ""dataset:Dahoas/synthetic-instruct-gptj-pairwise"", ""dataset:Anthropic/hh-rlhf"", ""dataset:lmsys/chatbot_arena_conversations"", ""dataset:openbmb/UltraFeedback"", ""arxiv:2306.02561"", ""arxiv:2112.09332"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": ""[MASK]"", ""trending_score"": null, ""card_data"": ""datasets:\n- openai/summarize_from_feedback\n- openai/webgpt_comparisons\n- Dahoas/synthetic-instruct-gptj-pairwise\n- Anthropic/hh-rlhf\n- lmsys/chatbot_arena_conversations\n- openbmb/UltraFeedback\nlanguage:\n- en\nlicense: mit\nmetrics:\n- accuracy\npipeline_tag: text-generation\ntags:\n- reward_model\n- reward-model\n- RLHF\n- evaluation\n- llm\n- instruction\n- reranking"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""model_type"": ""deberta"", ""tokenizer_config"": {""bos_token"": ""[CLS]"", ""cls_token"": ""[CLS]"", ""eos_token"": ""[SEP]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ranker_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='spm.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""llm-blender/LLM-Blender""], ""safetensors"": {""parameters"": {""F32"": 436014081}, ""total"": 436014081}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-01-22 15:47:01+00:00"", ""cardData"": ""datasets:\n- openai/summarize_from_feedback\n- openai/webgpt_comparisons\n- Dahoas/synthetic-instruct-gptj-pairwise\n- Anthropic/hh-rlhf\n- lmsys/chatbot_arena_conversations\n- openbmb/UltraFeedback\nlanguage:\n- en\nlicense: mit\nmetrics:\n- accuracy\npipeline_tag: text-generation\ntags:\n- 
reward_model\n- reward-model\n- RLHF\n- evaluation\n- llm\n- instruction\n- reranking"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": null}, ""_id"": ""654890e828b7019eae5e39ef"", ""modelId"": ""llm-blender/PairRM"", ""usedStorage"": 3490688820}",0,,0,,0,,0,,0,"huggingface/InferenceSupport/discussions/new?title=llm-blender/PairRM&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bllm-blender%2FPairRM%5D(%2Fllm-blender%2FPairRM)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, llm-blender/LLM-Blender",2
Qwen2-7B-Instruct-GGUF_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,104 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Qwen/Qwen2-7B-Instruct-GGUF,"---
3
+ license: apache-2.0
4
+ language:
5
+ - en
6
+ pipeline_tag: text-generation
7
+ tags:
8
+ - chat
9
+ base_model: Qwen/Qwen2-7B-Instruct
10
+ ---
11
+
12
+ # Qwen2-7B-Instruct-GGUF
13
+
14
+ ## Introduction
15
+
16
+ Qwen2 is the new series of Qwen large language models. For Qwen2, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters, including a Mixture-of-Experts model. This repo contains the instruction-tuned 7B Qwen2 model.
17
+
18
+ Compared with state-of-the-art open-source language models, including the previously released Qwen1.5, Qwen2 has generally surpassed most open-source models and demonstrated competitiveness against proprietary models across a series of benchmarks targeting language understanding, language generation, multilingual capability, coding, mathematics, reasoning, etc.
19
+
20
+ For more details, please refer to our [blog](https://qwenlm.github.io/blog/qwen2/), [GitHub](https://github.com/QwenLM/Qwen2), and [Documentation](https://qwen.readthedocs.io/en/latest/).
21
+
22
+ In this repo, we provide the `fp16` model and quantized models in the GGUF format, including `q5_0`, `q5_k_m`, `q6_k` and `q8_0`.
23
+
24
+ ## Model Details
25
+ Qwen2 is a language model series that includes decoder language models of different sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, etc. Additionally, we have an improved tokenizer that adapts to multiple natural languages and code.
26
+
27
+ ## Training details
28
+ We pretrained the models with a large amount of data, and we post-trained the models with both supervised finetuning and direct preference optimization.
29
+
30
+
31
+ ## Requirements
32
+ We advise you to clone [`llama.cpp`](https://github.com/ggerganov/llama.cpp) and install it following the official guide. We follow the latest version of llama.cpp.
33
+ In the following demonstration, we assume that you are running commands under the repository `llama.cpp`.
34
+
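+ For reference, a typical clone-and-build sequence looks like the following (a sketch only; treat the official guide as authoritative, since the build steps change between llama.cpp versions):
+ 
+ ```shell
+ git clone https://github.com/ggerganov/llama.cpp
+ cd llama.cpp
+ cmake -B build
+ cmake --build build --config Release
+ ```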
35
+
36
+ ## How to use
37
+ Cloning the repo may be inefficient; instead, you can manually download the GGUF file you need, or use `huggingface-cli` (`pip install huggingface_hub`) as shown below:
38
+ ```shell
39
+ huggingface-cli download Qwen/Qwen2-7B-Instruct-GGUF qwen2-7b-instruct-q5_k_m.gguf --local-dir . --local-dir-use-symlinks False
40
+ ```
41
+
42
+ To run Qwen2, you can use `llama-cli` (the previous `main`) or `llama-server` (the previous `server`).
43
+ We recommend using `llama-server`, as it is simple and compatible with the OpenAI API. For example:
44
+
45
+ ```bash
46
+ ./llama-server -m qwen2-7b-instruct-q5_k_m.gguf -ngl 28 -fa
47
+ ```
48
+
49
+ (Note: `-ngl 28` refers to offloading 28 layers to the GPU, and `-fa` refers to the use of flash attention.)
50
+
51
+ Then it is easy to access the deployed service through the OpenAI API:
52
+
53
+ ```python
54
+ import openai
55
+
56
+ client = openai.OpenAI(
57
+ base_url=""http://localhost:8080/v1"", # ""http://<Your api-server IP>:port""
58
+ api_key = ""sk-no-key-required""
59
+ )
60
+
61
+ completion = client.chat.completions.create(
62
+ model=""qwen"",
63
+ messages=[
64
+ {""role"": ""system"", ""content"": ""You are a helpful assistant.""},
65
+ {""role"": ""user"", ""content"": ""tell me something about michael jordan""}
66
+ ]
67
+ )
68
+ print(completion.choices[0].message.content)
69
+ ```
70
+
71
+ If you choose to use `llama-cli`, note that the `-cml` flag for the ChatML template has been removed; use `--in-prefix` and `--in-suffix` to supply the template instead:
72
+
73
+ ```bash
74
+ ./llama-cli -m qwen2-7b-instruct-q5_k_m.gguf \
75
+ -n 512 -co -i -if -f prompts/chat-with-qwen.txt \
76
+ --in-prefix ""<|im_start|>user\n"" \
77
+ --in-suffix ""<|im_end|>\n<|im_start|>assistant\n"" \
78
+ -ngl 24 -fa
79
+ ```
80
+
81
+ ## Evaluation
82
+
83
+ We implement perplexity evaluation using wikitext following the practice of `llama.cpp` with `./llama-perplexity` (the previous `./perplexity`).
84
+ The PPL of GGUF models at different sizes and quantization levels is reported in the table below.
85
+
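+ A typical invocation looks like the following (a sketch: flag spellings can differ across llama.cpp versions, and the wikitext test-file path is an assumption, not part of this repo):
+ 
+ ```bash
+ ./llama-perplexity -m qwen2-7b-instruct-q5_k_m.gguf -f wikitext-2-raw/wiki.test.raw -ngl 28
+ ```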
86
+ |Size | fp16 | q8_0 | q6_k | q5_k_m | q5_0 | q4_k_m | q4_0 | q3_k_m | q2_k | iq1_m |
87
+ |--------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|
88
+ |0.5B | 15.11 | 15.13 | 15.14 | 15.24 | 15.40 | 15.36 | 16.28 | 15.70 | 16.74 | - |
89
+ |1.5B | 10.43 | 10.43 | 10.45 | 10.50 | 10.56 | 10.61 | 10.79 | 11.08 | 13.04 | - |
90
+ |7B | 7.93 | 7.94 | 7.96 | 7.97 | 7.98 | 8.02 | 8.19 | 8.20 | 10.58 | - |
91
+ |57B-A14B| 6.81 | 6.81 | 6.83 | 6.84 | 6.89 | 6.99 | 7.02 | 7.43 | - | - |
92
+ |72B | 5.58 | 5.58 | 5.59 | 5.59 | 5.60 | 5.61 | 5.66 | 5.68 | 5.91 | 6.75 |
93
+
94
+ ## Citation
95
+
96
+ If you find our work helpful, feel free to cite us.
97
+
98
+ ```
99
+ @article{qwen2,
100
+ title={Qwen2 Technical Report},
101
+ year={2024}
102
+ }
103
+ ```
104
+ ","{""id"": ""Qwen/Qwen2-7B-Instruct-GGUF"", ""author"": ""Qwen"", ""sha"": ""c3024c6fff0a02d52119ecee024bbb93d4b4b8e4"", ""last_modified"": ""2024-08-21 10:28:11+00:00"", ""created_at"": ""2024-06-06 13:18:05+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 2649, ""downloads_all_time"": null, ""likes"": 171, ""library_name"": null, ""gguf"": {""total"": 7615616512, ""architecture"": ""qwen2"", ""context_length"": 32768, ""chat_template"": ""{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"", ""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|im_end|>""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""gguf"", ""chat"", ""text-generation"", ""en"", ""base_model:Qwen/Qwen2-7B-Instruct"", ""base_model:quantized:Qwen/Qwen2-7B-Instruct"", ""license:apache-2.0"", ""endpoints_compatible"", ""region:us"", ""conversational""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: Qwen/Qwen2-7B-Instruct\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- chat"", ""widget_data"": [{""text"": ""Hi, what can you help me with?""}, {""text"": ""What is 84 * 3 / 2?""}, {""text"": ""Tell me an interesting fact about the universe!""}, {""text"": ""Explain quantum computing in simple terms.""}], ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2-7b-instruct-fp16.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2-7b-instruct-q2_k.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2-7b-instruct-q3_k_m.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2-7b-instruct-q4_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2-7b-instruct-q4_k_m.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2-7b-instruct-q5_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2-7b-instruct-q5_k_m.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2-7b-instruct-q6_k.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='qwen2-7b-instruct-q8_0.gguf', size=None, blob_id=None, lfs=None)""], ""spaces"": [""nakcnx/paotung-qwen2""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-21 10:28:11+00:00"", ""cardData"": ""base_model: Qwen/Qwen2-7B-Instruct\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- chat"", ""transformersInfo"": null, ""_id"": ""6661b70d346ffbe5a3d44175"", ""modelId"": ""Qwen/Qwen2-7B-Instruct-GGUF"", ""usedStorage"": 
96665392608}",0,,0,,0,,0,,0,"huggingface/InferenceSupport/discussions/new?title=Qwen/Qwen2-7B-Instruct-GGUF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BQwen%2FQwen2-7B-Instruct-GGUF%5D(%2FQwen%2FQwen2-7B-Instruct-GGUF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, nakcnx/paotung-qwen2",2
SkyReels-V1-Hunyuan_comfy_finetunes_20250426_221535.csv_finetunes_20250426_221535.csv ADDED
@@ -0,0 +1,17 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Kijai/SkyReels-V1-Hunyuan_comfy,"https://huggingface.co/Skywork/SkyReels-V1-Hunyuan-I2V
3
+
4
+ https://huggingface.co/Skywork/SkyReels-V1-Hunyuan-T2V
5
+
6
+ Converted from the diffusers format to the original HunyuanVideo format, which works with both my wrapper ComfyUI nodes and native ComfyUI Hunyuan workflows.
7
+
8
+ In native workflows you'd feed the image in using the ""InstructPixtoPixConditioning"" node, or a similar node that adds an encoded image as extra conditioning.
9
+
10
+ Example workflow: https://huggingface.co/Kijai/SkyReels-V1-Hunyuan_comfy/resolve/main/skyreels_hunyuan_I2V_native_example_01.json
11
+
12
+ The GGUF versions were created with help from city96, using his conversion scripts; they work with his nodes: https://github.com/city96/ComfyUI-GGUF
13
+
14
+
15
+ <video controls autoplay width=50% src=""https://cdn-uploads.huggingface.co/production/uploads/63297908f0b2fc94904a65b8/VBBQWyEHb919R3lhWvEGV.mp4""></video>
16
+
17
+ ","{""id"": ""Kijai/SkyReels-V1-Hunyuan_comfy"", ""author"": ""Kijai"", ""sha"": ""1b06bc18d9e1afc47b8e6024f67af11c1d07cd7d"", ""last_modified"": ""2025-02-20 10:26:33+00:00"", ""created_at"": ""2025-02-18 01:23:08+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1820, ""downloads_all_time"": null, ""likes"": 197, ""library_name"": null, ""gguf"": {""total"": 12821209152, ""architecture"": ""hyvid""}, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""gguf"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": null, ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='SkyReelHyVidComfyNative_precision_comparison.mp4', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='skyreels-hunyuan-I2V-Q3_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='skyreels-hunyuan-I2V-Q4_K_S.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='skyreels-hunyuan-I2V-Q5_K_M.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='skyreels-hunyuan-I2V-Q6_K.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='skyreels-hunyuan-I2V-Q8_0.gguf', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='skyreels_hunyuan_I2V_native_example_01.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='skyreels_hunyuan_i2v_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='skyreels_hunyuan_i2v_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='skyreels_hunyuan_t2v_bf16.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='skyreels_hunyuan_t2v_fp8_e4m3fn.safetensors', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2025-02-20 10:26:33+00:00"", ""cardData"": null, ""transformersInfo"": null, ""_id"": ""67b3e0fc5e13a2d1f85a6389"", ""modelId"": ""Kijai/SkyReels-V1-Hunyuan_comfy"", ""usedStorage"": 151172595165}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=Kijai/SkyReels-V1-Hunyuan_comfy&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BKijai%2FSkyReels-V1-Hunyuan_comfy%5D(%2FKijai%2FSkyReels-V1-Hunyuan_comfy)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
all-MiniLM-L6-v2_finetunes_20250422_225920.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6a3d0c8a1218b15124484587f740501633d6f0dde52ca01e001d80aa820eff8
3
+ size 20570090
all-mpnet-base-v2_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb70ab99298bba50755cab552839f62317210a47e02720a137402b2f1a29fee1
3
+ size 13203064
bert-base-cased_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5743ec905f9202d2a26617d64d892e0771a61e52c4704aef7d5e8a3284898da
3
+ size 15156985
bert-base-multilingual-cased_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d06a74858e191a01c5973b594e1439e3f4d47a926d5c8c9165c1d544a65099eb
3
+ size 10673158
bge-m3_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bee481ef38f6ad11297e9356ab1ad30011a1b6aa3bce692a05049739b29bd86d
3
+ size 24501875
bloomz_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
The diff for this file is too large to render. See raw diff
 
chatglm-fitness-RLHF_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,148 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ fb700/chatglm-fitness-RLHF,"---
3
+ language:
4
+ - zh
5
+ - en
6
+ tags:
7
+ - chatglm-6b
8
+ - chatglm2-6b
9
+ - pytorch
10
+ - peft
11
+ - ft
12
+ - sft
13
+ - PPO
14
+ - RLHF
15
+ - RM
16
+ - Transformer
17
+ license: ""apache-2.0""
18
+ ---
19
+ # Important: this model only runs on earlier versions of the transformers library, e.g. 4.30.2
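+ 
+ One way to satisfy this constraint is simply to pin the dependency (a sketch; 4.30.2 is the example version named above):
+ 
+ ```shell
+ pip install transformers==4.30.2
+ ```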
20
+ - Demo: https://huggingface.co/spaces/fb700/chatglm-fitness-RLHF . Too many test users were overloading the service, so a password has been set to keep the experience usable; it may be updated periodically. Current account/password: test/qwer4321
21
+ - Capability test report: https://huggingface.co/fb700/Bofan-chatglm-Best-lora/blob/main/modelapplytest.md
22
+ # Big news
23
+ - In testing by many community users, this model's Chinese summarization ability surpasses the various GPT-3.5 versions, and its health-consultation quality is also outstanding among models of the same parameter scale; it may well be the first-choice model for individuals and small or mid-sized businesses.
24
+ # Major breakthrough
25
+ - Although the newer chatglm2-6b supports 32k context, the model I trained was optimized earlier and has long supported unlimited context, far beyond 4K, 8K or 16K.
26
+ # ChatGLM-6B RLHF & LoRA Model
27
+
28
+ ChatGLM-6B is an open-source Chinese-English bilingual dialogue model. This training run is based on the first-generation ChatGLM-6B and preserves its many strengths, such as fluent dialogue and a low deployment barrier. Through this training we gained a deeper understanding of the model: LLMs keep evolving, and good methods and data can unlock more of a model's potential.
29
+ ## Training method
30
+
31
+ - First, 400k high-quality records were used for reinforcement training to strengthen the model's base capability;
32
+ - Second, 300k human-feedback records were used to build a reward model (RM) encoding a well-formed, elegant style of expression;
33
+ - Third, while retaining one third of the SFT-stage training data, 300k fitness records were added and, together with the RM model, used for reinforcement training of ChatGLM-6B.
34
+ - Results: after training, the model is not only stronger than chatglm-6b at health consultation and document summarization, but on some abilities even stronger than chatglm2-6b; combined with knowledge-base projects such as 闻达 (Wenda) and langchain-chatglm, the application experience improves markedly over chatglm-6b, chatglm2-6b and Baichuan-7B.
35
+ - Performance: at fp16 it runs about 20% faster than the original model. It can replace the official model, and you can use it at fp16, int4 or int8.
36
+ - Compatibility: the full model runs exactly the same way as the original. For the LoRA file, we recommend loading it on the original chatglm-6b; it loads normally on chatglm2-6b but this is not recommended, since tests (by community user 大笨熊) show some effect but not full effectiveness.
37
+ - Features: building on the model's strong understanding and summarization of natural dialogue, continuous conversation is not limited by tokens, supporting an unlimited number of dialogue rounds.
38
+ - License
39
+ - The code in this repository is open-sourced under the Apache-2.0 license; use of the ChatGLM2-6B model weights must follow the Model License.
40
+ - Licensing terms are the same as the original project: commercial use is not permitted without approval from the original chatglm-6b developers. See the original project's terms for details; model: https://huggingface.co/THUDM/chatglm-6b
41
+ - This training was carried out independently in 2023 by AI user [帛凡] on top of ChatGLM-6b. (Resale or use in commercial projects without the author's consent is strictly prohibited; any knowledge produced through this project is for reference only, and the author assumes no liability.)
42
+ - Baidu Netdisk: https://pan.baidu.com/s/1l9q_7h8nGdelIwYlCbllMg?pwd=klhu (thanks to community user 宋小猫 for sharing)
43
+ - Quark Netdisk: https://pan.quark.cn/s/d947c6dbf592
44
+ - Capability test: https://huggingface.co/fb700/Bofan-chatglm-Best-lora/blob/main/modelapplytest.md
45
+ - Quantization evaluation of the original model
46
+ ![Quantization evaluation of the original model](glm_eval.jpg)
47
+ - Quantization evaluation after training
48
+ ![Quantization evaluation after training](lora_eva.jpg)
49
+ ## Usage 1: with 16 GB+ VRAM, download the archive (i.e. the LoRA file); supports both the native ChatGLM model and the LoRA fine-tuned model
51
+ (HuggingFace Transformers)
52
+ First, you pass your input through the transformer model, then you get the generated sentence.
53
+ Install package:
54
+ ```
55
+ pip install transformers
56
+ ```
57
+
58
+ ```python
59
+
60
+ import sys
61
+ from peft import PeftModel
62
+ from transformers import AutoModel, AutoTokenizer
63
+ sys.path.append('..')
64
+ model = AutoModel.from_pretrained(""THUDM/chatglm-6b"", device_map='auto')
65
+ model = PeftModel.from_pretrained(model, ""model/chatglm_fitness_lora"")#""model/chatglm_fitness_lora""为您下载本项目压缩包后,解压后本地lora目录
66
+ model = model.half().cuda() # fp16
67
+ tokenizer = AutoTokenizer.from_pretrained(""THUDM/chatglm-6b"", trust_remote_code=True)
68
+ sents = ['新冠肺炎怎么预防。\n答:']
69
+ for s in sents:
70
+ response = model.chat(tokenizer, s, max_length=128, eos_token_id=tokenizer.eos_token_id)
71
+ print(response)
72
+ ```
73
+ output:
74
+ ```shell
75
+ 例一:常识综合问答
76
+ 问: 新冠肺炎怎么预防
77
+ 答:新冠肺炎是一种由新型冠状病毒引起的疾病,可以通过飞沫传播和接触传播。以下是一些预防新冠肺炎的建议:
78
+ 戴口罩:佩戴口罩可以防止呼吸道飞沫传播,减少感染风险。在公共场合、公共交通工具上、与他人交往时,应佩戴医用口罩或N95口罩。
79
+ 勤洗手:经常用肥皂和温水洗手,尤其在接触公共场所、物品或与他人交往后,或咳嗽、打喷嚏、擤鼻涕后等。
80
+ 保持社交距离:尽量避免近距离接触他人,保持至少1米的社交距离。
81
+ 避免聚集:尽量避免参加人员密集的聚会、活动或旅行。
82
+ 加强室内通风:在室内保持良好的通风可以降低病毒在空气中的浓度。
83
+ 健康生活方式:保持充足的睡眠、均衡的饮食、适量的运动等健康生活方式可以增强身体免疫力,预防感染。
84
+ 接种疫苗:疫苗接种是预防新冠肺炎最有效的方法之一。如果有机会,应接种疫苗。
85
+ 预防新冠肺炎需要个人和社会的共同努力,遵守公共卫生规定,加强个人防护,减少接触风险。
86
+ 例二:较为专业的医疗和心理学问答
87
+ 问:系统性红斑狼疮的危害和治疗方法是什么?
88
+ 答:系统性红斑狼疮(Systemic Lupus Erythematosus,SLE)是一种自身免疫性疾病,可以影响身体的多个器官和组织。SLE的危害包括:
89
+ 1. 心血管疾病:SLE可以引起高血压、心脏病和中风等心血管疾病。
90
+ 2. 肾脏疾病:SLE可以引起肾小球肾炎和肾衰竭等肾脏疾病。
91
+ 3. 口腔溃疡:SLE可以引起口腔和舌头溃疡等口腔疾病。
92
+ 4. 皮肤疾病:SLE可以引起皮疹、红斑狼疮面容和光敏感等皮肤疾病。
93
+
94
+ 治疗方法包括:
95
+ 1. 药物治疗:SLE可以使用非甾体抗炎药、激素类药物和免疫抑制剂等药物治疗。
96
+ 2. 支持性治疗:SLE可以使用营养支持、心理治疗和支持性护理等支持性治疗。
97
+ 3. 手术治疗:在一些情况下,SLE可能需要手术治疗,如肾脏移植等。
98
+
99
+ SLE是一种严重的疾病,需要患者及时接受专业治疗。患者需要积极配合医生的治疗,保持良好的生活习惯和饮食习惯,定期进行检查和监测。
100
+ 例三:无限多轮次的智能对话(第一轮见例二)
101
+ 问:早期激素和免疫抑制剂用药建议是什么?
102
+ 答:系统性红斑狼疮的早期治疗非常重要,因为病情的严重程度和进展速度可能会影响患者的生活质量和生命安全。对于系统性红斑狼疮的早期治疗,激素和免疫抑制剂的使用是非常重要的。以下是一些早期激素和免疫抑制剂用药的建议:
103
+ 1. 激素类药物:激素类药物是系统性红斑狼疮的主要治疗方法之一。在使用激素类药物时,需要遵循医生的建议,根据病情和身体状况进行剂量和用药时间的调整。一般来说,激素类药物需要长期使用,以避免药物副作用和病情的复发。
104
+ 2. 免疫抑制剂:免疫抑制剂是系统性红斑狼疮另一种重要的治疗方法。在使用免疫抑制剂时,需要遵循医生的建议,根据病情和身体状况进行用药时间和剂量的调整。免疫抑制剂可以用于治疗系统性红斑狼疮的各种症状和并发症,如肾脏疾病、口腔溃疡、皮肤疾病等。
105
+ 3. 其他治疗:除了激素和免疫抑制剂之外,系统性红斑狼疮的治疗还包括其他治疗方法,如支持性治疗、手术治疗等。在使用这些治疗方法时,需要遵循医生的建议,根据病情和身体状况进行用药和治疗。
106
+
107
+ 早期激素和免疫抑制剂的使用非常重要,需要患者积极配合医生的治疗,遵循医生的建议,定期进行检查和监测,以确保病情得到有效控制和生活质量得到保障。
108
+ ```
109
+
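+ Since unlimited multi-round dialogue is one of the highlighted features, here is a minimal sketch of a multi-round loop; it assumes the standard ChatGLM `chat` API, which accepts and returns a `history` list:
+ 
+ ```python
+ # thread `history` through successive calls to keep the conversational context
+ history = []
+ for query in [""系统性红斑狼疮的危害和治疗方法是什么?"", ""早期激素和免疫抑制剂用药建议是什么?""]:
+     response, history = model.chat(tokenizer, query, history=history)
+     print(response)
+ ```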
110
+ Model file layout:
111
+ ```
112
+ chatglm_fitness_lora
113
+ ├── adapter_config.json
114
+ └── adapter_model.bin
115
+ ```
116
+ --------------------------------------------------------------------------------
117
+ ## Usage 2: with 16 GB or less VRAM, download the whole model; supports fp16, int8 and int4
118
+ First, you pass your input through the transformer model, then you get the generated sentence.
119
+ ```
120
+ pip install transformers
121
+
122
+ ```
123
+
124
+ ```python
125
+
126
+ import sys
127
+ from peft import PeftModel
128
+ from transformers import AutoModel, AutoTokenizer
129
+ sys.path.append('..')
130
+ model = AutoModel.from_pretrained(""fb700/chatglm-fitness-RLHF"", device_map='auto')#fb700/chatglm-fitness-RLHF为hg自动下载地址,如已经自行下载请替换
131
+ #model = PeftModel.from_pretrained(model, ""model/chatglm_fitness_lora"") # lora文件保存目录
132
+ model = model.half().quantize(4).cuda() # int4
133
+ #model = model.half().quantize(8).cuda() # int8
134
+ #model = model.half().cuda() # fp16
135
+ tokenizer = AutoTokenizer.from_pretrained(""fb700/chatglm-fitness-RLHF"", trust_remote_code=True)
136
+ sents = ['新冠肺炎怎么预防。\n答:']
137
+ for s in sents:
138
+ response = model.chat(tokenizer, s, max_length=128, eos_token_id=tokenizer.eos_token_id)
139
+ print(response)
140
+ ```
141
+ output:
142
+ ```shell
143
+ 例四:优于chatglm-6b、chatglm2-6b和百川-7b等类似参数量模型的总结归纳能力
144
+ 问:请用简短的语言总结下面的文字:
145
+ 大语言模型是指能够生成、理解和处理自然语言的高度智能化的计算机模型。这些模型使用深度学习技术,尤其是循环神经网络(RNN)或变种,如长短期记忆(LSTM)或注意力机制(attention mechanism),从大规模文本语料库中进行训练。
146
+ 大语言模型的训练过程通常基于预测下一个单词或字符的任务。通过对大量文本数据进行训练,模型能够学习到语言的潜在表达式、结构和语义含义。这使得大语言模型能够产生流畅、连贯的文本,回答问题,完成翻译任务,生成代码等。
147
+ 答:大语言模型是一种使用深度学习技术训练的计算机模型,能够生成、理解和处理自然语言。通过训练大量文本数据,大语言模型能够产生流畅、连贯的文本,回答问题,完成翻译任务,生成代码等。
148
+ ```","{""id"": ""fb700/chatglm-fitness-RLHF"", ""author"": ""fb700"", ""sha"": ""f66e9ad6184980f0004d4d4c8e5fbffc8b49d782"", ""last_modified"": ""2024-03-06 13:28:18+00:00"", ""created_at"": ""2023-07-08 04:02:28+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 268, ""library_name"": ""peft"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""peft"", ""pytorch"", ""chatglm"", ""chatglm-6b"", ""chatglm2-6b"", ""ft"", ""sft"", ""PPO"", ""RLHF"", ""RM"", ""Transformer"", ""custom_code"", ""zh"", ""en"", ""license:apache-2.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- zh\n- en\nlicense: apache-2.0\ntags:\n- chatglm-6b\n- chatglm2-6b\n- pytorch\n- peft\n- ft\n- sft\n- PPO\n- RLHF\n- RM\n- Transformer"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""ChatGLMForConditionalGeneration""], ""auto_map"": {""AutoConfig"": ""configuration_chatglm.ChatGLMConfig"", ""AutoModel"": ""modeling_chatglm.ChatGLMForConditionalGeneration"", ""AutoModelForSeq2SeqLM"": ""modeling_chatglm.ChatGLMForConditionalGeneration""}, ""model_type"": ""chatglm"", ""tokenizer_config"": {""bos_token"": ""<sop>"", ""eos_token"": ""<eop>"", ""mask_token"": ""[MASK]"", ""pad_token"": ""<pad>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='MODEL_LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='chatglm_fitness_lora.zip', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='glm_eval.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='ice_text.model', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='lora_eva.jpg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00001-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00002-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00003-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00004-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00005-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00006-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00007-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00008-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00009-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00010-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00011-of-00014.bin', size=None, blob_id=None, lfs=None)"", 
""RepoSibling(rfilename='pytorch_model-00012-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00013-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model-00014-of-00014.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='quantization.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='test_modeling_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_chatglm.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""fb700/chatglm-fitness-RLHF"", ""fb700/bofan"", ""Beuys/chatbot""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-03-06 13:28:18+00:00"", ""cardData"": ""language:\n- zh\n- en\nlicense: apache-2.0\ntags:\n- chatglm-6b\n- chatglm2-6b\n- pytorch\n- peft\n- ft\n- sft\n- PPO\n- RLHF\n- RM\n- Transformer"", ""transformersInfo"": null, ""_id"": ""64a8dfd4de2d860082cd2428"", ""modelId"": ""fb700/chatglm-fitness-RLHF"", ""usedStorage"": 24709632795}",0,,0,,0,,0,,0,"Beuys/chatbot, fb700/bofan, fb700/chatglm-fitness-RLHF, huggingface/InferenceSupport/discussions/new?title=fb700/chatglm-fitness-RLHF&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bfb700%2Fchatglm-fitness-RLHF%5D(%2Ffb700%2Fchatglm-fitness-RLHF)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",4
cpm-bee-10b_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,108 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ openbmb/cpm-bee-10b,"---
3
+ language:
4
+ - en
5
+ - zh
6
+ pipeline_tag: text-generation
7
+ ---
8
+
9
+ # CPM-Bee
10
+
11
+ **CPM-Bee** is a fully open-source, commercially-usable Chinese-English bilingual base model with a capacity of ten billion parameters. It is the second milestone achieved through the training process of [**CPM-live**](https://live.openbmb.org/).
12
+ Utilizing the Transformer auto-regressive architecture, CPM-Bee has been pre-trained on an extensive corpus of trillion-scale tokens, thereby possessing remarkable foundational capabilities.
13
+
14
+ ## Model description
15
+
16
+ - **Open-source and Commercially Usable**: OpenBMB adheres to the spirit of open source, aiming to make large-scale models accessible to everyone. CPM-Bee, as a foundation model, is fully open-source and available for commercial use, contributing to the advancement of the field of large-scale models.
17
+
18
+ - **Excellent Performance in Chinese and English**: CPM-Bee's base model has undergone rigorous selection and balancing of pre-training data, resulting in outstanding performance in both Chinese and English. For detailed information regarding evaluation tasks and results, please refer to the assessment documentation.
19
+
20
+
21
+ - **Vast and High-quality Corpus**: CPM-Bee, as a base model, has been trained on an extensive corpus of over a trillion tokens, making it one of the models with the highest volume of training data in the open-source community. Furthermore, we have implemented stringent selection, cleaning, and post-processing procedures on the pre-training corpus to ensure its quality.
22
+
23
+ - **Support for OpenBMB System**: The OpenBMB system provides a comprehensive ecosystem of tools and scripts for high-performance pre-training, adaptation, compression, deployment, and tool development. CPM-Bee, as a base model, is accompanied by all the necessary tool scripts, enabling developers to efficiently utilize and explore advanced functionalities.
24
+
25
+
26
+ - **Conversational and Tool Usage Capabilities**: Building upon OpenBMB's exploration in instruction-based fine-tuning and tool learning, we have performed fine-tuning on top of the CPM-Bee base model, resulting in an instance model with powerful conversational and tool usage capabilities. The API and beta testing for this model will be made available in the near future.
27
+
28
+ ## Intended uses & limitations
29
+
30
+ You can use the raw model for many NLP tasks, like text generation, or fine-tune it on a downstream task.
31
+
32
+ ### How to use
33
+
34
+ ```python
35
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
36
+ >>> tokenizer = AutoTokenizer.from_pretrained(""openbmb/cpm-bee-10b"", trust_remote_code=True)
37
+ >>> model = AutoModelForCausalLM.from_pretrained(""openbmb/cpm-bee-10b"", trust_remote_code=True).cuda()
38
+ >>> result = model.generate({""input"": ""今天天气不错,"", ""<ans>"": """"}, tokenizer)
39
+ >>> print(result)
40
+ [{'input': '今天天气不错,', '<ans>': '适合睡觉。'}]
41
+ ```
42
+
43
+ If you want to use multiple GPUs for inference, you can use `accelerate` as follows:
44
+
45
+ ```python
46
+ from transformers import AutoModelForCausalLM, AutoTokenizer
47
+ from accelerate import dispatch_model
48
+ from accelerate.utils import get_balanced_memory, infer_auto_device_map
49
+
50
+ tokenizer = AutoTokenizer.from_pretrained(""openbmb/cpm-bee-10b"", trust_remote_code=True)
51
+ model = AutoModelForCausalLM.from_pretrained(""openbmb/cpm-bee-10b"", trust_remote_code=True).cuda()
52
+
53
+ max_memory = get_balanced_memory(
54
+ model,
55
+ no_split_module_classes=[""CpmBeeTransformerBlock""]
56
+ )
57
+ device_map = infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=[""CpmBeeTransformerBlock""])
58
+ # make sure the data is on the same device when projecting hidden states to logits.
59
+ device_map[""cpmbee.encoder.output_layernorm""] = device_map[""cpmbee.input_embedding""] = 0
60
+
61
+ model = dispatch_model(model, device_map=device_map)
62
+
63
+ res = model.generate(
64
+ [
65
+ {""input"": ""今天天气是真的"", ""<ans>"": """"},
66
+ {""input"": ""NGC 6231是一个位于天蝎座的疏散星团,天球座标为赤经16时54分,赤纬-41度48分,视觉观测大小约45角分,亮度约2.6视星等,距地球5900光年。NGC 6231年龄约为三百二十万年,是一个非常年轻的星团,星团内的最亮星是5等的天蝎座 ζ1星。用双筒望远镜或小型望远镜就能看到个别的行星。NGC 6231在1654年被意大利天文学家乔瓦尼·巴蒂斯特·霍迪尔纳(Giovanni Battista Hodierna)以Luminosae的名字首次纪录在星表中,但是未见记载于夏尔·梅西耶的天体列表和威廉·赫歇尔的深空天体目录。这个天体在1678年被爱德蒙·哈雷(I.7)、1745年被夏西亚科斯(Jean-Phillippe Loys de Cheseaux)(9)、1751年被尼可拉·路易·拉卡伊(II.13)分别再次独立发现。"", ""question"": ""NGC 6231的经纬度是多少?"", ""<ans>"": """"}
67
+ ],
68
+ tokenizer,
69
+ max_new_tokens=100
70
+ )
71
+ print(res)
72
+
73
+ ```
74
+
75
+ We suggest using `bmtrain` to finetune CPM-Bee; you can also use `accelerate` and `deepspeed`. Here is a brief example of a training loop:
76
+
77
+ ```python
78
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
79
+ from accelerate import Accelerator
80
+ from torch.utils.data import Dataset, DataLoader
81
+
82
+ accelerator = Accelerator()
83
+
84
+ trainset = Dataset() # Make sure trainset.__getitem__() can get data with correct format like {""input"": ""..."", ""<ans>"": """"}
85
+ # for details, you can read https://github.com/OpenBMB/CPM-Bee/tree/main/tutorials/basic_task_finetune
86
+ train_loader = DataLoader(trainset, batch_size=1)
87
+
88
+ tokenizer = AutoTokenizer.from_pretrained(""openbmb/cpm-bee-10b"", trust_remote_code=True)
89
+ model = AutoModelForCausalLM.from_pretrained(""openbmb/cpm-bee-10b"", trust_remote_code=True).cuda()
90
+
91
+ optimizer = torch.optim.Adam(model.parameters())
92
+
93
+ model, optimizer, train_loader = accelerator.prepare(
94
+ model, optimizer, train_loader
95
+ )
96
+
97
+ for iter, data in enumerate(train_loader):
98
+ optimizer.zero_grad()
99
+
100
+ # change the data to a trainable format
101
+ input_encoded = tokenizer.prepare_for_finetune(data, max_length=512).to(model.device)
102
+
103
+ outputs = model(**input_encoded)
104
+ loss = outputs.loss
105
+ accelerator.backward(loss)
106
+ optimizer.step()
107
+ ```
108
+ You should design your own parallel and mix_precision training strategy on the basis of it.","{""id"": ""openbmb/cpm-bee-10b"", ""author"": ""openbmb"", ""sha"": ""97ee8f5222c018700a4eb3778e2b873ef6821609"", ""last_modified"": ""2024-02-21 02:30:30+00:00"", ""created_at"": ""2023-05-26 17:52:46+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 192, ""downloads_all_time"": null, ""likes"": 170, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""cpmbee"", ""feature-extraction"", ""text-generation"", ""custom_code"", ""en"", ""zh"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\n- zh\npipeline_tag: text-generation"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""CpmBeeForCausalLM""], ""auto_map"": {""AutoConfig"": ""configuration_cpmbee.CpmBeeConfig"", ""AutoModel"": ""modeling_cpmbee.CpmBeeForCausalLM"", ""AutoModelForCausalLM"": ""modeling_cpmbee.CpmBeeForCausalLM""}, ""model_type"": ""cpmbee"", ""tokenizer_config"": {}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_cpmbee.CpmBeeForCausalLM"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_cpmbee.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_cpmbee.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='test_modeling_cpmbee.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='test_tokenization_cpmbee.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenization_cpmbee.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [""hostea/openbmb-cpm-bee-10b"", ""Cran-May/yugang-cpm-bee-10b"", ""jljiu/gyhh""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-21 02:30:30+00:00"", ""cardData"": ""language:\n- en\n- zh\npipeline_tag: text-generation"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": ""modeling_cpmbee.CpmBeeForCausalLM"", ""pipeline_tag"": ""feature-extraction"", ""processor"": null}, ""_id"": ""6470f1ee9fe78d69a8b580dd"", ""modelId"": ""openbmb/cpm-bee-10b"", ""usedStorage"": 66572549002}",0,,0,,0,,0,,0,"Cran-May/yugang-cpm-bee-10b, hostea/openbmb-cpm-bee-10b, 
huggingface/InferenceSupport/discussions/new?title=openbmb/cpm-bee-10b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bopenbmb%2Fcpm-bee-10b%5D(%2Fopenbmb%2Fcpm-bee-10b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, jljiu/gyhh",4
dolly-v1-6b_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
@@ -0,0 +1,223 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ databricks/dolly-v1-6b,"---
3
+ license: cc-by-nc-4.0
4
+ datasets:
5
+ - tatsu-lab/alpaca
6
+ language:
7
+ - en
8
+ library_name: transformers
9
+ inference: false
10
+ ---
11
+ # dolly-v1-6b Model Card
12
+
13
+ ## Dolly v2 Is Out!
14
+
15
+ Please try Dolly v2 instead:
16
+
17
+ - https://huggingface.co/databricks/dolly-v2-12b
18
+ - https://huggingface.co/databricks/dolly-v2-7b
19
+ - https://huggingface.co/databricks/dolly-v2-3b
20
+
21
+ ## Summary
22
+
23
+ Databricks’ `dolly-v1-6b`, a large language model ([blog post](https://www.databricks.com/blog/2023/03/24/hello-dolly-democratizing-magic-chatgpt-open-models.html))
24
+ trained on the Databricks machine learning platform, demonstrates that a
25
+ two-year-old [open source model](https://huggingface.co/EleutherAI/gpt-j-6B) can, when subjected to just 30 minutes of fine-tuning on a focused corpus of 50k records
26
+ ([Stanford Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html)), exhibit surprisingly high quality instruction following behavior not characteristic of the foundation
27
+ model on which it is based. We believe this finding is important because it demonstrates that the ability to create powerful
28
+ artificial intelligence technologies is vastly more accessible than previously realized.
29
+
30
+ Databricks is committed to ensuring that every organization and individual benefits from the transformative power of artificial intelligence. The Dolly model family represents our first steps along this journey, and we’re excited to share this technology with the world.
31
+
32
+ **Owner**: Databricks, Inc.
33
+
34
+ ## Model Overview
35
+ `dolly-v1-6b` is a 6 billion parameter causal language model created by [Databricks](https://databricks.com/) that is derived from
36
+ [EleutherAI’s](https://www.eleuther.ai/) [GPT-J](https://huggingface.co/EleutherAI/gpt-j-6B) (released June 2021) and fine-tuned
37
+ on a ~52K record instruction corpus ([Stanford Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html)) (CC-NC-BY-4.0)
38
+ consisting of question/answer pairs generated using the techniques outlined in the [Self-Instruct](https://arxiv.org/abs/2212.10560) paper.
39
+ The [original version](https://www.databricks.com/blog/2023/03/24/hello-dolly-democratizing-magic-chatgpt-open-models.html) of Dolly was trained using [deepspeed](https://github.com/microsoft/DeepSpeed) [ZeRO 3](https://github.com/microsoft/DeepSpeed/blob/master/docs/code-docs/source/zero3.rst)
40
+ on the [Databricks Machine Learning Platform](https://www.databricks.com/product/machine-learning) in just 30 minutes (1 epoch) using a single
41
+ [NDasrA100_v4](https://learn.microsoft.com/en-us/azure/virtual-machines/nda100-v4-series) machine with 8x A100 40GB GPUs.
42
+ The most recent `dolly-v1-6b` checkpoint was trained for 10 epochs on the same hardware.
43
+
44
+ Like its base model, `dolly-v1-6b` has six billion parameters consisting of 28 transformer layers with 16 attention heads each.
45
+ It employs [Rotary Position Embedding](https://arxiv.org/abs/2104.09864) (RoPE) and shares the same tokenizer as GPT-3.
46
+ GPT-J was trained on [The Pile](https://huggingface.co/datasets/the_pile), a 400B token dataset of diverse documents designed primarily for text generation tasks.
47
+
48
+ ## Known Limitations
49
+ **`dolly-v1-6b` is not a state-of-the-art generative language model** and, though quantitative benchmarking is ongoing, is not designed to perform
50
+ competitively with more modern model architectures or models subject to larger pretraining corpora. **It is designed for academic or research purposes, and to encourage model and engineering experimentation.**
51
+
52
+ The Dolly model family is under active development, and so any list of shortcomings is unlikely to be exhaustive, but we include known limitations and misfires here as a means to document and share our preliminary findings with the community. In particular, `dolly-v1-6b` struggles with: syntactically complex prompts, programming problems, mathematical operations, factual errors,
53
+ dates and times, open-ended question answering, hallucination, enumerating lists of specific length, stylistic mimicry, having a sense of humor, etc.
54
+
55
+ ## Training Data, Bias & Objectionable Content
56
+ Like all language models, `dolly-v1-6b` reflects the content and limitations of its training corpora.
57
+
58
+ - **The Pile**: GPT-J’s pre-training corpus contains content mostly collected from the public internet, and like most web-scale datasets,
59
+ it contains content many users would find objectionable. As such, the model is likely to reflect these shortcomings, potentially overtly
60
+ in the case it is explicitly asked to produce objectionable content, and sometimes subtly, as in the case of biased or harmful implicit
61
+ associations.
62
+
63
+ - **Stanford Alpaca**: The instruction-tuning corpus for dolly-6b can be assumed to share many of these limitations. In addition, it is known to
64
+ contain factual inaccuracies, semantic and syntactic irregularities, nonsensical responses, and incorrect mathematical calculations, among
65
+ other data shortcomings. The model outputs will reflect these limitations.
66
+
67
+ Databricks is committed to ongoing research and development efforts to develop helpful, honest and harmless AI technologies that
68
+ maximize the potential of all individuals and organizations.
69
+
70
+ ## Intended Uses
71
+ **`dolly-v1-6b` is intended exclusively for research purposes. We do not recommend using `dolly-v1-6b` in high-risk applications
72
+ (e.g., educational or vocational training, product safety components, or other uses that may impact the well-being of individuals.)**
73
+
74
+ `dolly-v1-6b` is differentiated from its predecessor, GPT-J, in the extent to which it exhibits coherent responses to natural language
75
+ questions and instruction prompts. `dolly-v1-6b` appears to exhibit reasonable (but categorically not state-of-the-art) behavior across a range
76
+ of capabilities, including many of those outlined in the [InstructGPT](https://arxiv.org/abs/2203.02155) paper.
77
+
78
+ We include evocative examples below, but invite the reader to observe that they contain factual inaccuracies or semantic misfires that illustrate some of the model’s shortcomings.
79
+
80
+ ### Brainstorming
81
+ **What are considerations I should keep in mind when planning a backcountry backpacking trip?**
82
+
83
+ When planning a backcountry backpacking trip, there are several considerations to keep in mind. First, make sure you are prepared for any potential weather conditions, including extreme temperatures and lightning storms. Second, consider the route you will take and the terrain that you will be traversing. Third, make sure that you have enough food, water and gear to stay safe and comfortable during your backcountry journey. Fourth, plan for potential wildlife encounters and make sure you have the appropriate safety measures in place. Finally, have an emergency plan in place in case of a medical emergency.
84
+
85
+
86
+ ### Classification
87
+ **Label each of the following as either a scientific concept or a product: Nikon D750, quantum entanglement, CRISPR, and a Macbook Pro.**
88
+
89
+ Nikon D750: Product
90
+
91
+ Quantum entanglement: Scientific Concept
92
+
93
+ CRISPR: Product
94
+
95
+ Macbook Pro: Product
96
+
97
+
98
+ ### Extraction
99
+ **Extract all the people and places from the following passage:
100
+ Basquiat was born on December 22, 1960, in Park Slope, Brooklyn, New York City, the second of four children to Matilde Basquiat (née Andrades, 1934–2008) and Gérard Basquiat (1930–2013). He had an older brother, Max, who died shortly before his birth, and two younger sisters, Lisane (b. 1964) and Jeanine (b. 1967). His father was born in Port-au-Prince, Haiti and his mother was born in Brooklyn to Puerto Rican parents. He was raised Catholic.**
101
+
102
+ People: Basquiat, Matilde Basquiat, Gérard Basquiat, Max Basquiat, Lisane Basquiat, Jeanine Basquiat.
103
+
104
+ Place: Park Slope, Brooklyn, New York City.
105
+
106
+
107
+ ### Generation
108
+ **Write a press release declaring the completion of Atlantis II, a facility designed for long-term human habitation at the bottom of the ocean. Be sure to mention some of its advanced technological features.**
109
+
110
+ Atlantis II, a new facility designed for long-term human habitation at the bottom of the ocean, is now complete! With advanced technological features, it will be a safe and comfortable living environment for generations to come. From self-sustaining ecosystems to advanced robotics, the facility offers a range of opportunities for exploration, discovery and exploration.
111
+
112
+ Atlantis II will be a home for a wide range of species and will serve as a hub for ocean research, exploration and exploration, bringing together a diverse team of scientists, engineers, biologists, and divers to uncover the mysteries of the deep.
113
+
114
+ ### Summarization
115
+ **Give me a one line summary of this:
116
+ Coffee is one of the most widely consumed beverages in the world. It has primarily consumed due to its stimulant effect and unique taste since the ancient times. Afterwards, its consumption has been historically associated with a lower risk of some diseases such as type 2 diabetes mellitus, obesity, cardiovascular disease and some type of cancer and thus it has also consumed due to health benefits. It contains many bioactive compounds such as caffeine, chlorogenic acids and diterpenoid alcohols which have so far been associated with many potential health benefits. For example, caffeine reduces risk of developing neurodegenerative disease and chlorogenic acids (CGA) and diterpene alcohols have many health benefits such as antioxidant and chemo-preventive. Coffee also have harmful effects. For example, diterpenoid alcohols increases serum homocysteine and cholesterol levels and thus it has adverse effects on cardiovascular system. Overall, the study that supports the health benefits of coffee is increasing. But, it is thought-provoking that the association with health benefits of coffee consumption and frequency at different levels in each study. For this reason, we aimed to examine the health effect of the coffee and how much consumption is to investigate whether it meets the claimed health benefits.**
117
+
118
+ Coffee is one of the most widely consumed beverages in the world and has historically been associated with a lower risk of some diseases such as type 2 diabetes mellitus, obesity, cardiovascular disease and some type of cancer. However, there are mixed results in the literature and further research is required to determine the health benefits of coffee consumption.
119
+
120
+
121
+ ### Rephrasing
122
+ **Give me a different way to say the following to a 4 year old: ""Son, this is the last time I'm going to tell you. Go to bed!""**
123
+
124
+ ""Goodnight, Son. Sleep tight and I'll see you in the morning!""
125
+
126
+
127
+ ### Closed QA
128
+ **I'm going to give you a passage from the book Neuromancer and I'd like you to answer the following question: What is the tool that allows Case to access the matrix?
129
+ Case was twenty-four. At twenty-two, he'd been a cowboy, a rustler, one of the best in the Sprawl. He'd been trained by the best, by McCoy Pauley and Bobby Quine, legends in the biz. He'd operated on an almost permanent adrenaline high, a byproduct of youth and proficiency, jacked into a custom cyberspace deck that projected his disembodied consciousness into the consensual hallucination that was the matrix.**
130
+
131
+ The tool that allows Case to access the matrix is his cyberspace deck, which allows him to manipulate and interact with the virtual world around him. It enables him to access information from various sources and interact with the people in the matrix, and it is one of the main weapons of Case's cyberworld.
132
+
133
+ ### Open QA
134
+ **What is the literary style of Jorge Luis Borges?**
135
+
136
+ Jorge Luis Borges's literary style is often described as magical realism. He often creates stories that combine fictional characters with real life elements and use the juxtaposition of these elements to create a sense of surrealism. He often uses allegory and symbolism to explore complex philosophical ideas and explore questions of human perception and understanding. His writing is characterized by a playful and inventive use of language, with characters often being metaphors or symbols for larger, more abstract concepts.
137
+
138
+ ## Usage
139
+
140
+ ### Load Model & Tokenizer
141
+ ```python
142
+ import numpy as np
143
+ from transformers import (
144
+ AutoModelForCausalLM,
145
+ AutoTokenizer,
146
+ PreTrainedModel,
147
+ PreTrainedTokenizer
148
+ )
149
+
150
+ tokenizer = AutoTokenizer.from_pretrained(""databricks/dolly-v1-6b"", padding_side=""left"")
151
+ model = AutoModelForCausalLM.from_pretrained(""databricks/dolly-v1-6b"", device_map=""auto"", trust_remote_code=True)
152
+ ```
153
+
154
+ ### Generate Text
155
+ ```python
156
+ PROMPT_FORMAT = """"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
157
+
158
+ ### Instruction:
159
+ {instruction}
160
+
161
+ ### Response:
162
+ """"""
163
+
164
+ def generate_response(instruction: str, *, model: PreTrainedModel, tokenizer: PreTrainedTokenizer,
165
+ do_sample: bool = True, max_new_tokens: int = 256, top_p: float = 0.92, top_k: int = 0, **kwargs) -> str:
166
+ input_ids = tokenizer(PROMPT_FORMAT.format(instruction=instruction), return_tensors=""pt"").input_ids.to(""cuda"")
167
+
168
+ # each of these is encoded to a single token
169
+ response_key_token_id = tokenizer.encode(""### Response:"")[0]
170
+ end_key_token_id = tokenizer.encode(""### End"")[0]
171
+
172
+ gen_tokens = model.generate(input_ids, pad_token_id=tokenizer.pad_token_id, eos_token_id=end_key_token_id,
173
+ do_sample=do_sample, max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k, **kwargs)[0].cpu()
174
+
175
+ # find where the response begins
176
+ response_positions = np.where(gen_tokens == response_key_token_id)[0]
177
+
178
+ if len(response_positions) > 0:
179
+ response_pos = response_positions[0]
180
+
181
+ # find where the response ends
182
+ end_pos = None
183
+ end_positions = np.where(gen_tokens == end_key_token_id)[0]
184
+ if len(end_positions) > 0:
185
+ end_pos = end_positions[0]
186
+
187
+ return tokenizer.decode(gen_tokens[response_pos + 1 : end_pos]).strip()
188
+
189
+ return None
190
+
191
+ # Sample similar to: ""Excited to announce the release of Dolly, a powerful new language model from Databricks! #AI #Databricks""
192
+ generate_response(""Write a tweet announcing Dolly, a large language model from Databricks."", model=model, tokenizer=tokenizer)
193
+ ```
194
+
195
+ ### Benchmark Metrics
196
+
197
+ Below you'll find various models' benchmark performance on the [EleutherAI LLM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness);
198
+ model results are sorted by geometric mean to produce an intelligible ordering. These results demonstrate that Dolly is not state of the art, as we describe
199
+ above, but they also point to an interesting observation. Namely, Dolly is only marginally better than (and in the case of Winogrande worse than) its base model GPT-J-6B.
200
+ Despite this fact, the qualitative behavior of Dolly is materially different from the underlying model ([try it yourself](https://huggingface.co/EleutherAI/gpt-j-6B) on Hugging Face!),
201
+ which points to meaningful limitations of the existing evaluation benchmarks for measuring the quality of generative models.
202
+
203
+ | model | openbookqa | arc_easy | winogrande | hellaswag | arc_challenge | piqa | boolq |
204
+ | --------------------------- | ------------ | ---------- | ------------ | ----------- | --------------- | -------- | ---------|
205
+ | cerebras/Cerebras-GPT-13B | 0.36 | 0.598906 | 0.607735 | 0.593109 | 0.325939 | 0.749728 | 0.611621 |
206
+ | EleutherAI/gpt-j-6B | 0.382 | 0.621633 | 0.651144 | 0.662617 | 0.363481 | 0.761153 | 0.655963 |
207
+ | dolly-v1-6b (1 epoch) | 0.428 | 0.608586 | 0.633781 | 0.650568 | 0.377133 | 0.761697 | 0.69633 |
208
+ | dolly-v1-6b (10 epochs) | 0.41 | 0.62963 | 0.643252 | 0.676758 | 0.384812 | 0.773667 | 0.687768 |
209
+ | EleutherAI/gpt-neox-20b | 0.402 | 0.683923 | 0.656669 | 0.7142 | 0.408703 | 0.784004 | 0.695413 |
210
+
211
+ # Citation
212
+
213
+ ```
214
+ @online{DatabricksBlog2023DollyV1,
215
+ author = {Mike Conover and Matt Hayes and Ankit Mathur and Xiangrui Meng and Jianwei Xie and Jun Wan and Ali Ghodsi and Patrick Wendell and Matei Zaharia},
216
+ title = {Hello Dolly: Democratizing the magic of ChatGPT with open models},
217
+ year = {2023},
218
+ url = {https://www.databricks.com/blog/2023/03/24/hello-dolly-democratizing-magic-chatgpt-open-models.html},
219
+ urldate = {2023-06-30}
220
+ }
221
+ ```
222
+
223
+ # Happy Hacking!","{""id"": ""databricks/dolly-v1-6b"", ""author"": ""databricks"", ""sha"": ""c9a85b3a322b402e20c839c702c725afe0cb454d"", ""last_modified"": ""2023-06-30 18:32:22+00:00"", ""created_at"": ""2023-03-23 19:04:39+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3616, ""downloads_all_time"": null, ""likes"": 310, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""gptj"", ""text-generation"", ""en"", ""dataset:tatsu-lab/alpaca"", ""arxiv:2212.10560"", ""arxiv:2104.09864"", ""arxiv:2203.02155"", ""license:cc-by-nc-4.0"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- tatsu-lab/alpaca\nlanguage:\n- en\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\ninference: false"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""GPTJForCausalLM""], ""model_type"": ""gptj"", ""tokenizer_config"": {""bos_token"": {""__type"": ""AddedToken"", ""content"": ""<|endoftext|>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""eos_token"": {""__type"": ""AddedToken"", ""content"": ""<|endoftext|>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}, ""pad_token"": null, ""unk_token"": {""__type"": ""AddedToken"", ""content"": ""<|endoftext|>"", ""lstrip"": false, ""normalized"": true, ""rstrip"": false, ""single_word"": false}}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""Sharathhebbar24/One-stop-for-Open-source-models"", ""genai-impact/ecologits-calculator"", ""chkla/PromptCardsPlayground"", ""diy2023/databricks-dolly-v1-6b"", ""deepakpal/databricks-dolly-v1-6b"", ""TechWithAnirudh/databricks-dolly-v1-6b"", ""K00B404/One-stop-till-you-drop""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-06-30 18:32:22+00:00"", ""cardData"": ""datasets:\n- tatsu-lab/alpaca\nlanguage:\n- en\nlibrary_name: transformers\nlicense: cc-by-nc-4.0\ninference: false"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""641ca2c78357e6ae9b1c62c4"", ""modelId"": 
""databricks/dolly-v1-6b"", ""usedStorage"": 12219365353}",0,,0,,0,,0,,0,"K00B404/One-stop-till-you-drop, Sharathhebbar24/One-stop-for-Open-source-models, TechWithAnirudh/databricks-dolly-v1-6b, chkla/PromptCardsPlayground, deepakpal/databricks-dolly-v1-6b, diy2023/databricks-dolly-v1-6b, genai-impact/ecologits-calculator, huggingface/InferenceSupport/discussions/new?title=databricks/dolly-v1-6b&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bdatabricks%2Fdolly-v1-6b%5D(%2Fdatabricks%2Fdolly-v1-6b)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",8
faster-whisper-large-v3_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
@@ -0,0 +1,143 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ Systran/faster-whisper-large-v3,"---
3
+ language:
4
+ - en
5
+ - zh
6
+ - de
7
+ - es
8
+ - ru
9
+ - ko
10
+ - fr
11
+ - ja
12
+ - pt
13
+ - tr
14
+ - pl
15
+ - ca
16
+ - nl
17
+ - ar
18
+ - sv
19
+ - it
20
+ - id
21
+ - hi
22
+ - fi
23
+ - vi
24
+ - he
25
+ - uk
26
+ - el
27
+ - ms
28
+ - cs
29
+ - ro
30
+ - da
31
+ - hu
32
+ - ta
33
+ - 'no'
34
+ - th
35
+ - ur
36
+ - hr
37
+ - bg
38
+ - lt
39
+ - la
40
+ - mi
41
+ - ml
42
+ - cy
43
+ - sk
44
+ - te
45
+ - fa
46
+ - lv
47
+ - bn
48
+ - sr
49
+ - az
50
+ - sl
51
+ - kn
52
+ - et
53
+ - mk
54
+ - br
55
+ - eu
56
+ - is
57
+ - hy
58
+ - ne
59
+ - mn
60
+ - bs
61
+ - kk
62
+ - sq
63
+ - sw
64
+ - gl
65
+ - mr
66
+ - pa
67
+ - si
68
+ - km
69
+ - sn
70
+ - yo
71
+ - so
72
+ - af
73
+ - oc
74
+ - ka
75
+ - be
76
+ - tg
77
+ - sd
78
+ - gu
79
+ - am
80
+ - yi
81
+ - lo
82
+ - uz
83
+ - fo
84
+ - ht
85
+ - ps
86
+ - tk
87
+ - nn
88
+ - mt
89
+ - sa
90
+ - lb
91
+ - my
92
+ - bo
93
+ - tl
94
+ - mg
95
+ - as
96
+ - tt
97
+ - haw
98
+ - ln
99
+ - ha
100
+ - ba
101
+ - jw
102
+ - su
103
+ - yue
104
+ tags:
105
+ - audio
106
+ - automatic-speech-recognition
107
+ license: mit
108
+ library_name: ctranslate2
109
+ ---
110
+
111
+ # Whisper large-v3 model for CTranslate2
112
+
113
+ This repository contains the conversion of [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format.
114
+
115
+ This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/systran/faster-whisper).
116
+
117
+ ## Example
118
+
119
+ ```python
120
+ from faster_whisper import WhisperModel
121
+
122
+ model = WhisperModel(""large-v3"")
123
+
124
+ segments, info = model.transcribe(""audio.mp3"")
125
+ for segment in segments:
126
+ print(""[%.2fs -> %.2fs] %s"" % (segment.start, segment.end, segment.text))
127
+ ```
128
+
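+ The returned `info` object also reports the detected language; a short follow-up sketch (attribute names as exposed by faster-whisper's `TranscriptionInfo`):
+
+ ```python
+ # Prints e.g. ""en 0.98"": the detected language and its probability.
+ print(info.language, info.language_probability)
+ ```
+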
129
+ ## Conversion details
130
+
131
+ The original model was converted with the following command:
132
+
133
+ ```
134
+ ct2-transformers-converter --model openai/whisper-large-v3 --output_dir faster-whisper-large-v3 \
135
+ --copy_files tokenizer.json preprocessor_config.json --quantization float16
136
+ ```
137
+
138
+ Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html).
139
+
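+ For example, a minimal sketch of overriding the stored FP16 type at load time with faster-whisper (`int8` here is just one illustrative choice among the types CTranslate2 accepts):
+
+ ```python
+ from faster_whisper import WhisperModel
+
+ # Weights are converted on the fly from the stored FP16 to 8-bit integers.
+ model = WhisperModel(""large-v3"", device=""cpu"", compute_type=""int8"")
+ ```
+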
140
+ ## More information
141
+
142
+ **For more information about the original model, see its [model card](https://huggingface.co/openai/whisper-large-v3).**
143
+ ","{""id"": ""Systran/faster-whisper-large-v3"", ""author"": ""Systran"", ""sha"": ""edaa852ec7e145841d8ffdb056a99866b5f0a478"", ""last_modified"": ""2023-11-23 09:41:12+00:00"", ""created_at"": ""2023-11-23 09:34:20+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 752912, ""downloads_all_time"": null, ""likes"": 376, ""library_name"": ""ctranslate2"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""ctranslate2"", ""audio"", ""automatic-speech-recognition"", ""en"", ""zh"", ""de"", ""es"", ""ru"", ""ko"", ""fr"", ""ja"", ""pt"", ""tr"", ""pl"", ""ca"", ""nl"", ""ar"", ""sv"", ""it"", ""id"", ""hi"", ""fi"", ""vi"", ""he"", ""uk"", ""el"", ""ms"", ""cs"", ""ro"", ""da"", ""hu"", ""ta"", ""no"", ""th"", ""ur"", ""hr"", ""bg"", ""lt"", ""la"", ""mi"", ""ml"", ""cy"", ""sk"", ""te"", ""fa"", ""lv"", ""bn"", ""sr"", ""az"", ""sl"", ""kn"", ""et"", ""mk"", ""br"", ""eu"", ""is"", ""hy"", ""ne"", ""mn"", ""bs"", ""kk"", ""sq"", ""sw"", ""gl"", ""mr"", ""pa"", ""si"", ""km"", ""sn"", ""yo"", ""so"", ""af"", ""oc"", ""ka"", ""be"", ""tg"", ""sd"", ""gu"", ""am"", ""yi"", ""lo"", ""uz"", ""fo"", ""ht"", ""ps"", ""tk"", ""nn"", ""mt"", ""sa"", ""lb"", ""my"", ""bo"", ""tl"", ""mg"", ""as"", ""tt"", ""haw"", ""ln"", ""ha"", ""ba"", ""jw"", ""su"", ""yue"", ""license:mit"", ""region:us""], ""pipeline_tag"": ""automatic-speech-recognition"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language:\n- en\n- zh\n- de\n- es\n- ru\n- ko\n- fr\n- ja\n- pt\n- tr\n- pl\n- ca\n- nl\n- ar\n- sv\n- it\n- id\n- hi\n- fi\n- vi\n- he\n- uk\n- el\n- ms\n- cs\n- ro\n- da\n- hu\n- ta\n- 'no'\n- th\n- ur\n- hr\n- bg\n- lt\n- la\n- mi\n- ml\n- cy\n- sk\n- te\n- fa\n- lv\n- bn\n- sr\n- az\n- sl\n- kn\n- et\n- mk\n- br\n- eu\n- is\n- hy\n- ne\n- mn\n- bs\n- kk\n- sq\n- sw\n- gl\n- mr\n- pa\n- si\n- km\n- sn\n- yo\n- so\n- af\n- oc\n- ka\n- be\n- tg\n- sd\n- gu\n- am\n- yi\n- lo\n- uz\n- fo\n- ht\n- ps\n- tk\n- nn\n- mt\n- sa\n- lb\n- my\n- bo\n- tl\n- mg\n- as\n- tt\n- haw\n- ln\n- ha\n- ba\n- jw\n- su\n- yue\nlibrary_name: ctranslate2\nlicense: mit\ntags:\n- audio\n- automatic-speech-recognition"", ""widget_data"": null, ""model_index"": null, ""config"": {}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocabulary.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""mrfakename/E2-F5-TTS"", ""devilent2/whisper-v3-zero"", ""ThreadAbort/E2-F5-TTS"", ""abidlabs/E2-F5-TTS"", ""kevinwang676/E2-F5-TTS"", ""hynt/F5-TTS-Vietnamese-100h"", ""emilalvaro/E2-F5-TTS"", ""cocktailpeanut/E2-F5-TTS"", ""Karayakar/F5-TTS"", ""eniolaa/voice-chat-with-llm"", ""devilent2/whisper-v3-zero-dev"", ""ankush13r/faster-whisper"", ""redradios/E2-F5-TTS"", ""SpongeBobFan2002/E2-F5-TTS"", ""Athspi/AutoSubGen"", ""Neogrconpet/Voice-Chat"", ""devilent2/whisper-v3-zero-canary"", ""JohnInizio/persona-chat-demo"", ""Shamik3/tts"", ""YetNak/E2-F5-TTSi"", ""Hemant0000/Voice_cloning"", ""Kutches/F5-TTS"", ""M4xjunior/E2-F5-TTS"", ""ghaafs/vocacare"", 
""fedirz/faster-whisper-server"", ""marcosegura34/Spanish-F5"", ""Athspi/Ai-audio"", ""3morrrrr/Ai-Voice-Cloner"", ""romgomez101/E2-F5-TTS"", ""eBlessings/E2-F5-TTS"", ""slabstech/dhwani-internal-api-server"", ""lshzhm/DeepAudio-V1"", ""doannguyenmmo/VI-TEXT-TO-SPEECH"", ""gaganyatri/tts-indic-f5"", ""rodrigodocarmo/whisper"", ""h2m/whisper-v3-zero"", ""Ntabukiraniro/LiyaGDEXA"", ""devilent2/whisper-v3-cpu"", ""khursani8/test_msam"", ""dhanyabahadur/voice-chat-with-llm"", ""Shmuel/ivrit-ai-whisper-13-v2-e2"", ""hbs2/dadk"", ""karl48071/voice-chat-with-llm"", ""roudayna77/Voice_Chatwith_LLM"", ""sirne/voice_chat_with_llm"", ""syedmudassir16/emotion_based_music_voice_chat"", ""sims2k/Saul-GDPR"", ""vazhaju/voice-chat-with-llm"", ""syedmudassir16/voice-chat-with-llm"", ""klaus8/voice-chat-with-llm"", ""klaus8/Voice-Chat-With-TOM"", ""bhavana55/voice-chat-with-llm"", ""HeaHea0122/voice-chat-with-llm"", ""naresH112/VOICE"", ""naresH112/VoiceAssistant"", ""Dheerajb14/llm"", ""MariamMetwlly/voice-chatbot1"", ""StudyVN/voice-chat-with-llm"", ""yeshaswinik/llm-speechbot"", ""DelinaresMassates/E2-F5-TTS"", ""lucifer2418/Genache-Project"", ""BadriNarayanan/gradio-text-to-speech-app"", ""mohaimenulshawon/E2-F5-TTS"", ""Dragunflie-420/E2-F5-TTS"", ""d-delaurier/E2-F5-TTS"", ""mayduamengungu/E2-F5-TTS"", ""YetNak/E2-F5-TTS"", ""unity0090/E2-F5-TTS"", ""rainnee/E2-F5-TTS"", ""rainnee0925/E2-F5-TTS"", ""YetNak/E2-F5-TTSgi"", ""Nightwing25/E2-F5-TTS"", ""Masterdqqq/E2-F5-TTSgb"", ""prabaerode/zero-shot-tts"", ""ivanusto/E2-F5-TTS"", ""EronSamez/E2-F5-TTS-Pt-Br"", ""vuxuanhoan/E2-F5-TTS"", ""RioTus/aivoice"", ""SpongeBobFan2002/E2-F5-TTSII"", ""emilalvaro/E2-F5-TTS-EMILIO-LLM"", ""NemesisAlm/E2-F5-TTS"", ""pengjoe12802/E2-F5-TTS"", ""goku1911/E2-F5-TTS"", ""jazy707/talk-to-me-baby"", ""viji514/MindsAI"", ""BMCVRN/E2-F5-TTS"", ""Sush0677/Voice-assistant"", ""M4xjunior/locseed"", ""EronSamez/E2-F5-TTS-portugues"", ""smjkshp123/F5-TTS"", ""guillaume42/secretariattelephonique"", ""guillaume42/secretariattelephonique2"", ""Arunak05/arun-voice-chat"", ""DefenseIntelligenceAgency/Voice-Cloner"", ""redfernstech/voice-chat-with-llm"", ""sonidivya/Voice-Chat-With-AI"", ""Vraj26/Bot"", ""goldenlifeds/Real-Time-Voice-Changer"", ""Bantikumar/tts-app"", ""johnc-transforms/E2-F5-TTS""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-11-23 09:41:12+00:00"", ""cardData"": ""language:\n- en\n- zh\n- de\n- es\n- ru\n- ko\n- fr\n- ja\n- pt\n- tr\n- pl\n- ca\n- nl\n- ar\n- sv\n- it\n- id\n- hi\n- fi\n- vi\n- he\n- uk\n- el\n- ms\n- cs\n- ro\n- da\n- hu\n- ta\n- 'no'\n- th\n- ur\n- hr\n- bg\n- lt\n- la\n- mi\n- ml\n- cy\n- sk\n- te\n- fa\n- lv\n- bn\n- sr\n- az\n- sl\n- kn\n- et\n- mk\n- br\n- eu\n- is\n- hy\n- ne\n- mn\n- bs\n- kk\n- sq\n- sw\n- gl\n- mr\n- pa\n- si\n- km\n- sn\n- yo\n- so\n- af\n- oc\n- ka\n- be\n- tg\n- sd\n- gu\n- am\n- yi\n- lo\n- uz\n- fo\n- ht\n- ps\n- tk\n- nn\n- mt\n- sa\n- lb\n- my\n- bo\n- tl\n- mg\n- as\n- tt\n- haw\n- ln\n- ha\n- ba\n- jw\n- su\n- yue\nlibrary_name: ctranslate2\nlicense: mit\ntags:\n- audio\n- automatic-speech-recognition"", ""transformersInfo"": null, ""_id"": ""655f1c9c203bce21fe0488f8"", ""modelId"": ""Systran/faster-whisper-large-v3"", ""usedStorage"": 3102942254}",0,,0,,0,,0,,0,"Karayakar/F5-TTS, Neogrconpet/Voice-Chat, SpongeBobFan2002/E2-F5-TTS, ThreadAbort/E2-F5-TTS, abidlabs/E2-F5-TTS, ankush13r/faster-whisper, devilent2/whisper-v3-zero, devilent2/whisper-v3-zero-canary, emilalvaro/E2-F5-TTS, 
huggingface/InferenceSupport/discussions/new?title=Systran/faster-whisper-large-v3&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5BSystran%2Ffaster-whisper-large-v3%5D(%2FSystran%2Ffaster-whisper-large-v3)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, hynt/F5-TTS-Vietnamese-100h, mrfakename/E2-F5-TTS, redradios/E2-F5-TTS",13
gemma-scope_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,80 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ google/gemma-scope,"---
3
+ license: cc-by-4.0
4
+ library_name: saelens
5
+ ---
6
+
7
+ # Gemma Scope:
8
+
9
+ ![](gemma_scope.gif)
10
+
11
+ This is a landing page for **Gemma Scope**, a comprehensive, open suite of sparse autoencoders for Gemma 2 9B and 2B. Sparse Autoencoders are a ""microscope"" of sorts that can help us break down a model’s internal activations into the underlying concepts, just as biologists use microscopes to study the individual cells of plants and animals.
12
+
13
+ **There are no model weights in this repo. If you are looking for them, please visit one of our repos:**
14
+
15
+ - https://huggingface.co/google/gemma-scope-2b-pt-res
16
+ - https://huggingface.co/google/gemma-scope-2b-pt-mlp
17
+ - https://huggingface.co/google/gemma-scope-2b-pt-att
18
+ - https://huggingface.co/google/gemma-scope-9b-pt-res
19
+ - https://huggingface.co/google/gemma-scope-9b-pt-mlp
20
+ - https://huggingface.co/google/gemma-scope-9b-pt-att
21
+ - https://huggingface.co/google/gemma-scope-27b-pt-res
22
+ - https://huggingface.co/google/gemma-scope-9b-it-res
23
+ - https://huggingface.co/google/gemma-scope-2b-pt-transcoders
24
+
25
+ [This tutorial](https://colab.research.google.com/drive/17dQFYUYnuKnP6OwQPH9v_GSYUW5aj-Rp?ts=66a77041) has instructions on how to load the SAEs, and [this tutorial](https://colab.research.google.com/drive/1PlFzI_PWGTN9yCQLuBcSuPJUjgHL7GiD) explains and implements JumpReLU SAE training in PyTorch and JAX.
26
+
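+ As a quick orientation before those tutorials, the JumpReLU SAE forward pass from the technical report can be sketched in a few lines of PyTorch (tensor names here are illustrative, not the parameter names used in the released weights):
+
+ ```python
+ import torch
+
+ def jumprelu_sae(x, W_enc, b_enc, W_dec, b_dec, theta):
+     # Encoder pre-activations, one per learned feature.
+     pre = x @ W_enc + b_enc
+     # JumpReLU: keep a pre-activation only where it exceeds its learned
+     # per-feature threshold theta; zero it out everywhere else.
+     acts = pre * (pre > theta)
+     # The decoder reconstructs the original model activations.
+     return acts @ W_dec + b_dec
+ ```
+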
27
+ # Key links:
28
+
29
+ ![](gs-demo-tweet.gif)
30
+ - Check out the [interactive Gemma Scope demo](https://www.neuronpedia.org/gemma-scope) made by [Neuronpedia](https://www.neuronpedia.org/).
31
+ - (NEW!) We have a colab notebook tutorial for JumpReLU SAE training in JAX and PyTorch [here](https://colab.research.google.com/drive/1PlFzI_PWGTN9yCQLuBcSuPJUjgHL7GiD).
32
+ - Learn more about Gemma Scope in our [Google DeepMind blog post](https://deepmind.google/discover/blog/gemma-scope-helping-the-safety-community-shed-light-on-the-inner-workings-of-language-models).
33
+ - Check out our [Google Colab notebook tutorial](https://colab.research.google.com/drive/17dQFYUYnuKnP6OwQPH9v_GSYUW5aj-Rp?ts=66a77041) for how to use Gemma Scope.
34
+ - Read [the Gemma Scope technical report](https://arxiv.org/abs/2408.05147).
35
+ - Check out [Mishax](https://github.com/google-deepmind/mishax), a GDM internal tool that we used in this project to expose the internal activations inside Gemma 2 models.
36
+
37
+ # Full weight set:
38
+
39
+ The full list of SAEs we trained, together with the sites and layers at which they were trained, is linked from the following table, adapted from Figure 1 of our technical report:
40
+
41
+ | <big>Gemma 2 Model</big> | <big>SAE Width</big> | <big>Attention</big> | <big>MLP</big> | <big>Residual</big> | <big>Tokens</big> |
42
+ |---------------|-----------|-----------|-----|----------|----------|
43
+ | 2.6B PT<br>(26 layers) | 2^14 ≈ 16.4K | [All](https://huggingface.co/google/gemma-scope-2b-pt-att) | [All](https://huggingface.co/google/gemma-scope-2b-pt-mlp) | [All](https://huggingface.co/google/gemma-scope-2b-pt-res)[+](https://huggingface.co/google/gemma-scope-2b-pt-transcoders) | 4B |
44
+ | | 2^15 | | | {[12](https://huggingface.co/google/gemma-scope-2b-pt-res/tree/main/layer_12/width_32k/)} | 8B |
45
+ | | 2^16 | [All](https://huggingface.co/google/gemma-scope-2b-pt-att) | [All](https://huggingface.co/google/gemma-scope-2b-pt-mlp) | [All](https://huggingface.co/google/gemma-scope-2b-pt-res) | 8B |
46
+ | | 2^17 | | | {[12](https://huggingface.co/google/gemma-scope-2b-pt-res/tree/main/layer_12/width_131k/)} | 8B |
47
+ | | 2^18 | | | {[12](https://huggingface.co/google/gemma-scope-2b-pt-res/tree/main/layer_12/width_262k/)} | 8B |
48
+ | | 2^19 | | | {[12](https://huggingface.co/google/gemma-scope-2b-pt-res/tree/main/layer_12/width_524k/)} | 8B |
49
+ | | 2^20 ≈ 1M | | | {[5](https://huggingface.co/google/gemma-scope-2b-pt-res/tree/main/layer_5/width_1m/), [12](https://huggingface.co/google/gemma-scope-2b-pt-res/tree/main/layer_12/width_1m/), [19](https://huggingface.co/google/gemma-scope-2b-pt-res/tree/main/layer_19/width_1m/)} | 16B |
50
+ | 9B PT<br>(42 layers) | 2^14 | [All](https://huggingface.co/google/gemma-scope-9b-pt-att) | [All](https://huggingface.co/google/gemma-scope-9b-pt-mlp) | [All](https://huggingface.co/google/gemma-scope-9b-pt-res) | 4B |
51
+ | | 2^15 | | | {[20](https://huggingface.co/google/gemma-scope-9b-pt-res/tree/main/layer_20/width_32k/)} | 8B |
52
+ | | 2^16 | | | {[20](https://huggingface.co/google/gemma-scope-9b-pt-res/tree/main/layer_20/width_65k/)} | 8B |
53
+ | | 2^17 | [All](https://huggingface.co/google/gemma-scope-9b-pt-att) | [All](https://huggingface.co/google/gemma-scope-9b-pt-mlp) | [All](https://huggingface.co/google/gemma-scope-9b-pt-res) | 8B |
54
+ | | 2^18 | | | {[20](https://huggingface.co/google/gemma-scope-9b-pt-res/tree/main/layer_20/width_262k/)} | 8B |
55
+ | | 2^19 | | | {[20](https://huggingface.co/google/gemma-scope-9b-pt-res/tree/main/layer_20/width_524k/)} | 8B |
56
+ | | 2^20 | | | {[9](https://huggingface.co/google/gemma-scope-9b-pt-res/tree/main/layer_9/width_1m/), [20](https://huggingface.co/google/gemma-scope-9b-pt-res/tree/main/layer_20/width_1m/), [31](https://huggingface.co/google/gemma-scope-9b-pt-res/tree/main/layer_31/width_1m/)} | 16B |
57
+ | 27B PT<br>(46 layers) | 2^17 | | | {[10](https://huggingface.co/google/gemma-scope-27b-pt-res/tree/main/layer_10/width_131k/), [22](https://huggingface.co/google/gemma-scope-27b-pt-res/tree/main/layer_22/width_131k/), [34](https://huggingface.co/google/gemma-scope-27b-pt-res/tree/main/layer_34/width_131k/)} | 8B |
58
+ | 9B IT<br>(42 layers) | 2^14 | | | {[9](https://huggingface.co/google/gemma-scope-9b-it-res/tree/main/layer_9/width_16k/), [20](https://huggingface.co/google/gemma-scope-9b-it-res/tree/main/layer_20/width_16k/), [31](https://huggingface.co/google/gemma-scope-9b-it-res/tree/main/layer_31/width_16k/)} | 4B |
59
+ | | 2^17 | | | {[9](https://huggingface.co/google/gemma-scope-9b-it-res/tree/main/layer_9/width_131k/), [20](https://huggingface.co/google/gemma-scope-9b-it-res/tree/main/layer_20/width_131k/), [31](https://huggingface.co/google/gemma-scope-9b-it-res/tree/main/layer_31/width_131k/)} | 8B |
60
+
61
+ # Which SAE is in the [Neuronpedia demo](https://www.neuronpedia.org/gemma-scope)?
62
+
63
+ https://huggingface.co/google/gemma-scope-2b-pt-res/tree/main/layer_20/width_16k/average_l0_71
64
+
65
+ # Citation
66
+
67
+ ```
68
+ @misc{lieberum2024gemmascopeopensparse,
69
+ title={Gemma Scope: Open Sparse Autoencoders Everywhere All At Once on Gemma 2},
70
+ author={Tom Lieberum and Senthooran Rajamanoharan and Arthur Conmy and Lewis Smith and Nicolas Sonnerat and Vikrant Varma and János Kramár and Anca Dragan and Rohin Shah and Neel Nanda},
71
+ year={2024},
72
+ eprint={2408.05147},
73
+ archivePrefix={arXiv},
74
+ primaryClass={cs.LG},
75
+ url={https://arxiv.org/abs/2408.05147},
76
+ }
77
+ ```
78
+
79
+ Paper link: https://arxiv.org/abs/2408.05147
80
+ ","{""id"": ""google/gemma-scope"", ""author"": ""google"", ""sha"": ""adbe8eaffaa83b8fbf2d052acfb502d36965968b"", ""last_modified"": ""2024-08-29 15:15:17+00:00"", ""created_at"": ""2024-07-30 15:42:39+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 0, ""downloads_all_time"": null, ""likes"": 161, ""library_name"": ""saelens"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""saelens"", ""arxiv:2408.05147"", ""license:cc-by-4.0"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""library_name: saelens\nlicense: cc-by-4.0"", ""widget_data"": null, ""model_index"": null, ""config"": null, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='LICENSE', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='gemma_scope.gif', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='gs-demo-tweet.gif', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-29 15:15:17+00:00"", ""cardData"": ""library_name: saelens\nlicense: cc-by-4.0"", ""transformersInfo"": null, ""_id"": ""66a909ef4122a1a25672b1fe"", ""modelId"": ""google/gemma-scope"", ""usedStorage"": 10756589}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=google/gemma-scope&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bgoogle%2Fgemma-scope%5D(%2Fgoogle%2Fgemma-scope)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
gpt2_finetunes_20250424_150612.csv_finetunes_20250424_150612.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:884e9174caa15cc776c3a0a4cbe364d9da4b07a64ae62c11d5c219f2d96fe8e4
3
+ size 10634816
gte-Qwen2-7B-instruct_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
The diff for this file is too large to render. See raw diff
 
hallucination_evaluation_model_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,184 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ vectara/hallucination_evaluation_model,"---
3
+ language: en
4
+ license: apache-2.0
5
+ base_model: google/flan-t5-base
6
+ pipeline_tag: text-classification
7
+ ---
8
+
9
+ <img referrerpolicy=""no-referrer-when-downgrade"" src=""https://static.scarf.sh/a.png?x-pxid=5f53f560-5ba6-4e73-917b-c7049e9aea2c"" />
10
+
11
+ <img src=""https://huggingface.co/vectara/hallucination_evaluation_model/resolve/main/candle.png"" width=""50"" height=""50"" style=""display: inline;""> In Loving memory of Simon Mark Hughes...
12
+
13
+ **Highlights**:
14
+ * HHEM-2.1-Open shows a significant improvement over HHEM-1.0.
15
+ * HHEM-2.1-Open outperforms GPT-3.5-Turbo and even GPT-4.
16
+ * HHEM-2.1-Open can run on consumer-grade hardware, using less than 600 MB of RAM at 32-bit precision and taking around 1.5 seconds for a 2k-token input on a modern x86 CPU.
17
+
18
+ > HHEM-2.1-Open introduces breaking changes to the usage. Please update your code according to the [new usage](#using-hhem-21-open) below. We are working on making it compatible with Hugging Face's Inference Endpoints. We apologize for the inconvenience.
19
+
20
+ HHEM-2.1-Open is a major upgrade to [HHEM-1.0-Open](https://huggingface.co/vectara/hallucination_evaluation_model/tree/hhem-1.0-open) created by [Vectara](https://vectara.com) in November 2023. The HHEM model series is designed for detecting hallucinations in LLMs. The models are particularly useful when building retrieval-augmented generation (RAG) applications, where a set of facts is summarized by an LLM and HHEM can measure the extent to which that summary is factually consistent with the facts.
21
+
22
+ If you are interested to learn more about RAG or experiment with Vectara, you can [sign up](https://console.vectara.com/signup/?utm_source=huggingface&utm_medium=space&utm_term=hhem-model&utm_content=console&utm_campaign=) for a Vectara account.
23
+
24
+ [**Try out HHEM-2.1-Open from your browser without coding** ](http://13.57.203.109:3000/)
25
+
26
+ ## Hallucination Detection 101
27
+ By ""hallucinated"" or ""factually inconsistent"", we mean that a text (hypothesis, to be judged) is not supported by another text (evidence/premise, given). You **always need two** pieces of text to determine whether a text is hallucinated or not. When applied to RAG (retrieval augmented generation), the LLM is provided with several pieces of text (often called facts or context) retrieved from some dataset, and a hallucination would indicate that the summary (hypothesis) is not supported by those facts (evidence).
28
+
29
+ A common type of hallucination in RAG is **factual but hallucinated**.
30
+ For example, given the premise _""The capital of France is Berlin""_, the hypothesis _""The capital of France is Paris""_ is hallucinated -- even though it is true according to world knowledge. This happens when LLMs generate content based on their pre-trained knowledge rather than on the textual data provided to them as part of the RAG retrieval process.
31
+
32
+ Additionally, hallucination detection is ""asymmetric"" or is not commutative. For example, the hypothesis _""I visited Iowa""_ is considered hallucinated given the premise _""I visited the United States""_, but the reverse is consistent.
33
+
34
+ ## Using HHEM-2.1-Open
35
+
36
+ > HHEM-2.1 introduces some breaking changes from HHEM-1.0. Your code that works with HHEM-1.0 (November 2023) will no longer work. While we work on backward compatibility, please follow the new usage instructions below.
37
+
38
+ Here we provide several ways to use HHEM-2.1-Open in the `transformers` library.
39
+
40
+ > You may run into a warning message that ""Token indices sequence length is longer than the specified maximum sequence length"". Please ignore it; it is inherited from the foundation model, T5-base.
41
+
42
+ ### Using with `AutoModel`
43
+
44
+ This is the most end-to-end, out-of-the-box way to use HHEM-2.1-Open. It takes a list of (premise, hypothesis) pairs as input and returns a score between 0 and 1 for each pair, where 0 means the hypothesis is not supported at all by the premise and 1 means it is fully supported.
45
+
46
+
47
+ ```python
48
+ from transformers import AutoModelForSequenceClassification
49
+
50
+ pairs = [ # Test data, List[Tuple[str, str]]
51
+ (""The capital of France is Berlin."", ""The capital of France is Paris.""), # factual but hallucinated
52
+ ('I am in California', 'I am in United States.'), # Consistent
53
+ ('I am in United States', 'I am in California.'), # Hallucinated
54
+ (""A person on a horse jumps over a broken down airplane."", ""A person is outdoors, on a horse.""),
55
+ (""A boy is jumping on skateboard in the middle of a red bridge."", ""The boy skates down the sidewalk on a red bridge""),
56
+ (""A man with blond-hair, and a brown shirt drinking out of a public water fountain."", ""A blond man wearing a brown shirt is reading a book.""),
57
+ (""Mark Wahlberg was a fan of Manny."", ""Manny was a fan of Mark Wahlberg."")
58
+ ]
59
+
60
+ # Step 1: Load the model
61
+ model = AutoModelForSequenceClassification.from_pretrained(
62
+ 'vectara/hallucination_evaluation_model', trust_remote_code=True)
63
+
64
+ # Step 2: Use the model to predict
65
+ model.predict(pairs) # note the predict() method. Do not do model(pairs).
66
+ # tensor([0.0111, 0.6474, 0.1290, 0.8969, 0.1846, 0.0050, 0.0543])
67
+ ```
68
+
69
+ ### Using with `pipeline`
70
+
71
+ In the popular `pipeline` class of the `transformers` library, you have to manually prepare the data using the prompt template with which we trained the model. HHEM-2.1-Open has two output neurons, corresponding to the labels `hallucinated` and `consistent` respectively. In the example below, we ask `pipeline` to return the scores for both labels (by setting `top_k=None`, formerly `return_all_scores=True`) and then extract the score for the `consistent` label.
72
+
73
+ ```python
74
+ from transformers import pipeline, AutoTokenizer
75
+
76
+ pairs = [ # Test data, List[Tuple[str, str]]
77
+ (""The capital of France is Berlin."", ""The capital of France is Paris.""),
78
+ ('I am in California', 'I am in United States.'),
79
+ ('I am in United States', 'I am in California.'),
80
+ (""A person on a horse jumps over a broken down airplane."", ""A person is outdoors, on a horse.""),
81
+ (""A boy is jumping on skateboard in the middle of a red bridge."", ""The boy skates down the sidewalk on a red bridge""),
82
+ (""A man with blond-hair, and a brown shirt drinking out of a public water fountain."", ""A blond man wearing a brown shirt is reading a book.""),
83
+ (""Mark Wahlberg was a fan of Manny."", ""Manny was a fan of Mark Wahlberg."")
84
+ ]
85
+
86
+ # Prompt the pairs
87
+ prompt = ""<pad> Determine if the hypothesis is true given the premise?\n\nPremise: {text1}\n\nHypothesis: {text2}""
88
+ input_pairs = [prompt.format(text1=pair[0], text2=pair[1]) for pair in pairs]
89
+
90
+ # Use text-classification pipeline to predict
91
+ classifier = pipeline(
92
+ ""text-classification"",
93
+ model='vectara/hallucination_evaluation_model',
94
+ tokenizer=AutoTokenizer.from_pretrained('google/flan-t5-base'),
95
+ trust_remote_code=True
96
+ )
97
+ full_scores = classifier(input_pairs, top_k=None) # List[List[Dict[str, float]]]
98
+
99
+ # Optional: Extract the scores for the 'consistent' label
100
+ simple_scores = [score_dict['score'] for score_for_both_labels in full_scores for score_dict in score_for_both_labels if score_dict['label'] == 'consistent']
101
+
102
+ print(simple_scores)
103
+ # Expected output: [0.011061512865126133, 0.6473632454872131, 0.1290171593427658, 0.8969419002532959, 0.18462494015693665, 0.005031010136008263, 0.05432349815964699]
104
+ ```
105
+
106
+ Of course, with `pipeline`, you can also get the most likely label, or the label with the highest score, by setting `top_k=1`.
107
+
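+ For example, reusing `classifier` and `input_pairs` from above:
+
+ ```python
+ top = classifier(input_pairs, top_k=1)
+ # Each entry now carries only the highest-scoring label for that pair,
+ # e.g. the 'consistent' label (score ~0.647) for the second pair.
+ ```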
108
+
109
+ ## HHEM-2.1-Open vs. HHEM-1.0
110
+
111
+ The major difference between HHEM-2.1-Open and the original HHEM-1.0 is that HHEM-2.1-Open has an unlimited context length, while HHEM-1.0 is capped at 512 tokens. The longer context length allows HHEM-2.1-Open to provide more accurate hallucination detection for RAG, whose inputs often exceed 512 tokens.
112
+
113
+ The tables below compare the two models against each other, and against GPT-3.5-Turbo and GPT-4, on the [AggreFact](https://arxiv.org/pdf/2205.12854) and [RAGTruth](https://arxiv.org/abs/2401.00396) benchmarks. On AggreFact, we focus on its SOTA subset (denoted `AggreFact-SOTA`), which contains summaries generated by Google's T5, Meta's BART, and Google's Pegasus, the three most recent models in the AggreFact benchmark. The results on RAGTruth's summarization (denoted `RAGTruth-Summ`) and QA (denoted `RAGTruth-QA`) subsets are reported separately. The GPT-3.5-Turbo and GPT-4 versions are 01-25 and 06-13 respectively. The zero-shot results of the two GPT models were obtained using the prompt template in [this paper](https://arxiv.org/pdf/2303.15621).
114
+
115
+ Table 1: Performance on AggreFact-SOTA
116
+ | model | Balanced Accuracy | F1 | Recall | Precision |
117
+ |:------------------------|---------:|-------:|-------:|----------:|
118
+ | HHEM-1.0 | 78.87% | 90.47% | 70.81% | 67.27% |
119
+ | HHEM-2.1-Open | 76.55% | 66.77% | 68.48% | 65.13% |
120
+ | GPT-3.5-Turbo zero-shot | 72.19% | 60.88% | 58.48% | 63.49% |
121
+ | GPT-4 06-13 zero-shot | 73.78% | 63.87% | 53.03% | 80.28% |
122
+
123
+ Table 2: Performance on RAGTruth-Summ
124
+ | model | Balanced Accuracy | F1 | Recall | Precision |
125
+ |:----------------------|---------:|-----------:|----------:|----------:|
126
+ | HHEM-1.0 | 53.36% | 15.77% | 9.31% | 51.35% |
127
+ | HHEM-2.1-Open | 64.42% | 44.83% | 31.86% | 75.58% |
128
+ | GPT-3.5-Turbo zero-shot | 58.49% | 29.72% | 18.14% | 82.22% |
129
+ | GPT-4 06-13 zero-shot | 62.62% | 40.59% | 26.96% | 82.09% |
130
+
131
+ Table 3: Performance on RAGTruth-QA
132
+ | model | Balanced Accuracy | F1 | Recall | Precision |
133
+ |:----------------------|---------:|-----------:|----------:|----------:|
134
+ | HHEM-1.0 | 52.58% | 19.40% | 16.25% | 24.07% |
135
+ | HHEM-2.1-Open | 74.28% | 60.00% | 54.38% | 66.92% |
136
+ | GPT-3.5-Turbo zero-shot | 56.16% | 25.00% | 18.13% | 40.28% |
137
+ | GPT-4 06-13 zero-shot | 74.11% | 57.78% | 56.88% | 58.71% |
138
+
139
+ The tables above show that HHEM-2.1-Open improves significantly over HHEM-1.0 on the RAGTruth-Summ and RAGTruth-QA benchmarks, with a slight decrease on AggreFact-SOTA. However, when interpreting these results, please note that AggreFact-SOTA evaluates relatively older LLMs:
140
+ - LLMs in AggreFact-SOTA: T5, BART, and Pegasus;
141
+ - LLMs in RAGTruth: GPT-4-0613, GPT-3.5-turbo-0613, Llama-2-7B/13B/70B-chat, and Mistral-7B-instruct.
142
+
143
+ ## HHEM-2.1-Open vs. GPT-3.5-Turbo and GPT-4
144
+
145
+ From the tables above we can also conclude that HHEM-2.1-Open outperforms both GPT-3.5-Turbo and GPT-4 in all three benchmarks. The quantitative advantage of HHEM-2.1-Open over GPT-3.5-Turbo and GPT-4 is summarized in Table 4 below.
146
+
147
+ Table 4: Balanced-accuracy advantage of HHEM-2.1-Open over GPT-3.5-Turbo and GPT-4, in percentage points
148
+ | | AggreFact-SOTA | RAGTruth-Summ | RAGTruth-QA |
149
+ |:----------------------|---------:|-----------:|----------:|
150
+ | HHEM-2.1-Open **over** GPT-3.5-Turbo | 4.36% | 5.93% | 18.12% |
151
+ | HHEM-2.1-Open **over** GPT-4 | 2.77% | 1.80% | 0.17% |
152
+
153
+ Another advantage of HHEM-2.1-Open is its efficiency. HHEM-2.1-Open can run on consumer-grade hardware, using less than 600 MB of RAM at 32-bit precision and taking around 1.5 seconds for a 2k-token input on a modern x86 CPU.
154
+
155
+ ## HHEM-2.1: The more powerful, proprietary counterpart of HHEM-2.1-Open
156
+
157
+ As the name suggests, HHEM-2.1-Open is the open-source version of the premium HHEM-2.1. HHEM-2.1 (without the `-Open`) is offered exclusively via Vectara's RAG-as-a-service platform. The major difference between the two is that HHEM-2.1 is cross-lingual across three languages (English, German, and French), while HHEM-2.1-Open is English-only. ""Cross-lingual"" means any combination of the three languages, e.g., documents in German, query in English, results in French.
158
+
159
+ ### Why RAG in Vectara?
160
+
161
+ Vectara provides a Trusted Generative AI platform. The platform allows organizations to rapidly create an AI assistant experience grounded in the data, documents, and knowledge they have. Vectara's serverless RAG-as-a-service also solves critical problems required for enterprise adoption: it reduces hallucinations, provides explainability and provenance, enforces access control, allows real-time updates to the knowledge, and mitigates intellectual-property and bias concerns around large language models.
162
+
163
+ To start benefiting from HHEM-2.1, you can [sign up](https://console.vectara.com/signup/?utm_source=huggingface&utm_medium=space&utm_term=hhem-model&utm_content=console&utm_campaign=) for a Vectara account, and you will get the HHEM-2.1 score returned with every query automatically.
164
+
165
+ Here are some additional resources:
166
+ 1. Vectara [API documentation](https://docs.vectara.com/docs).
167
+ 2. Quick start using Forrest's [`vektara` package](https://vektara.readthedocs.io/en/latest/crash_course.html).
168
+ 3. Learn more about Vectara's [Boomerang embedding model](https://vectara.com/blog/introducing-boomerang-vectaras-new-and-improved-retrieval-model/), [Slingshot reranker](https://vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages/), and [Mockingbird LLM](https://vectara.com/blog/mockingbird-a-rag-and-structured-output-focused-llm/)
169
+
170
+ ## LLM Hallucination Leaderboard
171
+ If you want to stay up to date with results of the latest tests using this model to evaluate the top LLM models, we have a [public leaderboard](https://huggingface.co/spaces/vectara/leaderboard) that is periodically updated, and results are also available on the [GitHub repository](https://github.com/vectara/hallucination-leaderboard).
172
+
173
+ # Cite this model
174
+
175
+ ```bibtex
176
+ @misc {hhem-2.1-open,
177
+ author = {Forrest Bao and Miaoran Li and Rogger Luo and Ofer Mendelevitch},
178
+ title = {{HHEM-2.1-Open}},
179
+ year = 2024,
180
+ url = { https://huggingface.co/vectara/hallucination_evaluation_model },
181
+ doi = { 10.57967/hf/3240 },
182
+ publisher = { Hugging Face }
183
+ }
184
+ ```","{""id"": ""vectara/hallucination_evaluation_model"", ""author"": ""vectara"", ""sha"": ""b3973afb9f9595a40bb8403b46c6dac9c26d16d5"", ""last_modified"": ""2024-10-30 17:03:42+00:00"", ""created_at"": ""2023-10-25 19:03:42+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 230813, ""downloads_all_time"": null, ""likes"": 280, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""HHEMv2Config"", ""text-classification"", ""custom_code"", ""en"", ""arxiv:2205.12854"", ""arxiv:2401.00396"", ""arxiv:2303.15621"", ""base_model:google/flan-t5-base"", ""base_model:finetune:google/flan-t5-base"", ""doi:10.57967/hf/3240"", ""license:apache-2.0"", ""autotrain_compatible"", ""region:us""], ""pipeline_tag"": ""text-classification"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: google/flan-t5-base\nlanguage: en\nlicense: apache-2.0\npipline_tag: text-classficiation"", ""widget_data"": [{""text"": ""I like you. I love you""}], ""model_index"": null, ""config"": {""architectures"": [""HHEMv2ForSequenceClassification""], ""auto_map"": {""AutoConfig"": ""configuration_hhem_v2.HHEMv2Config"", ""AutoModelForSequenceClassification"": ""modeling_hhem_v2.HHEMv2ForSequenceClassification""}, ""model_type"": ""HHEMv2Config""}, ""transformers_info"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": ""modeling_hhem_v2.HHEMv2ForSequenceClassification"", ""pipeline_tag"": ""text-classification"", ""processor"": null}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='candle.png', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='configuration_hhem_v2.py', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='modeling_hhem_v2.py', size=None, blob_id=None, lfs=None)""], ""spaces"": [""vectara/leaderboard"", ""TeamTonic/MultiMed"", ""jayash391/RAG_MedMind"", ""itsJB/Fact-Checked"", ""Tonic/MultiMedTulu"", ""girgis/Cloudilic-Demo"", ""eaglelandsonce/Breaking-Free-Hackathon"", ""jimshadow666/vectara-hallucination_evaluation_model"", ""TeamTonic/TruEraMultiMed"", ""subhanliaqat/hhem"", ""eaglelandsonce/hhem"", ""ahmadtalha/hhem"", ""pyresearch/KitchenCreators"", ""Tonic/SureRAG"", ""Johan713/MedMind01"", ""abidlabs/HHEM"", ""ranavikas/NEXUS"", ""Prathamesh1420/hallucination_checker""], ""safetensors"": {""parameters"": {""F32"": 109630082}, ""total"": 109630082}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-10-30 17:03:42+00:00"", ""cardData"": ""base_model: google/flan-t5-base\nlanguage: en\nlicense: apache-2.0\npipline_tag: text-classficiation"", ""transformersInfo"": {""auto_model"": ""AutoModelForSequenceClassification"", ""custom_class"": ""modeling_hhem_v2.HHEMv2ForSequenceClassification"", ""pipeline_tag"": ""text-classification"", ""processor"": null}, ""_id"": ""6539668e4037a1bc44ff9bf9"", ""modelId"": ""vectara/hallucination_evaluation_model"", ""usedStorage"": 1935923915}",0,,0,,0,,0,,0,"TeamTonic/MultiMed, TeamTonic/TruEraMultiMed, Tonic/MultiMedTulu, eaglelandsonce/Breaking-Free-Hackathon, eaglelandsonce/hhem, girgis/Cloudilic-Demo, 
huggingface/InferenceSupport/discussions/new?title=vectara/hallucination_evaluation_model&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bvectara%2Fhallucination_evaluation_model%5D(%2Fvectara%2Fhallucination_evaluation_model)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, itsJB/Fact-Checked, jayash391/RAG_MedMind, jimshadow666/vectara-hallucination_evaluation_model, pyresearch/KitchenCreators, subhanliaqat/hhem, vectara/leaderboard",13
layoutlm-document-qa_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
@@ -0,0 +1,119 @@
1
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
2
+ impira/layoutlm-document-qa,"---
3
+ language: en
4
+ license: mit
5
+ pipeline_tag: document-question-answering
6
+ tags:
7
+ - layoutlm
8
+ - document-question-answering
9
+ - pdf
10
+ widget:
11
+ - text: ""What is the invoice number?""
12
+ src: ""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""
13
+ - text: ""What is the purchase amount?""
14
+ src: ""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/contract.jpeg""
15
+ ---
16
+
17
+ # LayoutLM for Visual Question Answering
18
+
19
+ This is a fine-tuned version of the multi-modal [LayoutLM](https://aka.ms/layoutlm) model for the task of question answering on documents. It has been fine-tuned using both the [SQuAD2.0](https://huggingface.co/datasets/squad_v2) and [DocVQA](https://www.docvqa.org/) datasets.
20
+
21
+ ## Getting started with the model
22
+
23
+ To run these examples, you must have [PIL](https://pillow.readthedocs.io/en/stable/installation.html), [pytesseract](https://pypi.org/project/pytesseract/), and [PyTorch](https://pytorch.org/get-started/locally/) installed in addition to [transformers](https://huggingface.co/docs/transformers/index).
24
+
25
+ ```python
26
+ from transformers import pipeline
27
+
28
+ nlp = pipeline(
29
+ ""document-question-answering"",
30
+ model=""impira/layoutlm-document-qa"",
31
+ )
32
+
33
+ nlp(
34
+ ""https://templates.invoicehome.com/invoice-template-us-neat-750px.png"",
35
+ ""What is the invoice number?""
36
+ )
37
+ # {'score': 0.9943977, 'answer': 'us-001', 'start': 15, 'end': 15}
38
+
39
+ nlp(
40
+ ""https://miro.medium.com/max/787/1*iECQRIiOGTmEFLdWkVIH2g.jpeg"",
41
+ ""What is the purchase amount?""
42
+ )
43
+ # {'score': 0.9912159, 'answer': '$1,000,000,000', 'start': 97, 'end': 97}
44
+
45
+ nlp(
46
+ ""https://www.accountingcoach.com/wp-content/uploads/2013/10/income-statement-example@2x.png"",
47
+ ""What are the 2020 net sales?""
48
+ )
49
+ # {'score': 0.59147286, 'answer': '$ 3,750', 'start': 19, 'end': 20}
50
+ ```
51
+
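+ The pipeline also accepts a pre-loaded PIL image in place of a URL; a minimal sketch (""invoice.png"" is a placeholder path, not a file shipped with this model):
+
+ ```python
+ from PIL import Image
+
+ image = Image.open(""invoice.png"")
+ nlp(image, ""What is the invoice number?"")
+ ```
+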
52
+ **NOTE**: This model and pipeline were recently added to transformers via [PR #18407](https://github.com/huggingface/transformers/pull/18407) and [PR #18414](https://github.com/huggingface/transformers/pull/18414), so you'll need a recent version of transformers, for example:
53
+
54
+ ```bash
55
+ pip install git+https://github.com/huggingface/transformers.git@2ef774211733f0acf8d3415f9284c49ef219e991
56
+ ```
57
+
58
+ ## About us
59
+
60
+ This model was created by the team at [Impira](https://www.impira.com/).
61
+ ","{""id"": ""impira/layoutlm-document-qa"", ""author"": ""impira"", ""sha"": ""beed3c4d02d86017ebca5bd0fdf210046b907aa6"", ""last_modified"": ""2023-03-18 00:54:24+00:00"", ""created_at"": ""2022-08-07 21:07:19+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 33180, ""downloads_all_time"": null, ""likes"": 1101, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""tf"", ""safetensors"", ""layoutlm"", ""document-question-answering"", ""pdf"", ""en"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""document-question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""language: en\nlicense: mit\npipeline_tag: document-question-answering\ntags:\n- layoutlm\n- document-question-answering\n- pdf\nwidget:\n- text: What is the invoice number?\n src: https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png\n- text: What is the purchase amount?\n src: https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/contract.jpeg"", ""widget_data"": [{""text"": ""What is the invoice number?"", ""src"": ""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""}, {""text"": ""What is the purchase amount?"", ""src"": ""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/contract.jpeg""}], ""model_index"": null, ""config"": {""architectures"": [""LayoutLMForQuestionAnswering""], ""model_type"": ""layoutlm"", ""tokenizer_config"": {""unk_token"": ""<unk>"", ""bos_token"": ""<s>"", ""eos_token"": ""</s>"", ""sep_token"": ""</s>"", ""cls_token"": ""<s>"", ""pad_token"": ""<pad>"", ""mask_token"": ""<mask>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForDocumentQuestionAnswering"", ""custom_class"": null, ""pipeline_tag"": ""document-question-answering"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pyproject.toml', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='setup.cfg', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tf_model.h5', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""microsoft/HuggingGPT"", ""gunship999/SexyImages"", ""Yntec/ToyWorld"", ""impira/docquery"", ""llamameta/flux-pro-uncensored"", ""Uthar/SexyReality"", ""Nymbo/Compare-6"", ""Yntec/PrintingPress"", ""llamameta/fluxproV2"", ""freddyaboulton/gradio_pdf"", ""Yntec/ToyWorldXL"", ""phenixrhyder/NSFW-ToyWorld"", ""Yntec/blitz_diffusion"", ""Sarath0x8f/Document-QA-bot"", ""John6666/Diffusion80XX4sg"", 
""gaunernst/layoutlm-docvqa-paddleocr"", ""John6666/PrintingPress4"", ""llamameta/fast-sd3.5-large"", ""martynka/TasiaExperiment"", ""yergyerg/ImgGenClone"", ""Yntec/Image-Models-Test-April-2024"", ""DemiPoto/TestDifs"", ""Abinivesh/Multi-models-prompt-to-image-generation"", ""Yntec/Image-Models-Test-2024"", ""Illia56/Ask_PDF_anything"", ""Yntec/Image-Models-Test"", ""John6666/hfd_test_nostopbutton"", ""Yntec/Image-Models-Test-May-2024"", ""taesiri/HuggingGPT-Lite"", ""Nymbo/Diffusion80XX4sg"", ""Yntec/Image-Models-Test-September-2024"", ""DemiPoto/testSortModels"", ""Msp/Document_Parser"", ""Aeon-Avinash/GenAI_Document_QnA_with_Vision"", ""itsanurag/Invoice_reader2"", ""Yntec/MiniToyWorld"", ""kaleidoskop-hug/PrintingPress"", ""NativeAngels/Compare-6"", ""John6666/ToyWorld4"", ""John6666/Diffusion80XX4g"", ""SAITAN666/StableDiffusion35Large-Image-Models-Test-November-2024"", ""NativeAngels/HuggingfaceDiffusion"", ""Yntec/Image-Models-Test-December-2024"", ""huggingfacejs/doc-vis-qa"", ""vincentclaes/DocumentQAComparator"", ""John6666/Diffusion80XX4"", ""K00B404/HuggingfaceDiffusion_custom"", ""John6666/blitz_diffusion4"", ""John6666/blitz_diffusion_builtin"", ""K00B404/SimpleBrothel"", ""os1187/docquery"", ""abidlabs/docquery"", ""Gokul14/impira-layoutlm-document-qa"", ""rishabh062/DocumentQuestionAnswerModel"", ""Yntec/Image-Models-Test-July-2024"", ""Blane187/multi-diffusion"", ""NativeAngels/ToyWorld"", ""Uthar/LewdExperiments"", ""Uthar/BodyPaint"", ""Uthar/HRGiger"", ""Uthar/HighFashion"", ""Yntec/open-craiyon"", ""Yntec/Image-Models-Test-January-2025"", ""king007/docquery"", ""TensoraCO/docquery"", ""freddyaboulton/document-analyzer"", ""awacke1/Gradio-PDF-Document-QA-PDF-UI"", ""krishnapal2308/DocVQA-Sanctum"", ""freddyaboulton/gradiopdf"", ""Yeeezus/SexyImages"", ""John6666/MiniToyWorld"", ""Yntec/Image-Models-Test-March-2025"", ""ccarr0807/HuggingGPT"", ""theholycityweb/HuggingGPT"", ""avin1103/SLAM"", ""tribe-ai/document-qa-comparator"", ""awacke1/HEDIS.Dash.Component.Top.Clinical.Terminology.Vocabulary"", ""dog/fastapi-document-qa"", ""Alfasign/HuggingGPT-Lite"", ""aaravlovescodes/fastapi-document-qa"", ""kevinbednarek/docquery"", ""sysopo/impira-layoutlm-document-qa"", ""Faizanshaikh/pdf-chatbot"", ""harsha70/docquery-test"", ""saurshaz/HuggingGPT"", ""Ekittl01/impira-layoutlm-document-qa"", ""dolo650/QA_my_PDF"", ""IAMTFRMZA/Ask_PDF_anything"", ""Tomoniai/DocChat"", ""umair894/fastapi-document-qa_semantic"", ""nasirbloch323/chatpdf"", ""awacke1/Document-Image-Search-and-Query"", ""McCoasta/docReader"", ""NativeAngels/blitz_diffusion"", ""NativeAngels/PrintingPress4"", ""NativeAngels/PrintingPress"", ""dehua68/ToyWorld"", ""tushar888/Zeinural.ai"", ""Uthar/Harvester"", ""JarvisOnSolana/Jarvis""], ""safetensors"": {""parameters"": {""I64"": 514, ""F32"": 127792898}, ""total"": 127793412}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-03-18 00:54:24+00:00"", ""cardData"": ""language: en\nlicense: mit\npipeline_tag: document-question-answering\ntags:\n- layoutlm\n- document-question-answering\n- pdf\nwidget:\n- text: What is the invoice number?\n src: https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png\n- text: What is the purchase amount?\n src: https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/contract.jpeg"", ""transformersInfo"": {""auto_model"": ""AutoModelForDocumentQuestionAnswering"", ""custom_class"": null, ""pipeline_tag"": 
""document-question-answering"", ""processor"": ""AutoTokenizer""}, ""_id"": ""62f02987e57174582cd334e3"", ""modelId"": ""impira/layoutlm-document-qa"", ""usedStorage"": 4090788015}",0,"https://huggingface.co/kanansharmaa/layoutlm_document_qa, https://huggingface.co/harishussain12/File_Reader",2,,0,,0,,0,"John6666/Diffusion80XX4sg, Sarath0x8f/Document-QA-bot, Uthar/SexyReality, Yntec/PrintingPress, Yntec/ToyWorld, Yntec/ToyWorldXL, Yntec/blitz_diffusion, freddyaboulton/gradio_pdf, gunship999/SexyImages, huggingface/InferenceSupport/discussions/new?title=impira/layoutlm-document-qa&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bimpira%2Flayoutlm-document-qa%5D(%2Fimpira%2Flayoutlm-document-qa)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, llamameta/flux-pro-uncensored, llamameta/fluxproV2, phenixrhyder/NSFW-ToyWorld",13
62
+ kanansharmaa/layoutlm_document_qa,"---
63
+ license: mit
64
+ base_model: impira/layoutlm-document-qa
65
+ tags:
66
+ - generated_from_trainer
67
+ model-index:
68
+ - name: layoutlm_document_qa
69
+ results: []
70
+ ---
71
+
72
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
73
+ should probably proofread and complete it, then remove this comment. -->
74
+
75
+ # layoutlm_document_qa
76
+
77
+ This model is a fine-tuned version of [impira/layoutlm-document-qa](https://huggingface.co/impira/layoutlm-document-qa) on an unknown dataset.
78
+
79
+ ## Model description
80
+
81
+ More information needed
82
+
83
+ ## Intended uses & limitations
84
+
85
+ More information needed
86
+
87
+ ## Training and evaluation data
88
+
89
+ More information needed
90
+
91
+ ## Training procedure
92
+
93
+ ### Training hyperparameters
94
+
95
+ The following hyperparameters were used during training:
96
+ - learning_rate: 5e-05
97
+ - train_batch_size: 4
98
+ - eval_batch_size: 8
99
+ - seed: 42
100
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
101
+ - lr_scheduler_type: linear
102
+ - num_epochs: 20
103
+
104
+ ### Framework versions
105
+
106
+ - Transformers 4.39.0.dev0
107
+ - Pytorch 2.2.0+cu121
108
+ - Datasets 2.17.1.dev0
109
+ - Tokenizers 0.15.2
110
+ ","{""id"": ""kanansharmaa/layoutlm_document_qa"", ""author"": ""kanansharmaa"", ""sha"": ""ce53cb17064dfe9b8000f240036cba0b41640f40"", ""last_modified"": ""2024-02-22 11:59:27+00:00"", ""created_at"": ""2024-02-22 11:51:25+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 3, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""tensorboard"", ""safetensors"", ""layoutlm"", ""document-question-answering"", ""generated_from_trainer"", ""base_model:impira/layoutlm-document-qa"", ""base_model:finetune:impira/layoutlm-document-qa"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""document-question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model: impira/layoutlm-document-qa\nlicense: mit\ntags:\n- generated_from_trainer\nmodel-index:\n- name: layoutlm_document_qa\n results: []"", ""widget_data"": null, ""model_index"": [{""name"": ""layoutlm_document_qa"", ""results"": []}], ""config"": {""architectures"": [""LayoutLMForQuestionAnswering""], ""model_type"": ""layoutlm"", ""tokenizer_config"": {""cls_token"": ""[CLS]"", ""mask_token"": ""[MASK]"", ""pad_token"": ""[PAD]"", ""sep_token"": ""[SEP]"", ""unk_token"": ""[UNK]""}}, ""transformers_info"": {""auto_model"": ""AutoModelForDocumentQuestionAnswering"", ""custom_class"": null, ""pipeline_tag"": ""document-question-answering"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb22_11-51-22_innobit/events.out.tfevents.1708602685.innobit.821124.0', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb22_11-51-22_innobit/events.out.tfevents.1708602838.innobit.821124.1', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb22_11-51-22_innobit/events.out.tfevents.1708602858.innobit.821124.2', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='runs/Feb22_11-54-39_innobit/events.out.tfevents.1708602879.innobit.821124.3', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.txt', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 127792898}, ""total"": 127792898}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-02-22 11:59:27+00:00"", ""cardData"": ""base_model: impira/layoutlm-document-qa\nlicense: mit\ntags:\n- generated_from_trainer\nmodel-index:\n- name: layoutlm_document_qa\n results: []"", ""transformersInfo"": {""auto_model"": ""AutoModelForDocumentQuestionAnswering"", ""custom_class"": null, ""pipeline_tag"": ""document-question-answering"", ""processor"": ""AutoTokenizer""}, ""_id"": 
""65d7353df930b2bb12962a3e"", ""modelId"": ""kanansharmaa/layoutlm_document_qa"", ""usedStorage"": 511220324}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=kanansharmaa/layoutlm_document_qa&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bkanansharmaa%2Flayoutlm_document_qa%5D(%2Fkanansharmaa%2Flayoutlm_document_qa)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
+ harishussain12/File_Reader,"---
+ datasets:
+ - p1604/healthcare
+ language:
+ - en
+ base_model:
+ - impira/layoutlm-document-qa
+ library_name: transformers
+ ---","{""id"": ""harishussain12/File_Reader"", ""author"": ""harishussain12"", ""sha"": ""316b371e2018b16afcb6bfc14c0a04c77a285f99"", ""last_modified"": ""2024-12-12 11:27:54+00:00"", ""created_at"": ""2024-12-12 11:23:00+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 1, ""downloads_all_time"": null, ""likes"": 0, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""layoutlm"", ""document-question-answering"", ""en"", ""dataset:p1604/healthcare"", ""base_model:impira/layoutlm-document-qa"", ""base_model:finetune:impira/layoutlm-document-qa"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""document-question-answering"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""base_model:\n- impira/layoutlm-document-qa\ndatasets:\n- p1604/healthcare\nlanguage:\n- en\nlibrary_name: transformers"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""LayoutLMForQuestionAnswering""], ""model_type"": ""layoutlm"", ""tokenizer_config"": {""bos_token"": ""<s>"", ""cls_token"": ""<s>"", ""eos_token"": ""</s>"", ""mask_token"": ""<mask>"", ""pad_token"": ""<pad>"", ""sep_token"": ""</s>"", ""unk_token"": ""<unk>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForDocumentQuestionAnswering"", ""custom_class"": null, ""pipeline_tag"": ""document-question-answering"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 127792898}, ""total"": 127792898}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-12-12 11:27:54+00:00"", ""cardData"": ""base_model:\n- impira/layoutlm-document-qa\ndatasets:\n- p1604/healthcare\nlanguage:\n- en\nlibrary_name: transformers"", ""transformersInfo"": {""auto_model"": ""AutoModelForDocumentQuestionAnswering"", ""custom_class"": null, ""pipeline_tag"": ""document-question-answering"", ""processor"": ""AutoTokenizer""}, ""_id"": ""675ac794a52b5ba3104d9263"", ""modelId"": ""harishussain12/File_Reader"", ""usedStorage"": 511196416}",1,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=harishussain12/File_Reader&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bharishussain12%2FFile_Reader%5D(%2Fharishussain12%2FFile_Reader)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
lora_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
The diff for this file is too large to render. See raw diff
 
multilingual-e5-base_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ef6dd7813a906a8c07f73571dd27e17a5feea1a99e49ffdf1a1447c37e3d977
+ size 11720967
paraphrase-multilingual-MiniLM-L12-v2_finetunes_20250424_223250.csv_finetunes_20250424_223250.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:698619e91034b55c372cdb8bde76d451cbcc511ee485646f2e0785d5a72eb80f
+ size 16764136
potat1_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,60 @@
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
+ camenduru/potat1,"---
+ thumbnail: https://user-images.githubusercontent.com/54370274/243292723-fa703668-a931-41e1-8bcf-19c72203980b.png
+ tags:
+ - TextTovideo
+ - Text2Video
+ - text-to-video
+ ---
+
+ 🐣 Please follow me for new updates https://twitter.com/camenduru <br />
+ 🔥 Please join our discord server https://discord.gg/k5BwmmvJJU
+
+ ![00041-3056174990](https://github.com/camenduru/Text-To-Video-Finetuning-colab/assets/54370274/fa703668-a931-41e1-8bcf-19c72203980b)
+
+ # Potat 1️⃣
+ First Open-Source 1024x576 Text To Video Model 🥳
+
+ https://huggingface.co/vdo/potat1-5000/tree/main <br />
+ https://huggingface.co/vdo/potat1-10000/tree/main <br />
+ https://huggingface.co/vdo/potat1-10000-base-text-encoder/tree/main <br />
+ https://huggingface.co/vdo/potat1-15000/tree/main <br />
+ https://huggingface.co/vdo/potat1-20000/tree/main <br />
+ https://huggingface.co/vdo/potat1-25000/tree/main <br />
+ https://huggingface.co/vdo/potat1-30000/tree/main <br />
+ https://huggingface.co/vdo/potat1-35000/tree/main <br />
+ https://huggingface.co/vdo/potat1-40000/tree/main <br />
+ https://huggingface.co/vdo/potat1-45000/tree/main <br />
+ https://huggingface.co/vdo/potat1-50000/tree/main <br />
+ https://huggingface.co/vdo/potat1-50000-base-text-encoder/tree/main = https://huggingface.co/camenduru/potat1 (you are here) <br />
+
+
+ ### Info
+ Prototype Model <br />
+ Trained with https://lambdalabs.com ❤ 1xA100 (40GB) <br />
+ 2197 clips, 68388 tagged frames ( [salesforce/blip2-opt-6.7b-coco](https://huggingface.co/Salesforce/blip2-opt-6.7b-coco) ) <br />
+ train_steps: 10000 <br />
+
+ ### Dataset & Config
+ https://huggingface.co/camenduru/potat1_dataset/tree/main
+
+ ### Finetuning
+ https://github.com/Breakthrough/PySceneDetect <br />
+ https://github.com/ExponentialML/Video-BLIP2-Preprocessor <br />
+ https://github.com/ExponentialML/Text-To-Video-Finetuning <br />
+ https://github.com/camenduru/Text-To-Video-Finetuning-colab <br />
+
+ ### Base Model
+ https://huggingface.co/damo-vilab/modelscope-damo-text-to-video-synthesis <br />
+ https://www.modelscope.cn/models/damo/text-to-video-synthesis <br />
+
+ Thanks to [damo-vilab](https://damo.alibaba.com/) ❤ [ExponentialML](https://github.com/ExponentialML) ❤ [kabachuha](https://github.com/kabachuha) ❤ [@DiffusersLib](https://twitter.com/DiffusersLib) ❤ [@LambdaAPI](https://twitter.com/LambdaAPI) ❤ [@cerspense](https://twitter.com/cerspense) ❤ [@CiaraRowles1](https://twitter.com/CiaraRowles1) ❤ [@p1atdev_art](https://twitter.com/p1atdev_art) ❤ <br />
+
+ Thanks to Orellius ❤ (important bug report) <br />
+
+ Please try it 🐣 <br />
+ https://github.com/camenduru/text-to-video-synthesis-colab <br />
+
+ <video src=""https://github-production-user-asset-6210df.s3.amazonaws.com/54370274/244223223-c5201c8a-2815-4533-9474-1e312c564f4e.mp4"" data-canonical-src=""https://github-production-user-asset-6210df.s3.amazonaws.com/54370274/244223223-c5201c8a-2815-4533-9474-1e312c564f4e.mp4"" controls=""controls"" muted=""muted"" class=""d-block rounded-bottom-2 border-top width-fit"" style=""max-height:640px; min-height: 200px""></video>
+
+ Potat 2️⃣ is in the oven ♨ <br />","{""id"": ""camenduru/potat1"", ""author"": ""camenduru"", ""sha"": ""0025067347b65c89c56353259c41912b5de6fe7e"", ""last_modified"": ""2023-06-15 19:34:55+00:00"", ""created_at"": ""2023-06-05 04:06:10+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 56, ""downloads_all_time"": null, ""likes"": 159, ""library_name"": ""diffusers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""diffusers"", ""TextTovideo"", ""Text2Video"", ""text-to-video"", ""diffusers:TextToVideoSDPipeline"", ""region:us""], ""pipeline_tag"": ""text-to-video"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""tags:\n- TextTovideo\n- Text2Video\n- text-to-video\nthumbnail: https://user-images.githubusercontent.com/54370274/243292723-fa703668-a931-41e1-8bcf-19c72203980b.png"", ""widget_data"": null, ""model_index"": null, ""config"": {""diffusers"": {""_class_name"": ""TextToVideoSDPipeline""}}, ""transformers_info"": null, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model_index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='scheduler/scheduler_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='text_encoder/pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer/vocab.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='unet/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vae/diffusion_pytorch_model.bin', size=None, blob_id=None, lfs=None)""], ""spaces"": [""PeepDaSlan9/Language-Learn-Idea""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2023-06-15 19:34:55+00:00"", ""cardData"": ""tags:\n- TextTovideo\n- Text2Video\n- text-to-video\nthumbnail: https://user-images.githubusercontent.com/54370274/243292723-fa703668-a931-41e1-8bcf-19c72203980b.png"", ""transformersInfo"": null, ""_id"": ""647d5f321c0644de8d3f41f4"", ""modelId"": ""camenduru/potat1"", ""usedStorage"": 12207790091}",0,,0,,0,,0,,0,"PeepDaSlan9/Language-Learn-Idea, huggingface/InferenceSupport/discussions/new?title=camenduru/potat1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bcamenduru%2Fpotat1%5D(%2Fcamenduru%2Fpotat1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A",2
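For reference, the metadata for this row identifies `camenduru/potat1` as a diffusers checkpoint with a `TextToVideoSDPipeline` model index, the same pipeline family as the ModelScope base model linked in the card. A minimal sketch of loading it, assuming a CUDA GPU and a reasonably recent `diffusers` install; the prompt and settings are illustrative placeholders, not values from the card:

```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

# Load the text-to-video pipeline; fp16 keeps memory use modest on one GPU.
pipe = DiffusionPipeline.from_pretrained("camenduru/potat1", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# Generate a short clip; depending on the diffusers version, .frames may be
# a flat list of frames or a nested batch (use frames[0] in that case).
frames = pipe("a potato rolling down a grassy hill", num_inference_steps=25).frames
video_path = export_to_video(frames)  # writes an .mp4 and returns its path
print(video_path)
```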
roberta-base_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:637ef0790d33df8c6ec8dac6adcd8bcbc9e31f34786a7447034a304d2b379156
+ size 13645196
t5-small_finetunes_20250425_165642.csv_finetunes_20250425_165642.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccd4ee88af5dc80a0c6d0399137b3f1327147d56b30347365e5c14dd10a0e22e
+ size 13019509
text2image-prompt-generator_finetunes_20250426_171734.csv_finetunes_20250426_171734.csv ADDED
@@ -0,0 +1,23 @@
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
+ succinctly/text2image-prompt-generator,"---
+ language:
+ - ""en""
+ thumbnail: ""https://drive.google.com/uc?export=view&id=1JWwrxQbr1s5vYpIhPna_p2IG1pE5rNiV""
+ tags:
+ - text2image
+ - prompting
+ license: ""cc-by-2.0""
+ datasets:
+ - ""succinctly/midjourney-prompts""
+ ---
+
+ This is a GPT-2 model fine-tuned on the [succinctly/midjourney-prompts](https://huggingface.co/datasets/succinctly/midjourney-prompts) dataset, which contains 250k text prompts that users issued to the [Midjourney](https://www.midjourney.com/) text-to-image service over a one-month period. For more details on how this dataset was scraped, see [Midjourney User Prompts & Generated Images (250k)](https://www.kaggle.com/datasets/succinctlyai/midjourney-texttoimage).
+
+ This prompt generator can be used to auto-complete prompts for any text-to-image model (including the DALL·E family):
+ ![prompt autocomplete model](https://drive.google.com/uc?export=view&id=1JqZ-CaWNpQ4iO0Qcd3b8u_QnBp-Q0PKu)
+
+
+ Note that, while this model can be used together with any text-to-image model, it occasionally produces Midjourney-specific tags. Users can specify certain requirements via [double-dashed parameters](https://midjourney.gitbook.io/docs/imagine-parameters) (e.g. `--ar 16:9` sets the aspect ratio to 16:9, and `--no snake` asks the model to exclude snakes from the generated image) or set the importance of various entities in the image via [explicit weights](https://midjourney.gitbook.io/docs/user-manual#advanced-text-weights) (e.g. `hot dog::1.5 food::-1` is likely to produce the image of an animal instead of a frankfurter).
+
+
+ When using this model, please attribute credit to [Succinctly AI](https://succinctly.ai).","{""id"": ""succinctly/text2image-prompt-generator"", ""author"": ""succinctly"", ""sha"": ""b7e96e38b77149daaded8f5101cdc81482330b4b"", ""last_modified"": ""2022-08-20 06:01:10+00:00"", ""created_at"": ""2022-07-21 22:17:43+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 52265, ""downloads_all_time"": null, ""likes"": 301, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""pytorch"", ""gpt2"", ""text-generation"", ""text2image"", ""prompting"", ""en"", ""dataset:succinctly/midjourney-prompts"", ""license:cc-by-2.0"", ""autotrain_compatible"", ""text-generation-inference"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": ""text-generation"", ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- succinctly/midjourney-prompts\nlanguage:\n- en\nlicense: cc-by-2.0\ntags:\n- text2image\n- prompting\nthumbnail: https://drive.google.com/uc?export=view&id=1JWwrxQbr1s5vYpIhPna_p2IG1pE5rNiV"", ""widget_data"": [{""text"": ""My name is Julien and I like to""}, {""text"": ""I like traveling by train because""}, {""text"": ""Paris is an amazing place to visit,""}, {""text"": ""Once upon a time,""}], ""model_index"": null, ""config"": {""architectures"": [""GPT2LMHeadModel""], ""model_type"": ""gpt2"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='.gitignore', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='pytorch_model.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='training_args.bin', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [""doevent/prompt-generator"", ""Alfasign/Midjourney_Prompt"", ""hahahafofo/image2text_prompt_generator"", ""hahahafofo/prompt_generator"", ""Crossbro/succinctly-text2image-prompt-generator"", ""Silence1412/Stable_Diffusion_Cpu"", ""omartine/prompt-generator"", ""Kvikontent/prompt-generator"", ""jmourad/TXT2IMG-MJ-Desc"", ""zox47/succinctly-text2image-prompt-generator"", ""johnsu6616/prompt-generator"", ""jordonpeter01/prompt-generator-public"", ""Txandim/succinctly-text2image-prompt-generator"", ""nwpuwolf/succinctly-text2image-prompt-generator"", ""liubing80386/succinctly-text2image-prompt-generator"", ""johnsu6616/TXT2IMG-MJ-Desc"", ""gentlemanhu/succinctly-text2image-prompt-generator"", ""johnsu6616/SD_Helper_01"", ""kbora/minerva-generate-docker"", ""Ashrafb/prompt-generator2"", ""zumat/succinctly-text2image-prompt-generator"", 
""MadeWithAI/MWAI-Succinctly-Text2Image-Prompt-Generator"", ""amberheat77/Testttttt"", ""godandabove/succinctly-text2image-prompt-generator2"", ""johnmogi/succinctly-text2image-prompt-generator"", ""ttmouse/succinctly-text2image-prompt-generator"", ""lurenbushi/succinctly-text2image-prompt-generator"", ""he2022/pg"", ""johnsu6616/prompt_generator"", ""zox47/prompt"", ""Finnone/succinctly-text2image-prompt-generator"", ""HighVibesTimes/text-to-image-prompt-generator"", ""3mrology/Chameleon_Prompt_Engineering"", ""Isgrassmann/okay"", ""chaore/SD_Helper_01"", ""Isgrassmann/20.06"", ""CognifyAI/succinctly-text2image-prompt-generator"", ""chaore/prompt_generator"", ""gaurav09x/succinctly-text2image-prompt-generator"", ""FernandoMachado/prompt-generator"", ""Ashrafb/prompt-generator"", ""gleisonnanet/prompt-generator"", ""Zoory/Afvh"", ""Gyufyjk/prompt-generator"", ""immanuelzhu/prompt_generator"", ""NicolasJurado/promptgenerador1"", ""Shishtem/succinctly-text2image-prompt-generator"", ""flatindo/prompt-generator"", ""MadeWithAI/MWAI-prompt-generator"", ""Appu008/succinctly-text2image-prompt-generator"", ""ouphi/succinctly-text2image-prompt-generator"", ""Jesse-Mach/succinctly-text2image-prompt-generator"", ""matrex82/succinctly-text2image-prompt-generator"", ""Baseta/prompt-generator"", ""NicolasJurado/generaprompts"", ""patrickstauffacher/mj-prompt-generator"", ""K00B404/prompt_generator"", ""swervegod/succinctly-text2image-prompt-generator"", ""chaore/succinctly-prompt-generator"", ""SahaniJi/prompt-generator"", ""EdBoy2202/ImagetoEmoji"", ""Cmescobar27/minerva-generate-docker"", ""Josefernan/prompt-generator""], ""safetensors"": null, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2022-08-20 06:01:10+00:00"", ""cardData"": ""datasets:\n- succinctly/midjourney-prompts\nlanguage:\n- en\nlicense: cc-by-2.0\ntags:\n- text2image\n- prompting\nthumbnail: https://drive.google.com/uc?export=view&id=1JWwrxQbr1s5vYpIhPna_p2IG1pE5rNiV"", ""transformersInfo"": {""auto_model"": ""AutoModelForCausalLM"", ""custom_class"": null, ""pipeline_tag"": ""text-generation"", ""processor"": ""AutoTokenizer""}, ""_id"": ""62d9d087cfed764363b43770"", ""modelId"": ""succinctly/text2image-prompt-generator"", ""usedStorage"": 6647809983}",0,,0,,0,https://huggingface.co/ysn-rfd/text2image-prompt-generator-Q8_0-GGUF,1,,0,"Alfasign/Midjourney_Prompt, Ashrafb/prompt-generator, Ashrafb/prompt-generator2, Crossbro/succinctly-text2image-prompt-generator, EdBoy2202/ImagetoEmoji, Josefernan/prompt-generator, Shishtem/succinctly-text2image-prompt-generator, Silence1412/Stable_Diffusion_Cpu, Txandim/succinctly-text2image-prompt-generator, doevent/prompt-generator, hahahafofo/image2text_prompt_generator, huggingface/InferenceSupport/discussions/new?title=succinctly/text2image-prompt-generator&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Bsuccinctly%2Ftext2image-prompt-generator%5D(%2Fsuccinctly%2Ftext2image-prompt-generator)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A, omartine/prompt-generator",13
vit-base-patch16-224-in21k_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8027951d00859a1190648c8bb0a61a3aa3db5251c00e3f432c322a30c8c950f
+ size 15191327
whisper-large-v2_finetunes_20250424_193500.csv_finetunes_20250424_193500.csv ADDED
The diff for this file is too large to render. See raw diff
 
whisper-large-v3-turbo_finetunes_20250424_150612.csv_finetunes_20250424_150612.csv ADDED
The diff for this file is too large to render. See raw diff
 
whisper-medusa-v1_finetunes_20250427_003734.csv_finetunes_20250427_003734.csv ADDED
@@ -0,0 +1,70 @@
+ model_id,card,metadata,depth,children,children_count,adapters,adapters_count,quantized,quantized_count,merges,merges_count,spaces,spaces_count
+ aiola/whisper-medusa-v1,"---
+ license: mit
+ datasets:
+ - openslr/librispeech_asr
+ tags:
+ - ASR
+ - Automatic Speech Recognition
+ - Whisper
+ - Medusa
+ - Speech
+ - Speculative Decoding
+ ---
+
+ # Whisper Medusa
+
+ Whisper is an advanced encoder-decoder model for speech transcription and
+ translation, processing audio through encoding and decoding stages. Given
+ its large size and slow inference speed, various optimization strategies like
+ Faster-Whisper and Speculative Decoding have been proposed to enhance performance.
+ Our Medusa model builds on Whisper by predicting multiple tokens per iteration,
+ which significantly improves speed with a small degradation in WER. We train and
+ evaluate our model on the LibriSpeech dataset, demonstrating speed improvements.
+
+ ---------
+
+ ## Training Details
+ `aiola/whisper-medusa-v1` was trained on the LibriSpeech dataset to perform audio translation.
+ The Medusa heads were optimized for English, so for optimal performance and speed improvements, please use English audio only.
+
+ ---------
+
+ ## Usage
+ To use `whisper-medusa-v1`, install the [`whisper-medusa`](https://github.com/aiola-lab/whisper-medusa) repo following its README instructions.
+
+ Inference can be done using the following code:
+ ```python
+ import torch
+ import torchaudio
+
+ from whisper_medusa import WhisperMedusaModel
+ from transformers import WhisperProcessor
+
+ model_name = ""aiola/whisper-medusa-v1""
+ model = WhisperMedusaModel.from_pretrained(model_name)
+ processor = WhisperProcessor.from_pretrained(model_name)
+
+ path_to_audio = ""path/to/audio.wav""
+ SAMPLING_RATE = 16000  # Whisper models expect 16 kHz input
+ language = ""en""
+ device = torch.device(""cuda"" if torch.cuda.is_available() else ""cpu"")
+
+ input_speech, sr = torchaudio.load(path_to_audio)
+ if sr != SAMPLING_RATE:
+     input_speech = torchaudio.transforms.Resample(sr, SAMPLING_RATE)(input_speech)
+
+ input_features = processor(input_speech.squeeze(), return_tensors=""pt"", sampling_rate=SAMPLING_RATE).input_features
+ input_features = input_features.to(device)
+
+ model = model.to(device)
+ model_output = model.generate(  # Medusa heads emit multiple tokens per decoding step
+     input_features,
+     language=language,
+ )
+ predict_ids = model_output[0]
+ pred = processor.decode(predict_ids, skip_special_tokens=True)
+ print(pred)
+
+ ```
+ ","{""id"": ""aiola/whisper-medusa-v1"", ""author"": ""aiola"", ""sha"": ""6ea7c2f47658cfc7f9c8d1c158a9fbdb33458462"", ""last_modified"": ""2024-08-03 19:21:29+00:00"", ""created_at"": ""2024-08-01 10:24:52+00:00"", ""private"": false, ""gated"": false, ""disabled"": false, ""downloads"": 7, ""downloads_all_time"": null, ""likes"": 178, ""library_name"": ""transformers"", ""gguf"": null, ""inference"": null, ""inference_provider_mapping"": null, ""tags"": [""transformers"", ""safetensors"", ""whisper"", ""ASR"", ""Automatic Speech Recognition"", ""Whisper"", ""Medusa"", ""Speech"", ""Speculative Decoding"", ""dataset:openslr/librispeech_asr"", ""license:mit"", ""endpoints_compatible"", ""region:us""], ""pipeline_tag"": null, ""mask_token"": null, ""trending_score"": null, ""card_data"": ""datasets:\n- openslr/librispeech_asr\nlicense: mit\ntags:\n- ASR\n- Automatic Speech Recognition\n- Whisper\n- Medusa\n- Speech\n- Speculative Decoding"", ""widget_data"": null, ""model_index"": null, ""config"": {""architectures"": [""WhisperMedusaModel""], ""model_type"": ""whisper"", ""tokenizer_config"": {""bos_token"": ""<|endoftext|>"", ""eos_token"": ""<|endoftext|>"", ""pad_token"": ""<|endoftext|>"", ""unk_token"": ""<|endoftext|>""}}, ""transformers_info"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": ""AutoProcessor""}, ""siblings"": [""RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='added_tokens.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='generation_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='merges.txt', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00001-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model-00002-of-00002.safetensors', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='model.safetensors.index.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='normalizer.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='preprocessor_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='special_tokens_map.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='tokenizer_config.json', size=None, blob_id=None, lfs=None)"", ""RepoSibling(rfilename='vocab.json', size=None, blob_id=None, lfs=None)""], ""spaces"": [], ""safetensors"": {""parameters"": {""F32"": 1561341440}, ""total"": 1561341440}, ""security_repo_status"": null, ""xet_enabled"": null, ""lastModified"": ""2024-08-03 19:21:29+00:00"", ""cardData"": ""datasets:\n- openslr/librispeech_asr\nlicense: mit\ntags:\n- ASR\n- Automatic Speech Recognition\n- Whisper\n- Medusa\n- Speech\n- Speculative Decoding"", ""transformersInfo"": {""auto_model"": ""AutoModel"", ""custom_class"": null, ""pipeline_tag"": null, ""processor"": ""AutoProcessor""}, ""_id"": ""66ab62749c463be2a8a8dc50"", ""modelId"": ""aiola/whisper-medusa-v1"", ""usedStorage"": 
6245535512}",0,,0,,0,,0,,0,huggingface/InferenceSupport/discussions/new?title=aiola/whisper-medusa-v1&description=React%20to%20this%20comment%20with%20an%20emoji%20to%20vote%20for%20%5Baiola%2Fwhisper-medusa-v1%5D(%2Faiola%2Fwhisper-medusa-v1)%20to%20be%20supported%20by%20Inference%20Providers.%0A%0A(optional)%20Which%20providers%20are%20you%20interested%20in%3F%20(Novita%2C%20Hyperbolic%2C%20Together%E2%80%A6)%0A,1
whisper-small_finetunes_20250426_014322.csv_finetunes_20250426_014322.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ff5c0f1269ab68df8d258066596475b4f65fbfc9430a3efa85a2bbf69458905
+ size 18095951
whisper-tiny_finetunes_20250426_121513.csv_finetunes_20250426_121513.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d62934f6c719ec908b3dc1820a2501eeaf0d4a88fcd4605d2776ce3ba7bbaf92
+ size 13413733